diff --git a/go.mod b/go.mod index 85f6dd6964..f65c6d9199 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/blang/semver v3.5.1+incompatible github.com/cenkalti/backoff/v3 v3.2.2 github.com/cpuguy83/go-md2man v1.0.10 + github.com/creack/pty v1.1.18 github.com/docker/cli v20.10.18+incompatible github.com/docker/docker v20.10.18+incompatible github.com/fatih/color v1.13.0 @@ -17,7 +18,7 @@ require ( github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.9 - github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387 + github.com/google/go-containerregistry v0.11.0 github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b github.com/hashicorp/errwrap v1.1.0 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -36,10 +37,11 @@ require ( github.com/hashicorp/golang-lru v0.5.4 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/vault/sdk v0.6.0 - github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 + github.com/hashicorp/yamux v0.1.0 github.com/hinshun/vt10x v0.0.0-20220228203356-1ab2cad5fd82 github.com/jonboulle/clockwork v0.3.0 github.com/ktr0731/go-fuzzyfinder v0.6.0 + github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e github.com/mitchellh/copystructure v1.2.0 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-testing-interface v1.14.1 @@ -50,20 +52,21 @@ require ( github.com/ryanuber/go-glob v1.0.0 github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 - github.com/tektoncd/chains v0.12.1-0.20220901150427-1bf8faaf4475 + github.com/tektoncd/chains v0.12.1-0.20220920205308-b34353430a40 github.com/tektoncd/hub v1.9.0 - github.com/tektoncd/pipeline v0.39.0 + github.com/tektoncd/pipeline v0.40.0 github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb github.com/tektoncd/triggers v0.21.0 + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 go.opencensus.io v0.23.0 go.uber.org/atomic v1.10.0 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.23.0 - golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e - golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b - golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 - golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 + golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 + golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b + golang.org/x/sys v0.0.0-20220907062415-87db552b00fd + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 + golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 google.golang.org/grpc v1.49.0 google.golang.org/protobuf v1.28.1 gopkg.in/square/go-jose.v2 v2.6.0 @@ -74,24 +77,25 @@ require ( k8s.io/apimachinery v0.23.9 k8s.io/cli-runtime v0.23.9 k8s.io/client-go v0.23.9 - knative.dev/pkg v0.0.0-20220805012121-7b8b06028e4f + knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 sigs.k8s.io/yaml v1.3.0 ) require ( - bitbucket.org/creachadair/shell v0.0.6 // indirect - cloud.google.com/go v0.102.1 // indirect - cloud.google.com/go/compute v1.7.0 // indirect + bitbucket.org/creachadair/shell v0.0.7 // indirect + cloud.google.com/go v0.103.0 // indirect + cloud.google.com/go/compute v1.9.0 // indirect cloud.google.com/go/firestore v1.6.1 // indirect cloud.google.com/go/iam v0.3.0 // indirect cloud.google.com/go/kms v1.4.0 // indirect cloud.google.com/go/storage v1.24.0 // indirect contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect - 
github.com/Azure/azure-sdk-for-go v63.3.0+incompatible // indirect + github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 // indirect + github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect + github.com/Azure/go-autorest/autorest v0.11.28 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect @@ -100,45 +104,56 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/PaesslerAG/gval v1.0.0 // indirect - github.com/PaesslerAG/jsonpath v0.1.1 // indirect - github.com/ReneKroon/ttlcache/v2 v2.11.0 // indirect github.com/Shopify/sarama v1.32.0 // indirect github.com/ThalesIgnite/crypto11 v1.2.5 // indirect + github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect + github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect + github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect + github.com/alibabacloud-go/darabonba-openapi v0.1.18 // indirect + github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 // indirect + github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect + github.com/alibabacloud-go/openapi-util v0.0.11 // indirect + github.com/alibabacloud-go/tea v1.1.18 // indirect + github.com/alibabacloud-go/tea-utils v1.4.4 // indirect + github.com/alibabacloud-go/tea-xml v1.1.2 // indirect + github.com/aliyun/credentials-go v1.2.3 // indirect github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect - github.com/aws/aws-sdk-go v1.43.45 // indirect - github.com/aws/aws-sdk-go-v2 v1.16.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.15.3 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 // indirect + github.com/aws/aws-sdk-go v1.44.93 // indirect + github.com/aws/aws-sdk-go-v2 v1.16.14 // indirect + github.com/aws/aws-sdk-go-v2/config v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.12.18 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.22 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.15.0 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 // indirect - github.com/aws/smithy-go v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15 // indirect + 
github.com/aws/aws-sdk-go-v2/service/kms v1.18.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.21 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.17 // indirect + github.com/aws/smithy-go v1.13.2 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 // indirect github.com/benbjohnson/clock v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/blendle/zapdriver v1.3.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21 // indirect + github.com/clbanning/mxj/v2 v2.5.6 // indirect github.com/cloudevents/sdk-go/v2 v2.11.0 // indirect github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect - github.com/containerd/stargz-snapshotter/estargz v0.11.1 // indirect - github.com/coreos/go-oidc/v3 v3.1.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect + github.com/coreos/go-oidc/v3 v3.3.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/creack/pty v1.1.18 github.com/cyberphone/json-canonicalization v0.0.0-20210823021906-dc406ceaf94b // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -149,50 +164,50 @@ require ( github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/emicklei/go-restful v2.15.0+incompatible // indirect + github.com/emicklei/go-restful v2.16.0+incompatible // indirect github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/fullstorydev/grpcurl v1.8.2 // indirect + github.com/fullstorydev/grpcurl v1.8.6 // indirect github.com/gdamore/encoding v1.0.0 // indirect github.com/gdamore/tcell/v2 v2.4.0 // indirect github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/runtime v0.24.0 // indirect - github.com/go-openapi/spec v0.20.5 // indirect - github.com/go-openapi/strfmt v0.21.2 
// indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/runtime v0.24.1 // indirect + github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/strfmt v0.21.3 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.0 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/go-playground/validator/v10 v10.10.1 // indirect - github.com/go-stack/stack v1.8.1 // indirect + github.com/go-playground/validator/v10 v10.11.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.1 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang/glog v1.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/mock v1.6.0 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/cel-go v0.11.3 // indirect - github.com/google/certificate-transparency-go v1.1.2 // indirect + github.com/google/certificate-transparency-go v1.1.3 // indirect github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220328141311-efc62d802606 // indirect github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220301182634-bfe2ffc6b6bd // indirect - github.com/google/go-github/v42 v42.0.0 // indirect + github.com/google/go-github/v45 v45.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/trillian v1.4.0 // indirect + github.com/google/trillian v1.4.1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/google/wire v0.5.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect @@ -204,10 +219,10 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3 // indirect - github.com/hashicorp/vault/api v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.2 // indirect + github.com/hashicorp/vault/api v1.7.2 // indirect github.com/imdario/mergo v0.3.12 // indirect - github.com/in-toto/in-toto-golang v0.3.4-0.20211211042327-af1f9fb822bf // indirect + github.com/in-toto/in-toto-golang v0.3.4-0.20220709202702-fa494aaa0add // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect @@ -215,16 +230,16 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.2 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b // indirect - github.com/jhump/protoreflect v1.9.0 // indirect + github.com/jellydator/ttlcache/v2 v2.11.1 // indirect + github.com/jhump/protoreflect v1.12.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joho/godotenv v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - 
github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/compress v1.15.8 // indirect github.com/leodido/go-urn v1.2.1 // indirect - github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lucasb-eyer/go-colorful v1.0.3 // indirect github.com/magiconair/properties v1.8.6 // indirect @@ -233,28 +248,29 @@ require ( github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.13 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect + github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect + github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/mozillazg/docker-credential-acr-helper v0.3.0 // indirect github.com/nsf/termbox-go v0.0.0-20201124104050-ed494de23a00 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/gomega v1.19.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.3.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.2 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect + github.com/prometheus/client_golang v1.13.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/statsd_exporter v0.21.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.2.0 // indirect @@ -265,31 +281,31 @@ require ( github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/sigstore/cosign v1.8.1-0.20220504185934-6ecf405f0b92 // indirect - github.com/sigstore/fulcio v0.1.2-0.20220114150912-86a2036f9bc7 // indirect - github.com/sigstore/rekor v0.5.0 // indirect - github.com/sigstore/sigstore v1.2.1-0.20220424143412-3d41663116d5 // indirect + github.com/sigstore/cosign v1.12.0 // indirect + github.com/sigstore/fulcio v0.5.3 // indirect + github.com/sigstore/rekor v0.11.0 // indirect + github.com/sigstore/sigstore v1.4.1-0.20220908204944-ec922cf4f1c2 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.12.0 // 
indirect + github.com/spf13/viper v1.13.0 // indirect github.com/spiffe/go-spiffe/v2 v2.1.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/stretchr/testify v1.8.0 // indirect - github.com/subosito/gotenv v1.4.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tektoncd/resolution v0.0.0-20220331203013-e4203c70c5eb // indirect + github.com/subosito/gotenv v1.4.1 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613 // indirect github.com/thales-e-security/pool v0.0.2 // indirect - github.com/theupdateframework/go-tuf v0.0.0-20220211205608-f0c3294f63b9 // indirect - github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 + github.com/theupdateframework/go-tuf v0.5.0 // indirect + github.com/tjfoc/gmsm v1.3.2 // indirect github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect - github.com/urfave/cli v1.22.5 // indirect + github.com/transparency-dev/merkle v0.0.1 // indirect + github.com/urfave/cli v1.22.7 // indirect github.com/vbatts/tar-split v0.11.2 // indirect - github.com/xanzy/go-gitlab v0.64.0 // indirect + github.com/xanzy/go-gitlab v0.73.1 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.1 // indirect github.com/xdg-go/stringprep v1.0.3 // indirect @@ -298,46 +314,44 @@ require ( github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect github.com/zeebo/errs v1.2.2 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.etcd.io/etcd/api/v3 v3.5.4 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect - go.etcd.io/etcd/client/v2 v2.305.4 // indirect - go.etcd.io/etcd/client/v3 v3.5.4 // indirect - go.etcd.io/etcd/etcdctl/v3 v3.5.0 // indirect - go.etcd.io/etcd/etcdutl/v3 v3.5.0 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.0 // indirect - go.etcd.io/etcd/raft/v3 v3.5.0 // indirect - go.etcd.io/etcd/server/v3 v3.5.0 // indirect - go.etcd.io/etcd/tests/v3 v3.5.0 // indirect - go.etcd.io/etcd/v3 v3.5.0 // indirect - go.mongodb.org/mongo-driver v1.8.4 // indirect - go.opentelemetry.io/contrib v1.3.0 // indirect + go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 // indirect + go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/raft/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/server/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/tests/v3 v3.6.0-alpha.0 // indirect + go.etcd.io/etcd/v3 v3.6.0-alpha.0 // indirect + go.mongodb.org/mongo-driver v1.10.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 // indirect - go.opentelemetry.io/otel v1.3.0 // indirect - go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect - go.opentelemetry.io/otel/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk v1.3.0 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect - go.opentelemetry.io/otel/trace v1.3.0 // indirect - go.opentelemetry.io/proto/otlp v0.12.0 // indirect + go.opentelemetry.io/otel v1.7.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 // indirect + go.opentelemetry.io/otel/sdk v1.7.0 // indirect + go.opentelemetry.io/otel/trace v1.7.0 // indirect + go.opentelemetry.io/proto/otlp v0.16.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect goa.design/goa/v3 v3.8.2 // indirect - gocloud.dev v0.25.0 // indirect - gocloud.dev/docstore/mongodocstore v0.25.0 // indirect - gocloud.dev/pubsub/kafkapubsub v0.25.0 // indirect + gocloud.dev v0.26.0 // indirect + gocloud.dev/docstore/mongodocstore v0.26.0 // indirect + gocloud.dev/pubsub/kafkapubsub v0.26.0 // indirect golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 // indirect + golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect golang.org/x/tools v0.1.12 // indirect golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect - google.golang.org/api v0.85.0 // indirect + google.golang.org/api v0.95.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220803205849-8f55acc8769f // indirect + google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58 // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.23.9 // indirect @@ -350,15 +364,8 @@ require ( sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect - sigs.k8s.io/release-utils v0.6.0 // indirect + sigs.k8s.io/release-utils v0.7.3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect ) -replace ( - github.com/kr/pty => github.com/creack/pty v1.1.16 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 - go.opentelemetry.io/otel => go.opentelemetry.io/otel v0.20.0 - go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v0.20.0 -) - exclude github.com/antlr/antlr4 v0.0.0-20201029161626-9a95f0cc3d7c diff --git a/go.sum b/go.sum index df685dee25..dce1d6e6c1 100644 --- a/go.sum +++ b/go.sum @@ -2,9 +2,9 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= -bitbucket.org/creachadair/shell v0.0.6 h1:reJflDbKqnlnqb4Oo2pQ1/BqmY/eCWcNGHrIUO8qIzc= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= -bou.ke/monkey v1.0.2/go.mod h1:OqickVX3tNx6t33n1xvtTtu85YN5s6cKwVug+oHMaIA= +bitbucket.org/creachadair/shell v0.0.7 h1:Z96pB6DkSb7F3Y3BBnJeOZH2gazyMTWlvecSD4vDqfk= +bitbucket.org/creachadair/shell v0.0.7/go.mod h1:oqtXSSvSYr4624lnnabXHaBsYW6RD80caLi2b3hJk0U= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go 
v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -37,8 +37,6 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.92.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= @@ -47,14 +45,16 @@ cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2Z cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.103.0 h1:YXtxp9ymmZjlGzxV7VrYQ8aaQuAgcqxSy6YhDX4I458= +cloud.google.com/go v0.103.0/go.mod h1:vwLx1nqLrzLX/fpwSMOXmFIqBOyHsvHbnAdbGSJ+mKk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.17.0/go.mod h1:pUlbH9kNOnp6ayShsqKLB6w49z14ILAaq0hrjh93Ajw= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.1.0/go.mod h1:2NIffxgWfORSI7EOYMFatGTfjMLnqrOKBEyYb6NoRgA= cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw= @@ -62,10 +62,12 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.9.0 h1:ED/FP4xv8GJw63v556/ASNc1CeeLUO2Bs8nzaHchkHg= +cloud.google.com/go/compute v1.9.0/go.mod h1:lWv1h/zUWTm/LozzfTJhBSkd6ShQq8la8VeeuOEGxfY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.5.0/go.mod h1:RGUNM0FFAVkYA94BLTxoXBgfIyY1Riq67TwaBXH0lwc= cloud.google.com/go/firestore v1.1.0/go.mod 
h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/firestore v1.6.1 h1:8rBq3zRjnHx8UtBvaOWqBB1xq9jH6/wltfQLlTMh2Fw= @@ -79,7 +81,6 @@ cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSyw cloud.google.com/go/kms v1.1.0/go.mod h1:WdbppnCDMDpOvoYBMn1+gNmOeEoZYqAv+HeuKARGCXI= cloud.google.com/go/kms v1.4.0 h1:iElbfoE61VeLhnZcGOltqL8HIly8Nhbe5t6JlH9GXjo= cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/monitoring v0.1.0/go.mod h1:Hpm3XfzJv+UTiXzCG5Ffp0wijzHTC7Cv4eR7o3x/fEE= cloud.google.com/go/monitoring v1.1.0/go.mod h1:L81pzz7HKn14QCMaCs6NTQkdBnE87TElyanS95vIcl4= cloud.google.com/go/monitoring v1.4.0/go.mod h1:y6xnxfwI3hTFWOdkOaD7nfJVlwuC3/mS/5kvtT131p4= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -87,33 +88,37 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/pubsub v1.11.0-beta.schemas/go.mod h1:llNLsvx+RnsZJoY481TzC1XcdB2hWdR6gSWM5O4vgfs= cloud.google.com/go/pubsub v1.17.1/go.mod h1:4qDxMr1WsM9+aQAz36ltDwCIM+R0QdlseyFjBuNvnss= cloud.google.com/go/pubsub v1.19.0 h1:WZy66ga6/tqmZiwv1jwKVgqV8FuEuAmPR5CEJHNVCZk= cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs= cloud.google.com/go/secretmanager v1.0.0/go.mod h1:+Qkm5qxIJ5mk74xxIXA+87fseaY1JLYBcFPQoc/GQxg= cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U= -cloud.google.com/go/security v1.1.1/go.mod h1:QZd0wTwNJNKnl0H4/wAFD10TSX8kI4nk8V6ie6fyc9w= +cloud.google.com/go/security v1.4.1/go.mod h1:AvMZimkVRmH0wZ3thpVztocuf9zNEVPrlWprclDxYNg= cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs= cloud.google.com/go/spanner v1.18.0/go.mod h1:LvAjUXPeJRGNuGpikMULjhLj/t9cRvdc+fxRoLiugXA= -cloud.google.com/go/spanner v1.25.0/go.mod h1:kQUft3x355hzzaeFbObjsvkzZDgpDkesp3v75WBnI8w= +cloud.google.com/go/spanner v1.31.0/go.mod h1:ztDJVUZgEA2xc7HjSNQG+d+2L0bOSsw876/5Hnr78U8= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.15.0/go.mod h1:mjjQMoxxyGH7Jr8K5qrx6N2O0AHsczI61sMNn03GIZI= cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA= -cloud.google.com/go/storage v1.22.0/go.mod h1:GbaLEoMqbVm6sx3Z0R++gSiBlgMv6yUi2q1DeGFKQgE= cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod 
h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= cloud.google.com/go/storage v1.24.0 h1:a4N0gIkx83uoVFGz8B2eAV3OhN90QoWF5OZWLKl39ig= cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE= -cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= cloud.google.com/go/trace v1.2.0/go.mod h1:Wc8y/uYyOhPy12KEnXG9XGrvfMz5F5SrYecQlbW1rwM= +code.gitea.io/gitea-vet v0.2.1/go.mod h1:zcNbT/aJEmivCAhfmkHOlT645KNOf9W2KnkLgFjGGfE= code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= code.gitea.io/sdk/gitea v0.14.0/go.mod h1:89WiyOX1KEcvjP66sRHdu0RafojGo60bT9UqW17VbWs= +code.gitea.io/sdk/gitea v0.15.1 h1:WJreC7YYuxbn0UDaPuWIe/mtiNKTvLN8MLkaw71yx/M= +code.gitea.io/sdk/gitea v0.15.1/go.mod h1:klY2LVI3s3NChzIk/MzMn7G1FHrfU7qd63iSMVoHRBA= contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= @@ -124,8 +129,8 @@ contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0 contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= contrib.go.opencensus.io/exporter/stackdriver v0.13.10/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= +contrib.go.opencensus.io/exporter/stackdriver v0.13.12/go.mod h1:mmxnWlrvrFdpiOHOhxBaVi1rkc0WOqhgfknj4Yg0SeQ= contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE= contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= @@ -138,10 +143,11 @@ git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqbl github.com/ActiveState/vt10x v1.3.1 h1:7qi8BGXUEBghzBxfXSY0J77etO+L95PZQlwD7ay2mn0= github.com/ActiveState/vt10x v1.3.1/go.mod h1:8wJKd36c9NmCfGyPyOJmkvyIMvbUPfHkfdS8zZlK19s= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20211102141018-f7be0cbad29c/go.mod h1:WpB7kf89yJUETZxQnP1kgYPNwlT2jjdDYUCoxVggM3g= github.com/AlecAivazis/survey/v2 v2.3.5/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw= github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo= 
github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo= @@ -157,13 +163,14 @@ github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v60.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v60.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v61.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v62.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v63.3.0+incompatible h1:INepVujzUrmArRZjDLHbtER+FkvCoEwyRCXGqOlmDII= github.com/Azure/azure-sdk-for-go v63.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE= +github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= @@ -188,16 +195,18 @@ github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgq github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod 
h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= +github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/azure/auth v0.5.2/go.mod h1:q98IH4qgc3eWM4/WOeR5+YPmBuy8Lq0jNRDwSM0CuFk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU= github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= @@ -251,6 +260,7 @@ github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0 github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -276,6 +286,8 @@ github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -287,19 +299,15 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= -github.com/PaesslerAG/gval v1.0.0 h1:GEKnRwkWDdf9dOmKcNrar9EA1bz1z9DqPIO1+iLzhd8= github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= -github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEsylIk= github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod 
h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/ReneKroon/ttlcache/v2 v2.10.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY= -github.com/ReneKroon/ttlcache/v2 v2.11.0 h1:OvlcYFYi941SBN3v9dsDcC2N8vRxyHcCmJb3Vl4QMoM= github.com/ReneKroon/ttlcache/v2 v2.11.0/go.mod h1:mBxvsNY+BT8qLLd6CuAJubbKo6r0jh3nb5et22bbfGY= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -316,9 +324,10 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210420163308-c1402a70e2f1/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210609063737-0067dc6dcea2/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20220720053627-e327d0730470/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= @@ -334,9 +343,47 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= +github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= +github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= +github.com/alibabacloud-go/cr-20181201 v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= +github.com/alibabacloud-go/darabonba-openapi v0.1.12/go.mod 
h1:sTAjsFJmVsmcVeklL9d9uDBlFsgl43wZ6jhI6BHqHqU= +github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= +github.com/alibabacloud-go/darabonba-openapi v0.1.18 h1:3eUVmAr7WCJp7fgIvmCd9ZUyuwtJYbtUqJIed5eXCmk= +github.com/alibabacloud-go/darabonba-openapi v0.1.18/go.mod h1:PB4HffMhJVmAgNKNq3wYbTUlFvPgxJpTzd1F5pTuUsc= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68 h1:NqugFkGxx1TXSh/pBcU00Y6bljgDPaFdh5MUSeJ7e50= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= +github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/openapi-util v0.0.9/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.11 h1:iYnqOPR5hyEEnNZmebGyRMkkEJRWUEjDiiaOHZ5aNhA= +github.com/alibabacloud-go/openapi-util v0.0.11/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.18 h1:+6GJ06eu5Cr/Mkj09vWrf6QAfrPepctY2OxcWNclRC0= +github.com/alibabacloud-go/tea v1.1.18/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-utils v1.4.4 h1:lxCDvNCdTo9FaXKKq45+4vGETQUKNOW/qKTcX9Sk53o= +github.com/alibabacloud-go/tea-utils v1.4.4/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-xml v1.1.2 h1:oLxa7JUXm2EDFzMg+7oRsYc+kutgCVwm+bZlhhmvW5M= +github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyun/credentials-go v1.2.3 h1:Vmodnr52Rz1mcbwn0kzMhLRKb6soizewuKXdfZiNemU= +github.com/aliyun/credentials-go v1.2.3/go.mod h1:/KowD1cfGSLrLsH28Jr8W+xwoId0ywIy5lNzDz6O1vw= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= 
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -348,19 +395,19 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed h1:u github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= -github.com/apache/beam v2.32.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= +github.com/apache/beam/sdks/v2 v2.0.0-20211012030016-ef4364519c94/go.mod h1:/kOom7hCyHVzAC/Z7HbZywkZZv6ywF+wb4CvgDVdcB8= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.11/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= @@ -369,10 +416,7 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d 
h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= @@ -391,62 +435,81 @@ github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.42.8/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.42.22/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.42.25/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs= +github.com/aws/aws-sdk-go v1.43.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.43.45 h1:2708Bj4uV+ym62MOtBnErm/CDX61C4mFe9V2gXy1caE= -github.com/aws/aws-sdk-go v1.43.45/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.22/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.76/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.80/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.93 h1:hAgd9fuaptBatSft27/5eBMdcA8+cIMqo96/tZ6rKl8= +github.com/aws/aws-sdk-go v1.44.93/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.7.1/go.mod h1:L5LuPC1ZgDr2xQS7AmIec/Jlc7O/Y1u2KxJyNVab250= github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2 v1.12.0/go.mod h1:tWhQI5N5SiMawto3uMAQJU5OUN/1ivhDDHq7HTsJvZ0= github.com/aws/aws-sdk-go-v2 v1.14.0/go.mod h1:ZA3Y8V0LrlWj63MQAnRHgKf/5QB//LSZCPNWlWrNGLU= -github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= +github.com/aws/aws-sdk-go-v2 v1.16.11/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= +github.com/aws/aws-sdk-go-v2 v1.16.14 h1:db6GvO4Z2UqHt5gvT0lr6J5x5P+oQ7bdRzczVaRekMU= +github.com/aws/aws-sdk-go-v2 v1.16.14/go.mod h1:s/G+UV29dECbF5rf+RNj1xhlmvoNurGSr+McVSRj59w= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= github.com/aws/aws-sdk-go-v2/config v1.5.0/go.mod h1:RWlPOAW3E3tbtNAqTwvSW54Of/yP3oiZXMI0xfUdjyA= github.com/aws/aws-sdk-go-v2/config v1.10.1/go.mod 
h1:auIv5pIIn3jIBHNRcVQcsczn6Pfa6Dyv80Fai0ueoJU= github.com/aws/aws-sdk-go-v2/config v1.12.0/go.mod h1:GQONFVSDdG6RRho1C730SGNyDhS1kSTnxpOE76ptBqo= github.com/aws/aws-sdk-go-v2/config v1.14.0/go.mod h1:GKDRrvsq/PTaOYc9252u8Uah1hsIdtor4oIrFvUNPNM= -github.com/aws/aws-sdk-go-v2/config v1.15.3 h1:5AlQD0jhVXlGzwo+VORKiUuogkG7pQcLJNzIzK7eodw= github.com/aws/aws-sdk-go-v2/config v1.15.3/go.mod h1:9YL3v07Xc/ohTsxFXzan9ZpFpdTOFl4X65BAKYaz8jg= +github.com/aws/aws-sdk-go-v2/config v1.17.0/go.mod h1:4SKzBMiB8lV0fw2w7eDBo/LjQyHFITN4vUUuqpurFmI= +github.com/aws/aws-sdk-go-v2/config v1.17.1/go.mod h1:uOxDHjBemNTF2Zos+fgG0NNfE86wn1OAHDTGxjMEYi0= +github.com/aws/aws-sdk-go-v2/config v1.17.5 h1:+NS1BWvprx7nHcIk5o32LrZgifs/7Pm1V2nWjQgZ2H0= +github.com/aws/aws-sdk-go-v2/config v1.17.5/go.mod h1:H0cvPNDO3uExWts/9PDhD/0ne2esu1uaIulwn1vkwxM= github.com/aws/aws-sdk-go-v2/credentials v1.3.1/go.mod h1:r0n73xwsIVagq8RsxmZbGSRQFj9As3je72C2WzUIToc= github.com/aws/aws-sdk-go-v2/credentials v1.6.1/go.mod h1:QyvQk1IYTqBWSi1T6UgT/W8DMxBVa5pVuLFSRLLhGf8= github.com/aws/aws-sdk-go-v2/credentials v1.7.0/go.mod h1:Kmq64kahHJtXfmnEwnvRKeNjLBqkdP++Itln9BmQerE= github.com/aws/aws-sdk-go-v2/credentials v1.9.0/go.mod h1:PyHKqk/+tJuDY7T8R580S1j/AcSD+ODeUZ99CAUKLqQ= -github.com/aws/aws-sdk-go-v2/credentials v1.11.2 h1:RQQ5fzclAKJyY5TvF+fkjJEwzK4hnxQCLOu5JXzDmQo= github.com/aws/aws-sdk-go-v2/credentials v1.11.2/go.mod h1:j8YsY9TXTm31k4eFhspiQicfXPLZ0gYXA50i4gxPE8g= +github.com/aws/aws-sdk-go-v2/credentials v1.12.13/go.mod h1:9fDEemXizwXrxPU1MTzv69LP/9D8HVl5qHAQO9A9ikY= +github.com/aws/aws-sdk-go-v2/credentials v1.12.14/go.mod h1:opAndTyq+YN7IpVG57z2CeNuXSQMqTYxGGlYH0m0RMY= +github.com/aws/aws-sdk-go-v2/credentials v1.12.18 h1:HF62tbhARhgLfvmfwUbL9qZ+dkbZYzbFdxBb3l5gr7Q= +github.com/aws/aws-sdk-go-v2/credentials v1.12.18/go.mod h1:O7n/CPagQ33rfG6h7vR/W02ammuc5CrsSM22cNZp9so= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.3.0/go.mod h1:2LAuqPx1I6jNfaGDucWfA2zqQCYCOMCDHiCOciALyNw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0/go.mod h1:19SxQ+9zANyJCnNaoF3ovl8bFil4TaqCYEDdqNGKM+A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.11.0/go.mod h1:rwdUKJV5rm+vHu1ncD1iGDqahBEL8O0tBjVqo9eO2N0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3 h1:LWPg5zjHV9oz/myQr4wMs0gi4CjnDN/ILmyZUFYXZsU= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.3/go.mod h1:uk1vhHHERfSVCUnqSqz8O48LBYDSC+k6brng09jcMOk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.12/go.mod h1:aZ4vZnyUuxedC7eD4JyEHpGnCz+O2sHQEx3VvAwklSE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.15 h1:nkQ+aI0OCeYfzrBipL6ja/6VEbUnHQoZHBHtoK+Nzxw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.15/go.mod h1:Oz2/qWINxIgSmoZT9adpxJy2UhpcOAI3TIyWgYMVSz0= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1/go.mod h1:wN/mvkow08GauDwJ70jnzJ1e+hE+Q3Q7TwpYLXOe9oI= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.3/go.mod h1:0dHuD2HZZSiwfJSy1FO5bX1hQ1TxVV1QXXjpn3XUE44= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3/go.mod h1:L72JSFj9OwHwyukeuKFFyTj6uFWE4AjB0IQp97bd9Lc= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.5/go.mod h1:2hXc8ooJqF2nAznsbJQIn+7h851/bu8GVC80OVTTqf8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9 h1:onz/VaaxZ7Z4V+WIN9Txly9XLTmoOh1oJ8XcAC3pako= 
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.9/go.mod h1:AnVH5pvai0pAF4lXRq0bmhbes1u9R8wTE+g+183bZNM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.18/go.mod h1:348MLhzV1GSlZSMusdwQpXKbhD7X2gbI/TxwAPKkYZQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21 h1:gRIXnmAVNyoRQywdNtpAkgY+f30QNzgF53Q5OobNZZs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21/go.mod h1:XsmHMV9c512xgsW01q7H0ut+UQQQpWX8QsFbdLHDwaU= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0/go.mod h1:KdVvdk4gb7iatuHZgIkIqvJlWHBtjCJLUtD/uO/FkWw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.3.0/go.mod h1:miRSv9l093jX/t/j+mBCaLqFHo9xKYzJ7DGm1BsGoJM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3 h1:9stUQR/u2KXU6HkFJYlqnZEjBnbgrVbG6I5HN09xZh0= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.3/go.mod h1:ssOhaLpRlh88H3UmEcsBoVKq309quMvm3Ds8e9d4eJM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.12/go.mod h1:ckaCVTEdGAxO6KwTGzgskxR1xM+iJW4lxMyDFVda2Fc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15 h1:noAhOo2mMDyYhTx99aYPvQw16T3fQ/DiKAv9fzpIKH8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15/go.mod h1:kjJ4CyD9M3Wq88GYg3IPfj67Rs0Uvz8aXK7MJ8BvE4I= github.com/aws/aws-sdk-go-v2/internal/ini v1.1.1/go.mod h1:Zy8smImhTdOETZqfyn01iNOe0CNggVbPjCajyaz6Gvg= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3/go.mod h1:N4dv+zawriMFZBO/6UKg3zt+XO6xWOQo1neAA0lFbo4= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.6/go.mod h1:o1ippSg3yJx5EuT4AOGXJCUcmt5vrcxla1cg6K1Q8Iw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10 h1:by9P+oy3P/CwggN4ClnW2D4oL91QV7pBzBICi1chZvQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.10/go.mod h1:8DcYQcz0+ZJaSxANlHIsbbi6S+zMwjwdDqwW3r9AzaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.19/go.mod h1:cVHo8KTuHjShb9V8/VjH3S/8+xPu16qx8fdGwmotJhE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.22 h1:nF+E8HfYpOMw6M5oA9efB602VC00IHNQnB5CmFvZPvA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.22/go.mod h1:tltHVGy977LrSOgRR5aV9+miyno/Gul/uJNPKS7FzP4= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/ecr v1.4.1/go.mod h1:FglZcyeiBqcbvyinl+n14aT/EWC7S1MIH+Gan2iizt0= github.com/aws/aws-sdk-go-v2/service/ecr v1.13.0/go.mod h1:X9rkClmo0/dXh2fwvhkMoXR5zxirrzCqMgfU+Z0HIgs= @@ -463,12 +526,17 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.1/go.mod h1:zce github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0/go.mod h1:wTgFkG6t7jS/6Y0SILXwfspV3IXowb6ngsAlSajW0Kc= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.8.0/go.mod h1:rBDLgXDAwHOfxZKLRDl8OGTPzFDC+a2pLqNNj8+QwfI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3 h1:Gh1Gpyh01Yvn7ilO/b/hr01WgNpaszfbKMUgqM186xQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.3/go.mod h1:wlY6SVjuwvh3TVRpTqdy4I1JpBFLX4UGeKZdWntaocw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.12/go.mod h1:1TODGhheLWjpQWSuhYuAUWYTCKwEjx2iblIFKDHjeTc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15 h1:xlf0J6DUgAj/ocvKQxCmad8Bu1lJuRbt5Wu+4G1xw1g= 
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15/go.mod h1:ZVJ7ejRl4+tkWMuCwjXoy0jd8fF5u3RCyWjSVjUIvQE= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0/go.mod h1:xKCZ4YFSF2s4Hnb/J0TLeOsKuGzICzcElaOKNGrVnx4= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.3/go.mod h1:Bm/v2IaN6rZ+Op7zX+bOUMdL4fsrYZiD0dsjLhNKwZc= github.com/aws/aws-sdk-go-v2/service/kms v1.10.0/go.mod h1:ZkHWL8m5Nw1g9yMXqpCjnIJtSDToAmNbXXZ9gj0bO7s= github.com/aws/aws-sdk-go-v2/service/kms v1.16.3/go.mod h1:QuiHPBqlOFCi4LqdSskYYAWpQlx3PKmohy+rE2F+o5g= +github.com/aws/aws-sdk-go-v2/service/kms v1.18.4/go.mod h1:WG8HUJKtDqXJM3+CNZeN+2wvdcJb5vprKo01fr1KQW4= +github.com/aws/aws-sdk-go-v2/service/kms v1.18.9 h1:BPMcM9DZdpQKWQ8WSXla36mpm+5YgVqP7pLF+W7TEe0= +github.com/aws/aws-sdk-go-v2/service/kms v1.18.9/go.mod h1:8sR6O18d56mlJf0VkYD7mOtrBoM//8eym7FcfG1t9Sc= github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0/go.mod h1:Gwz3aVctJe6mUY9T//bcALArPUaFmNAy2rTB9qN4No8= github.com/aws/aws-sdk-go-v2/service/s3 v1.26.3/go.mod h1:g1qvDuRsJY+XghsV6zg00Z4KJ7DtFFCx8fJD2a491Ak= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.10.0/go.mod h1:qAgsrzF3Z2vvV01j79fs7D75ofCMQe81/OKBJx0rjFY= @@ -483,21 +551,30 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.3.1/go.mod h1:J3A3RGUvuCZjvSuZEcOpHD github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM= github.com/aws/aws-sdk-go-v2/service/sso v1.8.0/go.mod h1:AB6v3BedyhVRIbPQbJnUsBmtup2pFiikpp5n3YyB6Ac= github.com/aws/aws-sdk-go-v2/service/sso v1.10.0/go.mod h1:m1CRRFX7eH3EE6w0ntdu+lo+Ph9VS7y8qRV/vdym0ZY= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.3 h1:frW4ikGcxfAEDfmQqWgMLp+F1n4nRo9sF39OcIb5BkQ= github.com/aws/aws-sdk-go-v2/service/sso v1.11.3/go.mod h1:7UQ/e69kU7LDPtY40OyoHYgRmgfGM4mgsLYtcObdveU= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.16/go.mod h1:mS5xqLZc/6kc06IpXn5vRxdLaED+jEuaSRv5BxtnsiY= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.17/go.mod h1:mS5xqLZc/6kc06IpXn5vRxdLaED+jEuaSRv5BxtnsiY= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.21 h1:7jUFr+7F4MzIjCZzy7ygRtXFQcQ0kAbT0gUvtUeAdyU= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.21/go.mod h1:q8nYq51W3gpZempYsAD83fPRlrOTMCwN+Ahg4BKFTXQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.3 h1:UTTPNP3/WzZa7hoHP3Szb/Yl0bM3NoBrf5ABy1OArUM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.3/go.mod h1:+IF75RMJh0+zqTGXGshyEGRsU2ImqWv6UuHGkHl6kEo= github.com/aws/aws-sdk-go-v2/service/sts v1.6.0/go.mod h1:q7o0j7d7HrJk/vr9uUt3BVRASvcU7gYZB9PUgPiByXg= github.com/aws/aws-sdk-go-v2/service/sts v1.10.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE= github.com/aws/aws-sdk-go-v2/service/sts v1.13.0/go.mod h1:jQto17aC9pJ6xRa1g29uXZhbcS6qNT3PSnKfPShq4sY= github.com/aws/aws-sdk-go-v2/service/sts v1.15.0/go.mod h1:E264g2Gl5U9KTGzmd8ypGEAoh75VmqyuA/Ox5O1eRE4= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.3 h1:cJGRyzCSVwZC7zZZ1xbx9m32UnrKydRYhOvcD1NYP9Q= github.com/aws/aws-sdk-go-v2/service/sts v1.16.3/go.mod h1:bfBj0iVmsUyUg4weDB4NxktD9rDGeKSVWnjTnwbx9b8= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.13/go.mod h1:Ru3QVMLygVs/07UQ3YDur1AQZZp2tUNje8wfloFttC0= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.17 h1:LVM2jzEQ8mhb2dhrFl4PJ3sa5+KcKT01dsMk2Ma9/FU= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.17/go.mod h1:bQujK1n0V1D1Gz5uII1jaB1WDvhj4/T3tElsJnVXCR0= github.com/aws/smithy-go v1.6.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= 
github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.9.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.11.0/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE= github.com/aws/smithy-go v1.11.2/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= +github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.2 h1:TBLKyeJfXTrTXRHmsv4qWt9IQGYyWThLYaJWSahTOGE= +github.com/aws/smithy-go v1.13.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20211215200129-69c85dc22db6/go.mod h1:8vJsEZ4iRqG+Vx6pKhWK6U00qcj0KC37IsfszMkY6UE= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 h1:IWeCJzU+IYaO2rVEBlGPTBfe90cmGXFTLdhUFlzDGsY= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795/go.mod h1:8vJsEZ4iRqG+Vx6pKhWK6U00qcj0KC37IsfszMkY6UE= @@ -528,6 +605,7 @@ github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHf github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc= github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bluekeyes/go-gitdiff v0.4.0 h1:Q3qUnQ5cv27vG6ywUTiSQUobRYRcQIBs8KVGKojLg9I= github.com/bluekeyes/go-gitdiff v0.4.0/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM= github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= @@ -535,24 +613,23 @@ github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= -github.com/bradleyfalzon/ghinstallation/v2 v2.0.3/go.mod h1:tlgi+JWCXnKFx/Y4WtnDbZEINo31N5bcvnCoqieefmk= github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso= github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A= github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= 
-github.com/bytecodealliance/wasmtime-go v0.31.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= -github.com/bytecodealliance/wasmtime-go v0.33.1/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= +github.com/bytecodealliance/wasmtime-go v0.36.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= -github.com/carolynvs/magex v0.7.0/go.mod h1:vZB3BkRfkd5ZMtkxJkCGbdFyWGoZiuNPKhx6uEQARmY= +github.com/carolynvs/magex v0.9.0/go.mod h1:H1LW6RYJ/sNbisMmPe9E73aJZa8geKLKK9mBWLWz3ek= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= @@ -565,6 +642,8 @@ github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTx github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= @@ -596,14 +675,16 @@ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/clbanning/mxj/v2 v2.5.6 h1:Jm4VaCI/+Ug5Q57IzEoZbwx4iQFA6wkXv72juUSeK+g= +github.com/clbanning/mxj/v2 v2.5.6/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc= github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.4.1/go.mod h1:lhEpxMrIUkeu9rVRgoAbyqZ8GR8Hd3DUy+thHUxAHoI= github.com/cloudevents/sdk-go/sql/v2 v2.8.0/go.mod h1:u9acNJbhmi1wnDJro4PEAqbr4N1LTCyEUClErxbPS1A= github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY= -github.com/cloudevents/sdk-go/v2 v2.5.0/go.mod 
h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U= github.com/cloudevents/sdk-go/v2 v2.8.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs= github.com/cloudevents/sdk-go/v2 v2.10.1/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs= github.com/cloudevents/sdk-go/v2 v2.11.0 h1:pCb7Cdkb8XpUoil+miuw6PEzuCG9cc8Erj8y1/q3odo= @@ -676,8 +757,11 @@ github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= -github.com/containerd/containerd v1.6.0 h1:CLa12ZcV0d2ZTRKq1ssioeJpTnPJBMyndpEKA+UtzJg= github.com/containerd/containerd v1.6.0/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= +github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= +github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= +github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -696,6 +780,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -706,6 +791,7 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= @@ -713,8 +799,10 @@ github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+EL github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= 
github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0= github.com/containerd/stargz-snapshotter/estargz v0.11.0/go.mod h1:/KsZXsJRllMbTKFfG0miFQWViQKdI9+9aSXs+HN0+ac= -github.com/containerd/stargz-snapshotter/estargz v0.11.1 h1:mNQqxcAWmDrV6d6yUvzFhfY8puNzoQz9v4diW+Pmei4= github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ= +github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= +github.com/containerd/stargz-snapshotter/estargz v0.12.0 h1:idtwRTLjk2erqiYhPWy2L844By8NRFYEwYHcXhoIWPM= +github.com/containerd/stargz-snapshotter/estargz v0.12.0/go.mod h1:AIQ59TewBFJ4GOPEQXujcrJ/EKxh5xXZegW1rkR1P/M= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -734,13 +822,16 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -750,8 +841,9 @@ github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw= -github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= +github.com/coreos/go-oidc/v3 v3.2.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= +github.com/coreos/go-oidc/v3 v3.3.0 h1:Y1LV3mP+QT3MEycATZpAiwfyN+uxZLqVbAHJUuOJEe4= +github.com/coreos/go-oidc/v3 
v3.3.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -777,8 +869,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.16 h1:vfetlOf3A+9YKggibynnX9mnFjuSVvkRj+IWpcTSLEQ= -github.com/creack/pty v1.1.16/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -808,6 +898,8 @@ github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27 github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/denisenkom/go-mssqldb v0.12.2/go.mod h1:lnIw1mZukFRZDJYQ0Pb833QS2IaC3l5HkEfra2LJ+sk= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= @@ -820,18 +912,21 @@ github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlD github.com/dgryski/go-lttb v0.0.0-20180810165845-318fcdf10a77/go.mod h1:Va5MyIzkU0rAM92tn3hb3Anb7oz7KcnixF49+2wOMe4= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dimfeld/httppath v0.0.0-20170720192232-ee938bf73598/go.mod h1:0FpDmbrt36utu8jEmeU05dPC9AB5tsLYVVi+ZHfyuwI= github.com/dimfeld/httptreemux/v5 v5.4.0 h1:IiHYEjh+A7pYbhWyjmGnj5HZK6gpOOvyBXCJ+BE8/Gs= github.com/dimfeld/httptreemux/v5 v5.4.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw= +github.com/distribution/distribution/v3 v3.0.0-20220526142353-ffbd94cbe269/go.mod h1:28YO/VJk9/64+sTGNuYaBjWxrXTPrj0C0XmgTIOjxX4= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod 
h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.12+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.16+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9KktZyjev3SIVDU= github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= @@ -842,8 +937,9 @@ github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.16+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.18+incompatible h1:SN84VYXTBNGn92T/QwIRPlum9zfemfitN7pbsp26WSc= github.com/docker/docker v20.10.18+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -854,17 +950,15 @@ github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6Uezg github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvyukov/go-fuzz v0.0.0-20210914135545-4980593459a1/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -874,13 +968,16 @@ github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/eggsampler/acme/v3 v3.2.1/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= +github.com/eggsampler/acme/v3 v3.3.0/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.16.0+incompatible h1:rgqiKNjTnFQA6kkhFe16D8epTksy9HQ1MyrbDXSdYhM= +github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/proto v1.6.15/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -931,6 +1028,7 @@ github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fernet/fernet-go v0.0.0-20191111064656-eff2850e6001/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= @@ -938,7 +1036,6 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible 
h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= @@ -962,8 +1059,9 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o= github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw= -github.com/fullstorydev/grpcurl v1.8.2 h1:2II5e++aFnctnPJir3GL6cPSwF69Ord1u/9O+fv1vrI= -github.com/fullstorydev/grpcurl v1.8.2/go.mod h1:YvWNT3xRp2KIRuvCphFodG0fKkMXwaxA9CJgKCcyzUQ= +github.com/fullstorydev/grpcurl v1.8.6 h1:WylAwnPauJIofYSHqqMTC1eEfUIzqzevXyogBxnQquo= +github.com/fullstorydev/grpcurl v1.8.6/go.mod h1:WhP7fRQdhxz2TkL97u+TCb505sxfH78W1usyoB3tepw= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -983,13 +1081,10 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gin-gonic/gin v1.7.3/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= @@ -998,10 +1093,13 @@ github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJu github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= github.com/go-git/go-billy/v5 v5.3.1/go.mod 
h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= +github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -1009,6 +1107,7 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-gormigrate/gormigrate/v2 v2.0.2/go.mod h1:vld36QpBTfTzLealsHsmQQJK5lSwJt6wiORv+oFX8/I= github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -1027,154 +1126,93 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk= -github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= -github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= 
-github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= 
-github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= -github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= -github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= -github.com/go-openapi/loads v0.21.0/go.mod h1:rHYve9nZrQ4CJhyeIIFJINGCg1tQpx2yJrrNo8sf1ws= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= -github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/runtime v0.21.0/go.mod h1:aQg+kaIQEn+A2CRSY1TxbM8+sT9g2V3aLc1FbIAnbbs= -github.com/go-openapi/runtime v0.22.0/go.mod h1:aQg+kaIQEn+A2CRSY1TxbM8+sT9g2V3aLc1FbIAnbbs= -github.com/go-openapi/runtime v0.24.0 h1:vTgDijpGLCgJOJTdAp5kG+O+nRsVCbH417YQ3O0iZo0= -github.com/go-openapi/runtime v0.24.0/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= +github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo= +github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= -github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= -github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.5 h1:skHa8av4VnAtJU5zyAUXrrdK/NDiVX8lchbG+BfcdrE= -github.com/go-openapi/spec v0.20.5/go.mod h1:QbfOSIVt3/sac+a1wzmKbbcLXm5NdZnyBZYtCijp43o= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= 
-github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= -github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= +github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= +github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o= +github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= -github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= -github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-openapi/validate 
v0.20.3/go.mod h1:goDdqVGiigM3jChcrYJxD2joalke3ZXeftD16byIjA4= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= +github.com/go-openapi/swag v0.22.1/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= +github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-piv/piv-go v1.9.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= +github.com/go-piv/piv-go v1.10.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-playground/validator/v10 v10.10.1 h1:uA0+amWMiglNZKZ9FJRKUAe9U3RX91eVn1JYXMWt7ig= -github.com/go-playground/validator/v10 v10.10.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= -github.com/go-rod/rod v0.101.8/go.mod h1:N/zlT53CfSpq74nb6rOR0K8UF0SPUPBmzBnArrms+mY= -github.com/go-rod/rod v0.106.1 h1:+9YdoTT56KI3KrFfWVr3I13wh0qbhm/Aq+7JvCBA6AQ= -github.com/go-rod/rod v0.106.1/go.mod h1:+YLe2X+nAuEGpYWs7rKPZr9SMX100FbxYZaeU1Dofpc= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-rod/rod v0.106.8/go.mod h1:xkZOchuKqTOkMOBkrzb7uJpbKZRab1haPCWDvuZkS2U= +github.com/go-rod/rod v0.109.1/go.mod h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE= +github.com/go-rod/rod v0.109.3 h1:MxuSJGK9lEUq07K+QPfnxnuvQpsQT+YI4SoQjSE0LVg= +github.com/go-rod/rod v0.109.3/go.mod 
h1:GZDtmEs6RpF6kBRYpGCZXxXlKNneKVPiKOjaMbmVVjE= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= @@ -1195,6 +1233,7 @@ github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/goadesign/goa v2.2.5+incompatible/go.mod h1:d/9lpuZBK7HFi/7O0oXfwvdoIl+nx2bwKqctZe/lQao= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= @@ -1247,6 +1286,8 @@ github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22 github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/goccy/go-json v0.9.6/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/kpoward v0.1.0 h1:UcrLMG9rq7NwrMiUc0h+qUyIlvqPzqLiPb+zQEqH8cE= +github.com/goccy/kpoward v0.1.0/go.mod h1:m13lkcWSvNXtYC9yrXzguwrt/YTDAGioPusndMdQ+eA= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -1276,8 +1317,9 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp 
v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= @@ -1341,6 +1383,7 @@ github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPP github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY= github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/diff v0.0.0-20181124234638-500114f11e71/go.mod h1:22dM4PLscQl+Nzf64qNBurVJvfyvZELT0iRW2l/NN70= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= @@ -1364,8 +1407,8 @@ github.com/google/certificate-transparency-go v1.0.22-0.20181127102053-c25855a82 github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ= github.com/google/certificate-transparency-go v1.1.2-0.20210512142713-bed466244fa6/go.mod h1:aF2dp7Dh81mY8Y/zpzyXps4fQW5zQbDu2CxfpJB6NkI= -github.com/google/certificate-transparency-go v1.1.2 h1:4hE0GEId6NAW28dFpC+LrRGwQX5dtmXQGDbg8+/MZOM= -github.com/google/certificate-transparency-go v1.1.2/go.mod h1:3OL+HKDqHPUfdKrHVQxO6T8nDLO0HF7LRTlkIWXaWvQ= +github.com/google/certificate-transparency-go v1.1.3 h1:WEb38wcTe0EuAvg7USzgklnOjjnlMaahYO3faaqnCn8= +github.com/google/certificate-transparency-go v1.1.3/go.mod h1:S9FT/VzOUzhOGG0iLrzDs+f5Ml/zm7IYY/w+IlHz01M= github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -1385,16 +1428,15 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw= -github.com/google/go-containerregistry v0.7.1-0.20211118220127-abdc633f8305/go.mod h1:6cMIl1RfryEiPzBE67OgtZdEiLWz4myqCQIiBMy3CsM= github.com/google/go-containerregistry v0.8.0/go.mod h1:wW5v71NHGnQyb4k+gSshjxidrC7lN33MdWEn+Mz9TsI= github.com/google/go-containerregistry v0.8.1-0.20220110151055-a61fd0a8e2bb/go.mod h1:wW5v71NHGnQyb4k+gSshjxidrC7lN33MdWEn+Mz9TsI= -github.com/google/go-containerregistry v0.8.1-0.20220209165246-a44adc326839/go.mod h1:cwx3SjrH84Rh9VFJSIhPh43ovyOp3DCWgY3h8nWmdGQ= github.com/google/go-containerregistry v0.8.1-0.20220216220642-00c59d91847c/go.mod h1:MMbnwuvLeZJRPqhTs8jDWc8xGlOs5YCGx1TSc/qdExk= github.com/google/go-containerregistry v0.8.1-0.20220219142810-1571d7fdc46e/go.mod h1:MMbnwuvLeZJRPqhTs8jDWc8xGlOs5YCGx1TSc/qdExk= -github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387 h1:GWICy4b02s8EA1M9H5krRQ48BKpIHO5LtBBm2BQLhx0= 
github.com/google/go-containerregistry v0.8.1-0.20220414143355-892d7a808387/go.mod h1:eTLvLZaEe2FoQsb25t7BLxQQryyrwHTzFfwxN87mhAw= +github.com/google/go-containerregistry v0.9.0/go.mod h1:9eq4BnSufyT1kHNffX+vSXVonaJ7yaIOulrKZejMxnQ= +github.com/google/go-containerregistry v0.11.0 h1:Xt8x1adcREjFcmDoDK8OdOsjxu90PHkGuwNP8GiHMLM= +github.com/google/go-containerregistry v0.11.0/go.mod h1:BBaYtsHPHA42uEgAvd/NejvAfPSlz281sJWqupjSxfk= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220120151853-ac864e57b117/go.mod h1:BH7pLQnIZhfVpL7cRyWhvvz1bZLY9V45/HvXVh5UMDY= -github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220310143843-f1fa40b162a1/go.mod h1:gm/Zjh0iiPBfwgDIYgHJCRxaGzBZu1njCgwX1EmC1Tw= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220328141311-efc62d802606 h1:eJgk7LEoexSTTwQm4/gYybCyFdUme5PSHua4yW/26dA= github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20220328141311-efc62d802606/go.mod h1:gm/Zjh0iiPBfwgDIYgHJCRxaGzBZu1njCgwX1EmC1Tw= github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220110151055-a61fd0a8e2bb/go.mod h1:SK4EqntTk6tHEyNngoqHUwjjZaW6mfzLukei4+cbvu8= @@ -1403,9 +1445,8 @@ github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-2022030118263 github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM= -github.com/google/go-github/v39 v39.0.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE= -github.com/google/go-github/v42 v42.0.0 h1:YNT0FwjPrEysRkLIiKuEfSvBPCGKphW5aS5PxwaoLec= -github.com/google/go-github/v42 v42.0.0/go.mod h1:jgg/jvyI0YlDOM1/ps6XYh04HNQ3vKf0CVko62/EhRg= +github.com/google/go-github/v45 v45.2.0 h1:5oRLszbrkvxDDqBCNj2hjDZMKmvexaZ1xw/FCD+K3FI= +github.com/google/go-github/v45 v45.2.0/go.mod h1:FObaZJEDSTa/WGCzZ2Z3eoCDXWJKMenWWTrd8jrta28= github.com/google/go-licenses v0.0.0-20200602185517-f29a4c695c3d/go.mod h1:g1VOUGKZYIqe8lDq2mL7plhAWXqrEaGUs7eIjthN1sk= github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M= github.com/google/go-licenses v0.0.0-20210816172045-3099c18c36e1/go.mod h1:WkPB6PtjnM1pF4qeK8RcFmk7z+TIsrPYOng7OPlFDiw= @@ -1463,11 +1504,12 @@ github.com/google/rpmpack v0.0.0-20210518075352-dc539ef4f2ea/go.mod h1:+y9lKiqDh github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ= github.com/google/trillian v1.3.14-0.20210511103300-67b5f349eefa/go.mod h1:s4jO3Ai4NSvxucdvqUHON0bCqJyoya32eNw6XJwsmNc= -github.com/google/trillian v1.4.0 h1:Wa7XHCVzl8RLsUOr2SzoHUZHYjv0G8KMO1xZGamYkbA= -github.com/google/trillian v1.4.0/go.mod h1:1Bja2nEgMDlEJWWRXBUemSPG9qYw84ZYX2gHRVHlR+g= +github.com/google/trillian v1.4.1 h1:r/LV2L6uq6ijSSQNSyxnLXFU/JY7DaT6AILx1sOx2+8= +github.com/google/trillian v1.4.1/go.mod 
h1:43IVCsGXxP5mZK9yFkTQdQrMQm/wryNBV2GNEdqzVz8= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1501,6 +1543,7 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ= github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw= @@ -1558,9 +1601,10 @@ github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqC github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3/go.mod h1:lZdb/YAJUSj9OqrCHs2ihjtoO3+xK3G53wTYXFWRGDo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3 h1:BGNSrTRW4rwfhJiFwvwF4XQ0Y72Jj9YEgxVrtovbD5o= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.3/go.mod h1:VHn7KgNsRriXa4mcgtkpR00OXyQY6g67JWMvn+R27A4= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.2 h1:BqHID5W5qnMkug0Z8UmL8tN0gAy4jQ+B4WFt8cCgluU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.2/go.mod h1:ZbS3MZTZq/apAfAEHGoB5HbsQQstoqP92SjAqtQ9zeg= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b h1:wDUNC2eKiL35DbLvsDhiblTUXHxcOPwQSCzi7xpQUN4= @@ -1591,6 +1635,8 @@ github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39E github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -1612,7 +1658,6 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-retryablehttp v0.6.4/go.mod 
h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= @@ -1624,8 +1669,7 @@ github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtf github.com/hashicorp/go-secure-stdlib/mlock v0.1.2 h1:p4AKXPPS24tO8Wc8i1gLvSKdmkiSY5xuju57czJ/IJQ= github.com/hashicorp/go-secure-stdlib/mlock v0.1.2/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.2/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.4/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.5/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= @@ -1668,18 +1712,20 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/vault/api v1.3.0/go.mod h1:EabNQLI0VWbWoGlA+oBLC8PXmR9D60aUVgQGvangFWQ= -github.com/hashicorp/vault/api v1.3.1/go.mod h1:QeJoWxMFt+MsuWcYhmwRLwKEXrjwAFFywzhptMsTIUw= -github.com/hashicorp/vault/api v1.5.0 h1:Bp6yc2bn7CWkOrVIzFT/Qurzx528bdavF3nz590eu28= -github.com/hashicorp/vault/api v1.5.0/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= -github.com/hashicorp/vault/sdk v0.3.0/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/vault/api v1.4.1/go.mod h1:LkMdrZnWNrFaQyYYazWVn7KshilfDidgVBq6YiTq/bM= +github.com/hashicorp/vault/api v1.6.0/go.mod h1:h1K70EO2DgnBaTz5IsL6D5ERsNt5Pce93ueVS2+t0Xc= +github.com/hashicorp/vault/api v1.7.2 h1:kawHE7s/4xwrdKbkmwQi0wYaIeUhk5ueek7ljuezCVQ= +github.com/hashicorp/vault/api v1.7.2/go.mod h1:xbfA+1AvxFseDzxxdWaL0uO99n1+tndus4GCrtouy0M= github.com/hashicorp/vault/sdk v0.4.1/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/vault/sdk v0.5.0/go.mod h1:UJZHlfwj7qUJG8g22CuxUgkdJouFrBNvBHCyx8XAPdo= +github.com/hashicorp/vault/sdk v0.5.1/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU= github.com/hashicorp/vault/sdk v0.5.3/go.mod h1:DoGraE9kKGNcVgPmTuX357Fm6WAx1Okvde8Vp3dPDoU= github.com/hashicorp/vault/sdk v0.6.0 h1:6Z+In5DXHiUfZvIZdMx7e2loL1PPyDjA4bVh9ZTIAhs= github.com/hashicorp/vault/sdk v0.6.0/go.mod h1:+DRpzoXIdMvKc88R4qxr+edwy/RvH5QK8itmxLiDHLc= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod 
h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I= github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hashicorp/yamux v0.1.0 h1:DzDIF6Sd7GD2sX0kDFpHAsJMY4L+OfTvtuaQsOYXxzk= +github.com/hashicorp/yamux v0.1.0/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= @@ -1710,8 +1756,9 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/in-toto/in-toto-golang v0.3.4-0.20211211042327-af1f9fb822bf h1:FU8tuL4IWx/Hq55AO4+13AZn3Kd6uk3Z44OCIZ9coTw= github.com/in-toto/in-toto-golang v0.3.4-0.20211211042327-af1f9fb822bf/go.mod h1:twl9XmClqj6/h/HANQQYaJZVKPPW/Mz53bd2t6UXGQA= +github.com/in-toto/in-toto-golang v0.3.4-0.20220709202702-fa494aaa0add h1:DAh7mHiRT7wc6kKepYdCpH16ElPciMPQWJaJ7H3l/ng= +github.com/in-toto/in-toto-golang v0.3.4-0.20220709202702-fa494aaa0add/go.mod h1:DQI8vlV6h6qSY/tCOoYKtxjWrkyiNpJ3WTV/WoBllmQ= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= @@ -1776,6 +1823,7 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= @@ -1790,20 +1838,26 @@ github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJz github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jedisct1/go-minisign v0.0.0-20210703085342-c1f07ee84431/go.mod h1:3VIJLjlf5Iako82IX/5KOoCzDmogK5mO+bl+DRItnR8= github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= -github.com/jenkins-x/go-scm v1.10.10/go.mod 
h1:z7xTO9/VzqW3xEbEMH2z5cpOGrZ8+nOHOWfU1ngFGxs= +github.com/jellydator/ttlcache/v2 v2.11.1 h1:AZGME43Eh2Vv3giG6GeqeLeFXxwxn1/qHItqWZl6U64= +github.com/jellydator/ttlcache/v2 v2.11.1/go.mod h1:RtE5Snf0/57e+2cLWFYWCCsLas2Hy3c5Z4n14XmSvTI= github.com/jenkins-x/go-scm v1.11.16/go.mod h1:GB6XjszezsDOxKTsPoyk4MT/cKw30qkPdJ4tml+MImg= +github.com/jenkins-x/go-scm v1.11.19 h1:H4CzaM/C/0QcCVLDh603Q6Bv4hqU4G3De2yQntWubqg= github.com/jenkins-x/go-scm v1.11.19/go.mod h1:eIcty4+tf6E7ycGOg0cUqnaLP+1LH1Z8zncQFQqRa3E= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= +github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jhump/protoreflect v1.9.0 h1:npqHz788dryJiR/l6K/RUQAyh2SwV91+d1dnh4RjO9w= -github.com/jhump/protoreflect v1.9.0/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jhump/protoreflect v1.10.3/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= +github.com/jhump/protoreflect v1.12.0 h1:1NQ4FpWMgn3by/n1X0fbeKEUxP1wBt7+Oitpv01HR10= +github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -1861,6 +1915,7 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -1869,9 +1924,7 @@ github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.3/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= @@ -1883,10 +1936,13 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.4/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.8 h1:JahtItbkWjf2jzm/T+qgMxkP9EMHsqEUA6vCMGmXvhA= +github.com/klauspost/compress v1.15.8/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1897,6 +1953,10 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -1915,7 +1975,6 @@ github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdB github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod 
h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= @@ -1926,8 +1985,9 @@ github.com/lestrrat-go/httpcc v1.0.0/go.mod h1:tGS/u00Vh5N6FHNkExqGGNId8e0Big+++ github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= github.com/lestrrat-go/jwx v1.2.21/go.mod h1:9cfxnOH7G1gN75CaJP2hKGcxFEx5sPh1abRIA/ZJVh4= github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e h1:1aV3EJ4ZMsc63MFU4rB+ccSEhZvvVD71T9RA4Rqd3hI= github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e/go.mod h1:Bl3mfF2LHYepsU2XfzMceIglyByfPe1IFAXtO+p37Qk= +github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e h1:2ba+yBBeT8ZFyZjRLPDKvkqVrWX4CCYAuR6nuJGojD0= +github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e/go.mod h1:54WQpg5QI0mpRhxoj9bxysLqA5WJylVsLtXOrb3zAiU= github.com/letsencrypt/challtestsrv v1.2.1/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -1946,6 +2006,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= +github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucasb-eyer/go-colorful v0.0.0-20180526135729-345fbb3dbcdb/go.mod h1:NXg0ArsFk0Y01623LgUqoqcouGDB+PwCCQlrwrG6xJ4= @@ -1956,7 +2017,7 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/luna-duclos/instrumentedsql v1.1.3/go.mod h1:9J1njvFds+zN7y85EDhN9XNQLANWwZt2ULeIC8yMNYs= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.13.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -1964,12 +2025,9 @@ github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPK github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -1999,9 +2057,8 @@ github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.3 h1:YkaHmK1CzE5C4O7A3hv3TCbfNDPSCf0RKZFX+VhBeYk= -github.com/mattn/go-ieproxy v0.0.3/go.mod h1:6ZpRmhBaYuBX1U2za+9rC9iCGLsSp2tftelZne7CPko= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -2028,6 +2085,7 @@ github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v1.14.13/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -2039,13 +2097,14 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/maxbrunsfeld/counterfeiter/v6 v6.5.0/go.mod h1:fJ0UAZc1fx3xZhU4eSHQDJ1ApFmTVhp5VTpV9tm2ogg= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mediocregopher/radix/v4 v4.0.0/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= +github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= 
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= +github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= @@ -2055,6 +2114,7 @@ github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -2081,9 +2141,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -2118,11 +2176,14 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= github.com/mozilla/tls-observatory 
v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= +github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de/go.mod h1:kJun4WP5gFuHZgRjZUWWuH1DTxCtxbHDOIJsudS8jzY= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= @@ -2153,7 +2214,9 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJ github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI= github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= @@ -2216,7 +2279,7 @@ github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5h github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/open-policy-agent/opa v0.35.0/go.mod h1:xEmekKlk6/c+so5HF9wtPnGPXDfBuBsrMGhSHOHEF+U= +github.com/open-policy-agent/opa v0.44.0/go.mod h1:YpJaFIk5pq89n/k72c1lVvfvR5uopdJft2tMg1CW/yU= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -2227,6 +2290,7 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg= github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod 
h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -2236,6 +2300,7 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -2247,6 +2312,7 @@ github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqi github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -2275,7 +2341,6 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -2283,10 +2348,10 @@ github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.0-beta.2/go.mod h1:+X+aW6gUj6Hda43TeYHVCIvYNG/jqY/8ZFXAeXXHl+Q= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/performancecopilot/speed/v4 
v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -2294,6 +2359,7 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -2301,7 +2367,7 @@ github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -2337,8 +2403,10 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -2359,10 +2427,11 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod 
h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -2375,17 +2444,17 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.1/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/protocolbuffers/txtpbfmt v0.0.0-20201118171849-f6a6b3f636fc/go.mod h1:KbKfKPy2I6ecOIGA9apfheFv14+P3RSmmQvshofQyMY= github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= -github.com/pseudomuto/protoc-gen-doc v1.5.0/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= +github.com/pseudomuto/protoc-gen-doc v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM= github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= @@ -2431,7 +2500,6 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.7.0/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -2455,7 +2523,6 @@ github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiB github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= -github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= @@ -2471,7 +2538,6 @@ github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4Qn github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/secure-systems-lab/go-securesystemslib v0.2.0/go.mod h1:eIjBmIP8LD2MLBL/DkQWayLiz006Q4p+hCu79rvWleY= github.com/secure-systems-lab/go-securesystemslib v0.3.0/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= @@ -2491,25 +2557,26 @@ github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFA github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260 h1:xKXiRdBUtMVp64NaxACcyX4kvfmHJ9KrLU+JvyB1mdM= github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen 
v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/sigstore/cosign v1.8.1-0.20220504185934-6ecf405f0b92 h1:2CnH+hTghtcQoSrY//f8aRnR3kny7zD1dlSw13j5vjw= -github.com/sigstore/cosign v1.8.1-0.20220504185934-6ecf405f0b92/go.mod h1:h3OwaH+xFKw7LC+JMqE7EM0x6358DYAfTyzeDP36mSQ= -github.com/sigstore/fulcio v0.1.2-0.20220114150912-86a2036f9bc7 h1:XE7A9lJ+wYhmUFBWYTaw3Ph943zHB4iBYd5R0SX0ZOA= -github.com/sigstore/fulcio v0.1.2-0.20220114150912-86a2036f9bc7/go.mod h1:ANQivY/lfOp9hN92S813LEthkm/kit96hzeIF3SNoZA= -github.com/sigstore/rekor v0.4.1-0.20220114213500-23f583409af3/go.mod h1:u9clLqaVjqV9pExVL1XkM37dGyMCOX/LMocS9nsnWDY= -github.com/sigstore/rekor v0.5.0 h1:YAVIdOLHTuzqV7XfZvlASxbkgylxaeThzusV5Tx8XeE= -github.com/sigstore/rekor v0.5.0/go.mod h1:nTpOwCPKuazkGfW/3Dp3iGWkgZL2Ogb2kBesAwz83eQ= -github.com/sigstore/sigstore v1.0.2-0.20211210190220-04746d994282/go.mod h1:SuM+QIHtnnR9eGsURRLv5JfxM6KeaU0XKA1O7FmLs4Q= -github.com/sigstore/sigstore v1.1.0/go.mod h1:gDpcHw4VwpoL5C6N1Ud1YtBsc+ikRDwDelDlWRyYoE8= -github.com/sigstore/sigstore v1.2.1-0.20220424143412-3d41663116d5 h1:8OL06Knchax4CMtdfquC3ASWQPtYMJgyeQImWQPw6XE= -github.com/sigstore/sigstore v1.2.1-0.20220424143412-3d41663116d5/go.mod h1:OvpZniSE9oRPnW7+mhxljRt2RAQU+TwcnhYbqQsPwPc= +github.com/sigstore/cosign v1.12.0 h1:4FtGar5z0tuor8p4arOEtgCkzMWyjFKYE4D1oJiPJ6Y= +github.com/sigstore/cosign v1.12.0/go.mod h1:gcWqjoMm2jhu5knf9HMWq5AS8CcnOeYXuamMUBj0Arg= +github.com/sigstore/fulcio v0.5.3 h1:fwdl2BHv1RjL3GJJ44T+tPsvmQ028zv54psxVhSwUGA= +github.com/sigstore/fulcio v0.5.3/go.mod h1:4yzMqOao6r9Nul1Dgt4LL7loKdkkgbDemLYrXUuAc+Y= +github.com/sigstore/rekor v0.11.0 h1:2x1Sy3fu3VSWbl/2fwTyFPqs5fehY++EqdTFWWT6+Mo= +github.com/sigstore/rekor v0.11.0/go.mod h1:xEfHnfiQJ/yJVCz41/OglUrDID71gICzixJjYFrQeN0= +github.com/sigstore/sigstore v1.2.1-0.20220526001230-8dc4fa90a468/go.mod h1:xAQdMn1pZ7FcOtHU6chqIsvVKt9KGb4mJZljPQUdcpA= +github.com/sigstore/sigstore v1.4.0/go.mod h1:z3kt1jm2A39M+g7emkQ8jdErL/haCMEjkNxvqTf41/k= +github.com/sigstore/sigstore v1.4.1-0.20220908204944-ec922cf4f1c2 h1:/UPRO/SWpsZHRVXE8IBOHuv+ZgqzjE5/dP0ilC6AlDI= +github.com/sigstore/sigstore v1.4.1-0.20220908204944-ec922cf4f1c2/go.mod h1:d0zef5dbdOvwBwQ0NlqO8pZuiFe4VtW12+ful2i1Nbw= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -2531,6 +2598,7 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= @@ -2591,10 +2659,9 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH 
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= -github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= -github.com/spiffe/go-spiffe/v2 v2.1.0/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= +github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k= github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A= @@ -2634,31 +2701,33 @@ github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PK github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= -github.com/subosito/gotenv v1.4.0 h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs= github.com/subosito/gotenv v1.4.0/go.mod h1:mZd6rFysKEcUhUHXJk0C/08wAgyDBFuwEYL7vWWGaGo= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tektoncd/chains v0.12.1-0.20220901150427-1bf8faaf4475 h1:VczsrAAw9SglPDe5/eA2Y/F1Yw/jIzg13eMQwQd8FaQ= -github.com/tektoncd/chains v0.12.1-0.20220901150427-1bf8faaf4475/go.mod h1:0LO194VyF44dmdlopSwLEfnwWueUIx/AOdGzScIdV7s= +github.com/tektoncd/chains v0.12.1-0.20220920205308-b34353430a40 
h1:6DckQq9Bt9NreKtcNwsRkqaoKWwXKbStSO87oFXS+nc= +github.com/tektoncd/chains v0.12.1-0.20220920205308-b34353430a40/go.mod h1:V4a9i6bkpuZbYvqJy7sKiyYYfFqenH3QlWhkgpLZRFY= github.com/tektoncd/hub v1.9.0 h1://BQzhSD7NIlEcg/vdrWctNJDRnfVNlvu2CRqIBK8eA= github.com/tektoncd/hub v1.9.0/go.mod h1:kt9KAIvIcSmFxS3g+P3dY7d3AH4yxS9JXZodzoHJHVY= -github.com/tektoncd/pipeline v0.37.2/go.mod h1:ZZOSGj1vCeK/xONQGcxBs+m17NzCXNNOqglCDhOPwjY= github.com/tektoncd/pipeline v0.38.2/go.mod h1:9uQZ6PdOZXPtoceupLMyChXUR6elsTuHpVNlEGAIJXU= -github.com/tektoncd/pipeline v0.39.0 h1:FobBVL5HuR57XUqbASwkhb6ZUaJTtlPn3rXh2ssy8Pg= github.com/tektoncd/pipeline v0.39.0/go.mod h1:wOhlnZ3Z6gRFBN9kR4geMCu2VAcQ4DmX44adwOZcz0U= +github.com/tektoncd/pipeline v0.40.0 h1:g1xbNA/IvLK2eH0AaAqX8InxbyF03y49pEKhVaqARR4= +github.com/tektoncd/pipeline v0.40.0/go.mod h1:DqabXb6NBN/CMMZn20UXiGkdHEgMfRDvh2r6g/YEQ50= github.com/tektoncd/plumbing v0.0.0-20220304154415-13228ac1f4a4/go.mod h1:b9esRuV1absBvaPzKkjYdKXjC5Tgs8/vh1sz++RiTdc= github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f/go.mod h1:b9esRuV1absBvaPzKkjYdKXjC5Tgs8/vh1sz++RiTdc= github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb h1:LUUCR8pLF+MzdQ7kOQQrMzDahIPZLdPCzfnNow1Um3Y= github.com/tektoncd/plumbing v0.0.0-20220817140952-3da8ce01aeeb/go.mod h1:uJBaI0AL/kjPThiMYZcWRujEz7D401v643d6s/21GAg= -github.com/tektoncd/resolution v0.0.0-20220331203013-e4203c70c5eb h1:bSrsnmoOJJh1JorlaTDSusq/eIBnk5zRKAVXOhnJyD8= github.com/tektoncd/resolution v0.0.0-20220331203013-e4203c70c5eb/go.mod h1:u7+LospaKMTW8f1mKHpul2XmGXYSG86kMrbJqUr2w0Q= github.com/tektoncd/triggers v0.21.0 h1:9k/sRHLZQC8AxDYAgcEE+atUoRIaWrehbro3EVXC9es= github.com/tektoncd/triggers v0.21.0/go.mod h1:80lSjy3A11KiZ9FhzD7qWX6ev5M+cEwTdKfryUXueyE= @@ -2669,10 +2738,10 @@ github.com/tent/canonical-json-go v0.0.0-20130607151641-96e4ba3a7613/go.mod h1:g github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= -github.com/theupdateframework/go-tuf v0.0.0-20211203210025-7ded50136bf9/go.mod h1:n2n6wwC9BEnYS/C/APAtNln0eM5zYAYOkOTx6VEG/mA= -github.com/theupdateframework/go-tuf v0.0.0-20220127213825-87caa18db2a6/go.mod h1:I0Gs4Tev4hYQ5wiNqN8VJ7qS0gw7KOZNQuckC624RmE= -github.com/theupdateframework/go-tuf v0.0.0-20220211205608-f0c3294f63b9 h1:U8bHY5mmNuZHc3+e7l2/LAmfTk7oiMiB3Qn8fsp4z5g= -github.com/theupdateframework/go-tuf v0.0.0-20220211205608-f0c3294f63b9/go.mod h1:ENa0O55YQfI0U/nn4AAuqPydrbkqQCiq9GDw6YLCyXU= +github.com/theupdateframework/go-tuf v0.3.0/go.mod h1:E5XP0wXitrFUHe4b8cUcAAdxBW4LbfnqF4WXXGLgWNo= +github.com/theupdateframework/go-tuf v0.3.1/go.mod h1:lhHZ3Vt2pdAh15h0Cc6gWdlI+Okn2ZznD3q/cNjd5jw= +github.com/theupdateframework/go-tuf v0.5.0 h1:aQ7i9CBw4q9QEZifCaW6G8qGQwoN23XGaZkOA+F50z4= +github.com/theupdateframework/go-tuf v0.5.0/go.mod h1:vAqWV3zEs89byeFsAYoh/Q14vJTgJkHwnnRCWBBBINY= github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -2692,6 +2761,8 @@ github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLD github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis 
v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tjfoc/gmsm v1.3.2 h1:7JVkAn5bvUJ7HtU08iW6UiD+UTmJTIToHCfeFzkcCxM= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= @@ -2706,6 +2777,8 @@ github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E= +github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A= github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/go-tsz v0.0.0-20180814235614-0bd30b3df1c3/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/vegeta/v12 v12.8.4/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho= @@ -2717,6 +2790,8 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljT github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= @@ -2726,8 +2801,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= -github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.7 h1:aXiFAgRugfJ27UFDsGJ9DB2FvTC73hlVXFSqq5bo9eU= +github.com/urfave/cli v1.22.7/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= @@ -2741,7 +2816,7 @@ github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/V github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vbatts/tar-split v0.11.2 
h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= @@ -2758,13 +2833,16 @@ github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgq github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= github.com/weppos/publicsuffix-go v0.15.1-0.20210807195340-dc689ff0bb59/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/weppos/publicsuffix-go v0.15.1-0.20220329081811-9a40b608a236/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.15.1-0.20220413065649-906f534b73a4/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= -github.com/withfig/autocomplete-tools/packages/cobra v0.0.0-20220122124547-31d3821a6898/go.mod h1:cKObXQ6PVFO7bHUd5jpApXvMIt55Ewz7UdMiC05ONxI= +github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1/go.mod h1:nmuySobZb4kFgFy6BptpXp/BBw+xFSyvVPP6auoJB4k= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/go-gitlab v0.64.0 h1:rMgQdW9S1w3qvNAH2LYpFd2xh7KNLk+JWJd7sorNuTc= -github.com/xanzy/go-gitlab v0.64.0/go.mod h1:F0QEXwmqiBUxCgJm8fE9S+1veX4XC9Z4cfaAbqwk4YM= +github.com/xanzy/go-gitlab v0.73.1 h1:UMagqUZLJdjss1SovIC+kJCH4k2AZWXl58gJd38Y/hI= +github.com/xanzy/go-gitlab v0.73.1/go.mod h1:d/a0vswScO7Agg1CZNz15Ic6SSvBG9vfw8egL99t4kA= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -2776,7 +2854,6 @@ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6 github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -2791,29 +2868,29 @@ github.com/xlab/treeprint 
v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6Ut github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= -github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= -github.com/ysmood/goob v0.3.0/go.mod h1:S3lq113Y91y1UBf1wj1pFOxeahvfKkCk6mTWTWbDdWs= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= -github.com/ysmood/got v0.15.1/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY= -github.com/ysmood/got v0.23.3/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY= -github.com/ysmood/gotrace v0.2.2/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM= +github.com/ysmood/got v0.29.1/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY= +github.com/ysmood/got v0.31.3/go.mod h1:pE1l4LOwOBhQg6A/8IAatkGp7uZjnalzrZolnlhhMgY= github.com/ysmood/gotrace v0.6.0/go.mod h1:TzhIG7nHDry5//eYZDYcTzuJLYQIkykJzCRIo4/dzQM= -github.com/ysmood/gson v0.6.4/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/gson v0.7.1 h1:zKL2MTGtynxdBdlZjyGsvEOZ7dkxaY5TH6QhAbTgz0Q= github.com/ysmood/gson v0.7.1/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= -github.com/ysmood/leakless v0.7.0 h1:XCGdaPExyoreoQd+H5qgxM3ReNbSPFsEXpSKwbXbwQw= +github.com/ysmood/gson v0.7.2 h1:1iWUvpi5DPvd2j59W7ifRPR9DiAZ3Ga+fmMl1mJrRbM= +github.com/ysmood/gson v0.7.2/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= github.com/ysmood/leakless v0.7.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= +github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= +github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -2849,57 +2926,61 @@ go.etcd.io/etcd 
v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3C go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0 h1:se+XckWlVTTfwjZSsAZJ2zGPzmIMq3j7fKBCmHoB9UA= +go.etcd.io/etcd/api/v3 v3.6.0-alpha.0/go.mod h1:z13pg39zewDLZeXIKeM0xELOeFKcqjLocfwl5M820+w= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0 h1:2UyRzFWbZZzgu/xzxoRukgixvafiJtGyxO+3IKUyJ6c= +go.etcd.io/etcd/client/pkg/v3 v3.6.0-alpha.0/go.mod h1:Vl/FkH40bHqmBFwhr8WVKtV47neyts36zl1voccRq8s= go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= -go.etcd.io/etcd/client/v2 v2.305.4 h1:Dcx3/MYyfKcPNLpR4VVQUP5KgYrBeJtktBwEKkw08Ao= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 h1:9VRJ698EFIMfjOQtcjKMM7CWXOIxp9R4I8JA1mk+WT4= +go.etcd.io/etcd/client/v2 v2.306.0-alpha.0/go.mod h1:eW78BCfOzS1HJgTNzDrb2E6xV1p6kqlpLpKkz7ErzCs= go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.4 h1:p83BUL3tAYS0OT/r0qglgc3M1JjhM0diV8DSWAhVXv4= go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 h1:hHaJ8CvTPJ9iv7xPz3G0gxt3csEqJW8evgty/kYICwo= +go.etcd.io/etcd/client/v3 v3.6.0-alpha.0/go.mod h1:a9JuChoQBDnw7WclHYBYCtTOIC12Wwj+Fw0LX4TI/Gs= go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo= -go.etcd.io/etcd/etcdctl/v3 v3.5.0 h1:i8DGjR9gBRoS6NEHF3XBxxh7QwL1DyilXMCkHpyy6zM= -go.etcd.io/etcd/etcdctl/v3 v3.5.0/go.mod h1:vGTfKdsh87RI7kA2JHFBEGxjQEYx+pi299wqEOdi34M= -go.etcd.io/etcd/etcdutl/v3 v3.5.0 h1:orNfs85GWmiOl0p23Yi9YRfHNb3Qfdlt0wVFkPTRVxQ= -go.etcd.io/etcd/etcdutl/v3 v3.5.0/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg= +go.etcd.io/etcd/etcdctl/v3 v3.5.4/go.mod h1:SMZep1Aj7sUmMSBCHTjkZL/Yw36Vx5Ux61fKbopbb5U= +go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0 h1:3J+c4Av+pF7dBMAnxZVMrfCCMTaBz4CGJ8En3sZMNME= +go.etcd.io/etcd/etcdctl/v3 v3.6.0-alpha.0/go.mod h1:0ugckElRKx3OrV15/WAylLv2Ji67QxXKTh9lytkOh8s= +go.etcd.io/etcd/etcdutl/v3 v3.5.4/go.mod h1:eK9eZfI/BxDQCztpuaJ1E/ufYpMw2Y16dPX1azGWrBU= +go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0 
h1:DZwDkrq/z5nHxXtovJMk9fyR6Nc+pwCJt25ptlFta24= +go.etcd.io/etcd/etcdutl/v3 v3.6.0-alpha.0/go.mod h1:0ILo94EKC+jgp/IMfxePlfJD1OVtMVfgTQ/xM8+joOA= go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= -go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0 h1:cV/VsaYde/tcc2G9aHN5DQwx6CtUsWSEW4UqYzXuyyk= +go.etcd.io/etcd/pkg/v3 v3.6.0-alpha.0/go.mod h1:tXqWms0MpOJAS6L0B9nhFqZr0C/WEYzj/OtN90G8xzo= go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= -go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w= +go.etcd.io/etcd/raft/v3 v3.6.0-alpha.0 h1:BQ6CnNP4pIpy5rusFlTBxAacDgPXhuiHFwoTsBNsVpI= +go.etcd.io/etcd/raft/v3 v3.6.0-alpha.0/go.mod h1:/kZdrBXlc5fUgYXfIEQ0B5sb7ejXPKbtF4jWzF1exiQ= go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= -go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0 h1:BQUVqBqNFZZyrRbfydrRLzq9hYvCcRj97SsX1YwD7CA= +go.etcd.io/etcd/server/v3 v3.6.0-alpha.0/go.mod h1:3QM2rLq3B3hSXmVEvgVt3vEEbG/AumSs0Is7EgrlKzU= go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30= -go.etcd.io/etcd/tests/v3 v3.5.0 h1:+uMuHYKKlLUzbW322XrQXbaGM9qiV7vUL+AEPT/KYY4= -go.etcd.io/etcd/tests/v3 v3.5.0/go.mod h1:f+mtZ1bE1YPvgKdOJV2BKy4JQW0nAFnQehgOE7+WyJE= +go.etcd.io/etcd/tests/v3 v3.5.4/go.mod h1:ymig8LjkI1zqAxxMsl+nntzG21dND2hh0UQXl9BaJP8= +go.etcd.io/etcd/tests/v3 v3.6.0-alpha.0 h1:3qrZ3p/E7CxdV1kKtAU75hHOcUoXcSTwC7ELKWyzMJo= +go.etcd.io/etcd/tests/v3 v3.6.0-alpha.0/go.mod h1:hFQkP/cTsZIXXvUv+BsGHZ3TK+76XZMi5GToYA94iac= go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To= -go.etcd.io/etcd/v3 v3.5.0 h1:fs7tB+L/xRDi/+p9qKuaPGCtMX6vkovLRXTqvEE98Ek= -go.etcd.io/etcd/v3 v3.5.0/go.mod h1:FldM0/VzcxYWLvWx1sdA7ghKw7C3L2DvUTzGrcEtsC4= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= -go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.etcd.io/etcd/v3 v3.5.4/go.mod h1:c6jK4IfuWwJU26FD9SeI4cAtvlfu9Iacaxu0vRses1k= +go.etcd.io/etcd/v3 v3.6.0-alpha.0 h1:c4c3xHs9tG097KtpLfBQJSD6c70xgEZbwkoj3gF6As4= +go.etcd.io/etcd/v3 v3.6.0-alpha.0/go.mod h1:9ERPHHuSr8Ho66trD/4f3+vSeqI/hk4loUSFUwj6Zcg= go.mongodb.org/mongo-driver v1.7.3/go.mod 
h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mongodb.org/mongo-driver v1.8.4 h1:NruvZPPL0PBcRJKmbswoWSrmHeUvzdxA3GCPfD/NEOA= go.mongodb.org/mongo-driver v1.8.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= +go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= @@ -2911,48 +2992,71 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib v1.3.0 h1:p9Gd+3dD7yB+AIph2Ltg11QDX6Y+yWMH0YQVTpTTP2c= -go.opentelemetry.io/contrib v1.3.0/go.mod h1:FlyPNX9s4U6MCsWEc5YAK4KzKNHFDsjrDUZijJiXvy8= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= +go.opentelemetry.io/contrib v1.6.0 h1:xJawAzMuR3s4Au5p/ABHqYFychHjK2AHB9JvkBuBbTA= +go.opentelemetry.io/contrib v1.6.0/go.mod h1:FlyPNX9s4U6MCsWEc5YAK4KzKNHFDsjrDUZijJiXvy8= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.26.1/go.mod h1:4wsfAAW5N9wUHM0QTmZS8z7fvYZ1rv3m+sVeSpf8NhU= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 h1:Ky1MObd188aGbgb5OgNnwGuEEwI9MVIcc7rBW6zk5Ak= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY= go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= +go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.1.0/go.mod h1:7cww0OW51jQ8IaZChIEdqLwgh+44+7uiTdWsAL0wQpA= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0/go.mod 
h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 h1:7Yxsak1q4XrJ5y7XBnNwqWx9amMZvoidCctv62XOQ6Y= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.1.0/go.mod h1:/E4iniSqAEvqbq6KM5qThKZR2sd42kDvD+SrYt00vRw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 h1:cMDtmgJ5FpRvqx9x2Aq+Mm0O6K/zcUkH73SFz20TuBw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.1.0/go.mod h1:Gyc0evUosTBVNRqTFGuu0xqebkEWLkLwv42qggTCwro= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 h1:MFAyzUPrTwLOwCi+cltN0ZVyy4phU41lwH+lyMyQTS4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU= go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= +go.opentelemetry.io/otel/sdk v1.1.0/go.mod h1:3aQvM6uLm6C4wJpHtT8Od3vNzeZ34Pqc6bps8MywWzo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0= +go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace 
v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.1.0/go.mod h1:i47XtdcBQiktu5IsrPqOHe8w+sBmnLwwHt8wiUsWGTI= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= +go.opentelemetry.io/proto/otlp v0.10.0/go.mod h1:zG20xCK0szZ1xdokeSOwEcmlXu+x9kkdRe6N1DhKcfU= go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= -go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= -go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= +go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E= +go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= -go.step.sm/crypto v0.14.0/go.mod h1:3G0yQr5lQqfEG0CMYz8apC/qMtjLRQlzflL2AxkcN+g= +go.step.sm/cli-utils v0.7.3/go.mod h1:RJRwbBLqzs5nrepQLAV9FuT3fVpWz66tKzLIB7Izpfk= +go.step.sm/crypto v0.9.0/go.mod h1:+CYG05Mek1YDqi5WK0ERc6cOpKly2i/a5aZmU1sfGj0= +go.step.sm/crypto v0.17.2/go.mod h1:FXFiLBUsoE0OGz8JTjxhYU1rwKKNgVIb5izZTUMdc/8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -2984,23 +3088,24 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.20.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.22.0/go.mod h1:H4siCOZOrAolnUPJEkfaSjDqyP+BDS0DdDWzwcgt3+U= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +goa.design/goa v2.2.5+incompatible h1:mjAtiy7ZdZIkj974hpFxCR6bL69qprfV00Veu3Vybts= +goa.design/goa v2.2.5+incompatible/go.mod h1:NnzBwdNktihbNek+pPiFMQP9PPFsUt8MMPPyo9opDSo= goa.design/goa/v3 v3.7.12/go.mod h1:iAZRP2wqf2Fu++CWt7Qfoxe3iVMkKqlsFAEF2kcxs28= goa.design/goa/v3 v3.8.2 h1:OzL9YOxiDrGHoWNj1vvq6LczDZYksn2Igvt4ULcqS1g= goa.design/goa/v3 v3.8.2/go.mod h1:WVTAOi9ypvh2MOux4g6LKohAk3Vomv5r0A//fZrnLO8= goa.design/plugins/v3 v3.8.2/go.mod h1:D+OKEfavb6+tnYPMc9tMtpplgGzPOWULQ+UsEqRvdCE= gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= gocloud.dev 
v0.24.1-0.20211119014450-028788aaaa4c/go.mod h1:EIJSlY7nvfeoWaV2GauF6es27gZfqtTVon47QFueoyE= -gocloud.dev v0.25.0 h1:Y7vDq8xj7SyM848KXf32Krda2e6jQ4CLh/mTeCSqXtk= -gocloud.dev v0.25.0/go.mod h1:7HegHVCYZrMiU3IE1qtnzf/vRrDwLYnRNR3EhWX8x9Y= -gocloud.dev/docstore/mongodocstore v0.25.0 h1:F++AtU167zTKtntLHmZZESPZWVR0zYClxOxxzZU+KSA= -gocloud.dev/docstore/mongodocstore v0.25.0/go.mod h1:S+mz5Ng4IiPqcTDc8JhvyMVX8erXyyiAq50H8ak6UMc= -gocloud.dev/pubsub/kafkapubsub v0.25.0 h1:p6RxC7FDAs4vKFepVBuAQk7rJrLua4Z8c9VMOlPHE6U= -gocloud.dev/pubsub/kafkapubsub v0.25.0/go.mod h1:kFZGuOSh7U7wkl2c7f5M8JMzg2O4ET+u9UGO0uQ+oUo= +gocloud.dev v0.26.0 h1:4rM/SVL0lLs+rhC0Gmc+gt/82DBpb7nbpIZKXXnfMXg= +gocloud.dev v0.26.0/go.mod h1:mkUgejbnbLotorqDyvedJO20XcZNTynmSeVSQS9btVg= +gocloud.dev/docstore/mongodocstore v0.26.0 h1:hpsstzbNy/kUoyv+oQsyMq49x6k6vkYObuLXDeTZoBo= +gocloud.dev/docstore/mongodocstore v0.26.0/go.mod h1:PfGgT0AvArgHc35KGSjBXJ8uSk3zf7bnO+PRRRm1V94= +gocloud.dev/pubsub/kafkapubsub v0.26.0 h1:0hm0dx/Up5d0ZB8Voq5934GRY4wdYuTvteilrq+6I+o= +gocloud.dev/pubsub/kafkapubsub v0.26.0/go.mod h1:kVwluCr3np4ifON+4Zj+IqnEKm8AEuehM55FSzO+yOA= goji.io/v3 v3.0.0/go.mod h1:c02FFnNiVNCDo+DpR2IhBQpM9r5G1BG/MkHNTPUJ13U= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -3010,17 +3115,14 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -3029,9 +3131,11 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -3057,7 +3161,6 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211115234514-b4de73f9ece8/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= @@ -3066,8 +3169,11 @@ golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220331220935-ae2d96664a29/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -3125,7 +3231,6 @@ golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod 
h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -3136,7 +3241,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -3169,7 +3273,6 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -3190,6 +3293,7 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -3205,27 +3309,29 @@ golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211111083644-e5c967477495/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220421235706-1d1ef9303861/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220516155154-20f960328961/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b h1:3ogNYyK4oIQdIKzTu68hQrr4iuVxF3AxKl9Aj/eDrw0= golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b h1:ZmngSVLe/wycRns9MKikG9OWIEjGcGAkacif7oYQaUY= +golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -3236,7 
+3342,6 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -3253,8 +3358,11 @@ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -3286,7 +3394,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3342,6 +3449,7 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -3415,15 +3523,15 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211123173158-ef496fb156ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220110181412-a018aaa089fe/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3438,6 +3546,7 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -3447,16 +3556,18 @@ golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704 h1:Y7NOhdqIOU8kYI7BxsgL38d0ot0raxvcW+EMQU2QrT4= golang.org/x/sys v0.0.0-20220803195053-6e608f9ce704/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220907062415-87db552b00fd h1:AZeIEzg+8RCELJYq8w+ODLVxFgLMMigSwO/ffKPEd9U= +golang.org/x/sys v0.0.0-20220907062415-87db552b00fd/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -3465,8 +3576,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b h1:NXqSWXSRUSCaFuvitrWtU169I3876zRTalMRbfd6LL0= +golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -3477,8 +3589,10 @@ golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= +golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -3487,7 +3601,6 @@ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190228203856-589c23e65e65/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -3510,7 +3623,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -3553,6 +3665,7 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -3560,6 +3673,7 @@ golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512001501-aaeff5de670a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -3668,7 +3782,6 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= @@ -3704,8 +3817,13 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0 h1:8rJoHuRxx+vCmZtAO/3k1dRLvYNVyTJtZ5oaFZvhgvc= google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.92.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0 h1:d1c24AAS01DYqXreBeuVV7ewY/U8Mnhh47pwtsgVtYg= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3772,16 +3890,18 @@ google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210325141258-5636347f2b14/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs= google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210406143921-e86de6bf7a46/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210420162539-3c870d7478d2/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210427215850-f767ed18ee4d/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= @@ -3813,7 +3933,6 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211207154714-918901c715cf/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -3835,11 +3954,12 @@ google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220401170504-314d38edb7de/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= @@ -3849,10 +3969,15 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220630174209-ad1d48641aa7/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220706132729-d86698d07c53/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= google.golang.org/genproto v0.0.0-20220715211116-798f69b842b9/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220803205849-8f55acc8769f h1:ywoA0TLvF/4n7P2lr/+bNRueYxWYUJZbRwV3hyYt8gY= +google.golang.org/genproto v0.0.0-20220720214146-176da50484ac/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= google.golang.org/genproto v0.0.0-20220803205849-8f55acc8769f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220804142021-4e6b2dfa6612/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58 h1:sRT5xdTkj1Kbk30qbYC7VyMj73N5pZYsw6v+Nrzdhno= +google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -3901,6 +4026,7 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/grpc/examples v0.0.0-20201112215255-90f1b3ee835b/go.mod h1:IBqQ7wSUJ2Ep09a8rMWFsg4fmI2r38zwsq8a0GgxXpM= google.golang.org/grpc/examples v0.0.0-20201130180447-c456688b1860/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -3939,8 +4065,6 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= 
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/h2non/gentleman.v1 v1.0.4/go.mod h1:JYuHVdFzS4MKOXe0o+chKJ4hCe6tqKKw9XH9YP6WFrg= gopkg.in/h2non/gock.v1 v1.0.16/go.mod h1:XVuDAssexPLwgxCLMvDTWNU5eqklsydR6I5phZ9oPB8= gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY= @@ -3950,18 +4074,20 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -3976,6 +4102,7 @@ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzW gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -3984,6 +4111,7 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.6/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -4031,6 +4159,7 @@ k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= +k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= k8s.io/api v0.23.9 h1:v7Ee2CZuyb6rVm1q4bUe7ZonWleLsrvgcOTxPGjQVa4= k8s.io/api v0.23.9/go.mod h1:r4g0GrGdLgwSYB90qgO4tBrbKtALBhUfut+oFt4ikCc= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= @@ -4046,6 +4175,7 @@ k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0 k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.23.9 h1:u9Pu7Ffe+9+QJUemtNjuCwvHSnOUeYEwgSHV+88Ne0g= k8s.io/apimachinery v0.23.9/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= @@ -4063,12 +4193,12 @@ k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= +k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4= k8s.io/client-go v0.23.9 h1:OKxNCL+nhw7UBB5b01OVuAV4Db/AdBdaV6/GYpucuOw= k8s.io/client-go v0.23.9/go.mod h1:sNo0X0MZqo4Uu0qDY5Fl5Y60cJFinBDWWUBOAM5JUCM= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/code-generator v0.23.4/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.23.6/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.9/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= @@ -4126,7 +4256,7 @@ knative.dev/hack v0.0.0-20220224013837-e1785985d364/go.mod h1:PHt8x8yX5Z9pPquBEf knative.dev/hack v0.0.0-20220318020218-14f832e506f8/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20220328133751-f06773764ce3/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20220725145124-782bbaabb8a1/go.mod h1:t/azP8I/Cygaw+87O7rkAPrNRjCelmtfSzWzu/9TM7I= -knative.dev/hack/schema v0.0.0-20220224013837-e1785985d364/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= +knative.dev/hack v0.0.0-20220815132133-e9a8475f4329/go.mod h1:t/azP8I/Cygaw+87O7rkAPrNRjCelmtfSzWzu/9TM7I= knative.dev/hack/schema v0.0.0-20220328133751-f06773764ce3/go.mod 
h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/networking v0.0.0-20220323170318-55757e9c20d6/go.mod h1:tI+j9UGI4eHeinQktrQpHNS0pZ+XII1yF7ZtGyemkm0= knative.dev/networking v0.0.0-20220404212543-dde40b019aff h1:pqzWi29qb44TY+5xtc9vty4mSyUYvojXZGCp0y/91eo= @@ -4135,8 +4265,9 @@ knative.dev/pkg v0.0.0-20220318133418-7f16595277b2/go.mod h1:nKJ2L4o7or3j58eqMK8 knative.dev/pkg v0.0.0-20220318185521-e6e3cf03d765/go.mod h1:nKJ2L4o7or3j58eqMK843kbIM0SiYnAXXsisfEQECS8= knative.dev/pkg v0.0.0-20220325200448-1f7514acd0c2/go.mod h1:5xt0nzCwxvQ2N4w71smY7pYm5nVrQ8qnRsMinSLVpio= knative.dev/pkg v0.0.0-20220329144915-0a1ec2e0d46c/go.mod h1:0A5D5tOLettuVoi5x+0SLGRfrvVemXXtLH247WupPJk= -knative.dev/pkg v0.0.0-20220805012121-7b8b06028e4f h1:kW4K5SsjZ7qMzM8TCqHdDmpv0xKN4Jje4BXhDcByFUI= knative.dev/pkg v0.0.0-20220805012121-7b8b06028e4f/go.mod h1:nBMKMJvyoaJdkpUrjwLVs/DwaP6d73R3UkXK6lblJyE= +knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15 h1:GNmzHVaUo3zoi/wtIN71LPQaWy6DdoYzmb+GIq2s4fw= +knative.dev/pkg v0.0.0-20220818004048-4a03844c0b15/go.mod h1:YLjXbkQLlGHok+u0FLfMbBHFzY9WGu3GHhnrptoAy8I= knative.dev/reconciler-test v0.0.0-20220328072550-7d32310c9b3a/go.mod h1:wlz1lGyn5fjJYL5PTSL/SOI4xgVpU+q6D4eaa19NsDA= knative.dev/serving v0.30.1-0.20220402124840-21c05dc9d9a4 h1:iRFWsFKsA5ddhi+eKZVJdf8gPBomTfjIyRAKk9Uh7Ko= knative.dev/serving v0.30.1-0.20220402124840-21c05dc9d9a4/go.mod h1:TIKeQ1Dvn/wfmgth1fpBeYi1Qf0TPlulnwUDwOdZN50= @@ -4147,6 +4278,7 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jC mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +oras.land/oras-go v1.2.0/go.mod h1:pFNs7oHp2dYsYMSS82HaX5l4mpnGO7hbpPN6EWH2ltc= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -4165,8 +4297,8 @@ sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0 sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= -sigs.k8s.io/release-utils v0.6.0 h1:wJDuzWJqPH4a5FAxAXE2aBvbB6UMIW7iYMhsKnIMQkA= -sigs.k8s.io/release-utils v0.6.0/go.mod h1:kR1/DuYCJ4covppUasYNcA11OixC9O37B/E0ejRfb+c= +sigs.k8s.io/release-utils v0.7.3 h1:6pS8x6c5RmdUgR9qcg1LO6hjUzuE4Yo9TGZ3DemrZdM= +sigs.k8s.io/release-utils v0.7.3/go.mod h1:n0mVez/1PZYZaZUTJmxewxH3RJ/Lf7JUDh7TG1CASOE= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/cmd/chain/payload.go b/pkg/cmd/chain/payload.go index 6e96cb117e..783d227b48 100644 --- a/pkg/cmd/chain/payload.go +++ b/pkg/cmd/chain/payload.go @@ -20,6 +20,7 @@ import ( "github.com/spf13/cobra" "github.com/tektoncd/chains/pkg/chains" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/cli/pkg/chain" "github.com/tektoncd/cli/pkg/cli" 
"github.com/tektoncd/cli/pkg/taskrun" @@ -95,7 +96,8 @@ func printPayloads(cs *cli.Clients, namespace string, tr *v1beta1.TaskRun, skipV } // Fetch the payload. - payloads, err := backend.RetrievePayloads(context.Background(), tr, opts) + trObj := objects.NewTaskRunObject(tr) + payloads, err := backend.RetrievePayloads(context.Background(), trObj, opts) if err != nil { return fmt.Errorf("error retrieving the payloads: %s", err) } diff --git a/pkg/cmd/chain/signature.go b/pkg/cmd/chain/signature.go index b9e608a216..37a33cb919 100644 --- a/pkg/cmd/chain/signature.go +++ b/pkg/cmd/chain/signature.go @@ -19,6 +19,7 @@ import ( "fmt" "github.com/spf13/cobra" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/cli/pkg/chain" "github.com/tektoncd/cli/pkg/cli" "github.com/tektoncd/cli/pkg/taskrun" @@ -77,7 +78,8 @@ func printSignatures(cs *cli.Clients, namespace string, tr *v1beta1.TaskRun) err } // Fetch the signature. - signatures, err := backend.RetrieveSignatures(context.Background(), tr, opts) + trObj := objects.NewTaskRunObject(tr) + signatures, err := backend.RetrieveSignatures(context.Background(), trObj, opts) if err != nil { return fmt.Errorf("error retrieving the signatures: %s", err) } diff --git a/vendor/bitbucket.org/creachadair/shell/bitbucket-pipelines.yml b/vendor/bitbucket.org/creachadair/shell/bitbucket-pipelines.yml index d4b1a14c91..8acd906c1b 100644 --- a/vendor/bitbucket.org/creachadair/shell/bitbucket-pipelines.yml +++ b/vendor/bitbucket.org/creachadair/shell/bitbucket-pipelines.yml @@ -16,14 +16,8 @@ definitions: pipelines: default: # run on each push - step: - image: golang:1.9 + image: golang:1.16 <<: *Verify - step: - image: golang:1.10 - <<: *Verify - - step: - image: golang:1.11 - <<: *Verify - - step: - image: golang:1.12 + image: golang:1.17 <<: *Verify diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 00b99da4a0..fa64b1a9b8 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,107 +1,107 @@ { "accessapproval": "1.3.0", "accesscontextmanager": "1.2.0", - "aiplatform": "1.13.0", - "analytics": "0.7.0", + "aiplatform": "1.14.0", + "analytics": "0.8.0", "apigateway": "1.2.0", "apigeeconnect": "1.2.0", "appengine": "1.3.0", - "area120": "0.3.0", + "area120": "0.4.0", "artifactregistry": "1.3.0", - "asset": "1.2.0", - "assuredworkloads": "0.6.0", - "automl": "1.3.0", - "baremetalsolution": "0.1.0", + "asset": "1.3.0", + "assuredworkloads": "1.0.0", + "automl": "1.4.0", + "baremetalsolution": "0.2.0", "batch": "0.1.0", "billing": "1.2.0", - "binaryauthorization": "0.6.0", + "binaryauthorization": "1.0.0", "certificatemanager": "0.2.0", - "channel": "1.6.0", + "channel": "1.7.0", "cloudbuild": "1.2.0", "clouddms": "1.2.0", - "cloudtasks": "1.3.0", + "cloudtasks": "1.4.0", "compute": "1.7.0", "contactcenterinsights": "1.2.0", "container": "1.2.0", - "containeranalysis": "0.3.0", + "containeranalysis": "0.4.0", "datacatalog": "1.3.0", - "dataflow": "0.4.0", + "dataflow": "0.5.0", "datafusion": "1.3.0", "datalabeling": "0.3.0", - "dataplex": "0.4.0", + "dataplex": "1.0.0", "dataproc": "1.5.0", - "dataqna": "0.3.0", - "datastream": "0.5.0", + "dataqna": "0.4.0", + "datastream": "1.0.0", "deploy": "1.2.0", - "dialogflow": "1.10.0", + "dialogflow": "1.11.0", "dlp": "1.4.0", "documentai": "1.4.0", - "domains": "0.4.0", + "domains": "0.5.0", 
"essentialcontacts": "1.2.0", "eventarc": "1.6.0", "filestore": "1.2.0", "functions": "1.4.0", - "gaming": "1.2.0", + "gaming": "1.3.0", "gkebackup": "0.1.0", "gkeconnect": "0.3.0", - "gkehub": "0.7.0", + "gkehub": "0.8.0", "gkemulticloud": "0.2.0", "grafeas": "0.2.0", "gsuiteaddons": "1.2.0", "iam": "0.3.0", "iap": "1.3.0", - "ids": "0.3.0", + "ids": "1.0.0", "iot": "1.2.0", "kms": "1.4.0", - "language": "1.2.0", - "lifesciences": "0.3.0", + "language": "1.3.0", + "lifesciences": "0.4.0", "managedidentities": "1.2.0", "mediatranslation": "0.3.0", - "memcache": "1.2.0", - "metastore": "1.2.0", + "memcache": "1.3.0", + "metastore": "1.3.0", "monitoring": "1.5.0", "networkconnectivity": "1.2.0", - "networkmanagement": "1.2.0", + "networkmanagement": "1.3.0", "networksecurity": "0.3.0", - "notebooks": "0.4.0", - "optimization": "0.1.0", + "notebooks": "1.0.0", + "optimization": "1.0.0", "orchestration": "1.2.0", "orgpolicy": "1.3.0", - "osconfig": "1.5.0", - "oslogin": "1.2.0", - "phishingprotection": "0.3.0", + "osconfig": "1.6.0", + "oslogin": "1.3.0", + "phishingprotection": "0.4.0", "policytroubleshooter": "1.2.0", - "privatecatalog": "0.3.0", + "privatecatalog": "0.4.0", "recaptchaenterprise/v2": "2.0.1", "recommendationengine": "0.2.0", - "recommender": "1.3.0", - "redis": "1.5.0", + "recommender": "1.4.0", + "redis": "1.6.0", "resourcemanager": "1.2.0", "resourcesettings": "1.2.0", "retail": "1.4.0", "run": "0.1.1", - "scheduler": "1.2.0", - "secretmanager": "1.4.0", + "scheduler": "1.3.0", + "secretmanager": "1.5.0", "security": "1.4.0", "securitycenter": "1.8.0", "servicecontrol": "1.3.0", - "servicedirectory": "1.2.0", + "servicedirectory": "1.3.0", "servicemanagement": "1.3.0", "serviceusage": "1.2.0", "shell": "1.2.0", - "speech": "1.4.0", + "speech": "1.5.0", "storagetransfer": "1.3.0", - "talent": "0.8.0", + "talent": "0.9.0", "texttospeech": "1.3.0", "tpu": "1.2.0", "trace": "1.2.0", "translate": "1.2.0", - "video": "1.6.0", - "videointelligence": "1.2.0", + "video": "1.7.0", + "videointelligence": "1.4.0", "vision/v2": "2.0.0", - "vmmigration": "0.3.0", + "vmmigration": "1.0.0", "vpcaccess": "1.2.0", - "webrisk": "1.2.0", + "webrisk": "1.3.0", "websecurityscanner": "1.2.0", - "workflows": "1.4.0" + "workflows": "1.5.0" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 52eec6a307..31924972e6 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.102.1" + ".": "0.103.0" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 0ced42279c..126a31b472 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support ([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + ## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1405d09674..50538b1d34 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -71,6 +71,7 @@ 
func newDefaultHTTPClient() *http.Client { KeepAlive: 30 * time.Second, }).Dial, }, + Timeout: 5 * time.Second, } } diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 72402161fd..1ee2e16676 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -122,7 +122,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/assuredworkloads/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/assuredworkloads/apiv1beta1": { @@ -320,7 +320,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/binaryauthorization/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/binaryauthorization/apiv1beta1": { @@ -491,7 +491,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataplex/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/dataproc/apiv1": { @@ -536,7 +536,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastream/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/datastream/apiv1alpha1": { @@ -824,7 +824,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ids/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/iot/apiv1": { @@ -1031,7 +1031,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/notebooks/apiv1beta1": { @@ -1049,7 +1049,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/optimization/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/orchestration/airflow/service/apiv1": { @@ -1634,7 +1634,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/livestream/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/stitcher/apiv1": { @@ -1643,7 +1643,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { @@ -1673,6 +1673,15 @@ "release_level": "beta", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/videointelligence/apiv1p3beta1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1p3beta1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + 
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/videointelligence/latest/apiv1p3beta1", + "release_level": "beta", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/vision/apiv1p1beta1": { "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", "description": "Cloud Vision API", @@ -1697,7 +1706,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/vmmigration/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/vpcaccess/apiv1": { @@ -1742,7 +1751,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/workflows/latest/apiv1", - "release_level": "beta", + "release_level": "ga", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/workflows/apiv1beta": { diff --git a/vendor/github.com/tektoncd/resolution/LICENSE b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE similarity index 100% rename from vendor/github.com/tektoncd/resolution/LICENSE rename to vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/LICENSE diff --git a/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go new file mode 100644 index 0000000000..5706fe2c7b --- /dev/null +++ b/vendor/github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper/helper.go @@ -0,0 +1,51 @@ +package helper + +import ( + "fmt" + "github.com/aliyun/credentials-go/credentials" + "os" +) + +const ( + EnvRoleArn = "ALIBABA_CLOUD_ROLE_ARN" + EnvOidcProviderArn = "ALIBABA_CLOUD_OIDC_PROVIDER_ARN" + EnvOidcTokenFile = "ALIBABA_CLOUD_OIDC_TOKEN_FILE" +) + +func HaveOidcCredentialRequiredEnv() bool { + return os.Getenv(EnvRoleArn) != "" && + os.Getenv(EnvOidcProviderArn) != "" && + os.Getenv(EnvOidcTokenFile) != "" +} + +func NewOidcCredential(sessionName string) (credential credentials.Credential, err error) { + return GetOidcCredential(sessionName) +} + +// Deprecated: Use NewOidcCredential instead +func GetOidcCredential(sessionName string) (credential credentials.Credential, err error) { + roleArn := os.Getenv(EnvRoleArn) + oidcArn := os.Getenv(EnvOidcProviderArn) + tokenFile := os.Getenv(EnvOidcTokenFile) + if roleArn == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvRoleArn) + } + if oidcArn == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvOidcProviderArn) + } + if tokenFile == "" { + return nil, fmt.Errorf("environment variable %q is missing", EnvOidcTokenFile) + } + if _, err := os.Stat(tokenFile); err != nil { + return nil, fmt.Errorf("unable to read file at %q: %s", tokenFile, err) + } + + config := new(credentials.Config). + SetType("oidc_role_arn"). + SetOIDCProviderArn(oidcArn). + SetOIDCTokenFilePath(tokenFile). + SetRoleArn(roleArn). 
+ SetRoleSessionName(sessionName) + + return credentials.NewCredential(config) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go index 71037169c4..3e3329b69e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -4,4 +4,4 @@ package version // Licensed under the MIT License. See License.txt in the project root for license information. // Number contains the semantic version of this SDK. -const Number = "v63.3.0" +const Number = "v66.0.0" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index fec416a9c4..b11eb07884 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -1,3 +1,5 @@ +# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentiy` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/). + # Azure Active Directory authentication for Go This is a standalone package for authenticating with Azure Active @@ -18,7 +20,7 @@ go get -u github.com/Azure/go-autorest/autorest/adal ## Usage -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). ### Register an Azure AD Application with secret @@ -88,7 +90,7 @@ An Active Directory application is required in order to use this library. An app ### Grant the necessary permissions Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles) which can be assigned to a service principal of an Azure AD application depending of your needs. ``` @@ -104,7 +106,7 @@ It is also possible to define custom role definitions. az role definition create --role-definition role-definition.json ``` -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. 
+* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. ### Acquire Access Token diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 310be07ec3..1a9c8ab537 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -365,6 +365,25 @@ func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, err }) } +// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. +type ServicePrincipalFederatedSecret struct { + jwt string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + + v.Set("client_assertion", secret.jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") +} + // ServicePrincipalToken encapsulates a Token created for a Service Principal. type ServicePrincipalToken struct { inner servicePrincipalToken @@ -419,6 +438,8 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} case "ServicePrincipalAuthorizationCodeSecret": spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + case "ServicePrincipalFederatedSecret": + return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported") default: return fmt.Errorf("unrecognized token type '%s'", secret["type"]) } @@ -665,6 +686,31 @@ func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clie ) } +// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT. +func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if jwt == "" { + return nil, fmt.Errorf("parameter 'jwt' cannot be empty") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalFederatedSecret{ + jwt: jwt, + }, + callbacks..., + ) +} + type msiType int const ( @@ -1058,8 +1104,8 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource // AAD returns expires_in as a string, ADFS returns it as an int ExpiresIn json.Number `json:"expires_in"` - // expires_on can be in two formats, a UTC time stamp or the number of seconds. - ExpiresOn string `json:"expires_on"` + // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. 
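+	// The JSON value may therefore decode as a string or a float64 (and ADFS omits the
+	// field entirely); parseExpiresOn below normalizes both forms to seconds.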
+ ExpiresOn interface{} `json:"expires_on"` NotBefore json.Number `json:"not_before"` Resource string `json:"resource"` @@ -1072,7 +1118,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } expiresOn := json.Number("") // ADFS doesn't include the expires_on field - if token.ExpiresOn != "" { + if token.ExpiresOn != nil { if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) } @@ -1089,18 +1135,27 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource } // converts expires_on to the number of seconds -func parseExpiresOn(s string) (json.Number, error) { - // convert the expiration date to the number of seconds from now +func parseExpiresOn(s interface{}) (json.Number, error) { + // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 + asFloat64, ok := s.(float64) + if ok { + // this is the number of seconds as int case + return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil + } + asStr, ok := s.(string) + if !ok { + return "", fmt.Errorf("unexpected expires_on type %T", s) + } + // convert the expiration date to the number of seconds from the unix epoch timeToDuration := func(t time.Time) json.Number { - dur := t.Sub(time.Now().UTC()) - return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10)) + return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) } - if _, err := strconv.ParseInt(s, 10, 64); err == nil { + if _, err := json.Number(asStr).Int64(); err == nil { // this is the number of seconds case, no conversion required - return json.Number(s), nil - } else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil { + return json.Number(asStr), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { return timeToDuration(eo), nil - } else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil { + } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { return timeToDuration(eo), nil } else { // unknown format @@ -1317,12 +1372,25 @@ func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTena // MSIAvailable returns true if the MSI endpoint is available for authentication. func MSIAvailable(ctx context.Context, s Sender) bool { + msiType, _, err := getMSIType() + + if err != nil { + return false + } + + if msiType != msiTypeIMDS { + return true + } + if s == nil { s = sender() } + resp, err := getMSIEndpoint(ctx, s) + if err == nil { resp.Body.Close() } + return err == nil } diff --git a/vendor/github.com/PaesslerAG/gval/.travis.yml b/vendor/github.com/PaesslerAG/gval/.travis.yml deleted file mode 100644 index 681fe46681..0000000000 --- a/vendor/github.com/PaesslerAG/gval/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls - -script: -- go test -bench=. 
-benchmem -timeout 10m -coverprofile coverage.out -- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN -- go test -bench=Random -benchtime 5m -timeout 30m -benchmem -coverprofile coverage.out - -go: "1.11" diff --git a/vendor/github.com/PaesslerAG/gval/LICENSE b/vendor/github.com/PaesslerAG/gval/LICENSE deleted file mode 100644 index 0716dbca1e..0000000000 --- a/vendor/github.com/PaesslerAG/gval/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2017, Paessler AG -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/gval/README.md b/vendor/github.com/PaesslerAG/gval/README.md deleted file mode 100644 index b88fe455d6..0000000000 --- a/vendor/github.com/PaesslerAG/gval/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# Gval - -[![Godoc](https://godoc.org/github.com/PaesslerAG/gval?status.png)](https://godoc.org/github.com/PaesslerAG/gval) -[![Build Status](https://api.travis-ci.org/PaesslerAG/gval.svg?branch=master)](https://travis-ci.org/PaesslerAG/gval) -[![Coverage Status](https://coveralls.io/repos/github/PaesslerAG/gval/badge.svg?branch=master)](https://coveralls.io/github/PaesslerAG/gval?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/PaesslerAG/gval)](https://goreportcard.com/report/github.com/PaesslerAG/gval) - -Gval (Go eVALuate) provides support for evaluating arbitrary expressions, in particular Go-like expressions. 
- -![gopher](./prtg-batmin-gopher.png) - -## Evaluate - -Gval can evaluate expressions with parameters, arimethetic, logical, and string operations: - -- basic expression: [10 > 0](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Basic) -- parameterized expression: [foo > 0](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Parameter) -- nested parameterized expression: [foo.bar > 0](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--NestedParameter) -- arithmetic expression: [(requests_made * requests_succeeded / 100) >= 90](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Arithmetic) -- string expression: [http_response_body == "service is ok"](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--String) -- float64 expression: [(mem_used / total_mem) * 100](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Float64) - -It can easily be extended with custom functions or operators: - -- custom date comparator: [date(\`2014-01-02\`) > date(\`2014-01-01 23:59:59\`)](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--DateComparison) -- string length: [strlen("someReallyLongInputString") <= 16](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Strlen) - -You can parse gval.Expressions once and re-use them multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once: - -- [Parsing and Evaluation](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluable) - -The normal Go-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parentheses to clarify which portions of an expression should be run first. - -Strings, numbers, and booleans can be used like in Go: - -- [(7 < "47" == true ? "hello world!\n\u263a") + \` more text\`](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Encoding) - -## Parameter - -Variables can be accessed via string literals. They can be used for values with string keys if the parameter is a `map[string]interface{}` or `map[interface{}]interface{}` and for fields or methods if the parameter is a struct. - -- [foo > 0](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Parameter) - -### Bracket Selector - -Map and array elements and Struct Field can be accessed via `[]`. - -- [foo[0]](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Array) -- [foo["b" + "a" + "r"]](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--ExampleEvaluate_ComplexAccessor) - -### Dot Selector - -A nested variable with a name containing only letters and underscores can be accessed via a dot selector. - -- [foo.bar > 0](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--NestedParameter) - -### Custom Selector - -Parameter names like `response-time` will be interpreted as `response` minus `time`. While gval doesn't support these parameter names directly, you can easily access them via a custom extension like [JSON Path](https://github.com/PaesslerAG/jsonpath): - -- [$["response-time"]](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Jsonpath) - -Jsonpath is also suitable for accessing array elements. 
- -### Fields and Methods - -If you have structs in your parameters, you can access their fields and methods in the usual way: - -- [foo.Hello + foo.World()](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--FlatAccessor) - -It also works if the parameter is a struct directly -[Hello + World()](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--Accessor) -or if the fields are nested -[foo.Hello + foo.World()](https://godoc.org/github.com/PaesslerAG/gval/#example-Evaluate--NestedAccessor) - -This may be convenient but note that using accessors on strucs makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system). If there are functions you want to use, it's faster (and probably cleaner) to define them as functions (see the Evaluate section). These approaches use no reflection, and are designed to be fast and clean. - -## Default Language - -The default language is in serveral sub languages like text, arithmetic or propositional logic defined. See [Godoc](https://godoc.org/github.com/PaesslerAG/gval/#Gval) for details. All sub languages are merged into gval.Full which contains the following elements: - -- Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<` -- Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~` -- Logical ops: `||` `&&` -- Numeric constants, as 64-bit floating point (`12345.678`) -- String constants (double quotes: `"foobar"`) -- Date function 'Date(x)', using any permutation of RFC3339, ISO8601, ruby date, or unix date -- Boolean constants: `true` `false` -- Parentheses to control order of evaluation `(` `)` -- Json Arrays : `[1, 2, "foo"]` -- Json Objects : `{"a":1, "b":2, "c":"foo"}` -- Prefixes: `!` `-` `~` -- Ternary conditional: `?` `:` -- Null coalescence: `??` - -## Customize - -Gval is completly customizable. Every constant, function or operator can be defined separately and existing expression languages can be reused: - -- [foo.Hello + foo.World()](https://godoc.org/github.com/PaesslerAG/gval/#example-Language) - -For details see [Godoc](https://godoc.org/github.com/PaesslerAG/gval). - -### External gval Languages - -A list of external libraries for gval. Feel free to add your own library. - -- [gvalstrings](https://github.com/generikvault/gvalstrings) parse single quoted strings in gval. -- [jsonpath](https://github.com/PaesslerAG/jsonpath) full support for jsonpath in gval. - -## Performance - -The library is built with the intention of being quick but has not been aggressively profiled and optimized. For most applications, though, it is completely fine. -If performance is an issue, make sure to create your expression language with all functions, constants and operators only once. Evaluating an expression like gval.Evaluate("expression, const1, func1, func2, ...) creates a new gval.Language everytime it is called and slows execution. - -The library comes with a bunch of benchmarks to measure the performance of parsing and evaluating expressions. You can run them with `go test -bench=.`. - -For a very rough idea of performance, here are the results from a benchmark run on a Dell Latitude E7470 Win 10 i5-6300U. 
- -``` text -BenchmarkGval/const_evaluation-4 500000000 3.57 ns/op -BenchmarkGval/const_parsing-4 1000000 1144 ns/op -BenchmarkGval/single_parameter_evaluation-4 10000000 165 ns/op -BenchmarkGval/single_parameter_parsing-4 1000000 1648 ns/op -BenchmarkGval/parameter_evaluation-4 5000000 352 ns/op -BenchmarkGval/parameter_parsing-4 500000 2773 ns/op -BenchmarkGval/common_evaluation-4 3000000 434 ns/op -BenchmarkGval/common_parsing-4 300000 4419 ns/op -BenchmarkGval/complex_evaluation-4 100000000 11.6 ns/op -BenchmarkGval/complex_parsing-4 100000 17936 ns/op -BenchmarkGval/literal_evaluation-4 300000000 3.84 ns/op -BenchmarkGval/literal_parsing-4 500000 2559 ns/op -BenchmarkGval/modifier_evaluation-4 500000000 3.54 ns/op -BenchmarkGval/modifier_parsing-4 500000 3755 ns/op -BenchmarkGval/regex_evaluation-4 50000 21347 ns/op -BenchmarkGval/regex_parsing-4 200000 6480 ns/op -BenchmarkGval/constant_regex_evaluation-4 1000000 1000 ns/op -BenchmarkGval/constant_regex_parsing-4 200000 9417 ns/op -BenchmarkGval/accessors_evaluation-4 3000000 417 ns/op -BenchmarkGval/accessors_parsing-4 1000000 1778 ns/op -BenchmarkGval/accessors_method_evaluation-4 1000000 1931 ns/op -BenchmarkGval/accessors_method_parsing-4 1000000 1729 ns/op -BenchmarkGval/accessors_method_parameter_evaluation-4 1000000 2162 ns/op -BenchmarkGval/accessors_method_parameter_parsing-4 500000 2618 ns/op -BenchmarkGval/nested_accessors_evaluation-4 2000000 681 ns/op -BenchmarkGval/nested_accessors_parsing-4 1000000 2115 ns/op -BenchmarkRandom-4 500000 3631 ns/op -ok -``` - -## API Breaks - -Gval is designed with easy expandability in mind and API breaks will be avoided if possible. If API breaks are unavoidable they wil be explicitly stated via an increased major version number. - -------------------------------------- -Credits to Reene French for the gophers. 
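The README above documents gval's expression API. As a minimal, illustrative sketch of the `gval.Evaluate` entry point it describes (the signature is the one declared in the vendored gval.go further down in this diff; the expression and parameter values here are made up for the example):

```go
package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

func main() {
	// "foo.bar > 0" exercises the dot selector described in the README;
	// the map supplies the parameter values referenced by the expression.
	result, err := gval.Evaluate("foo.bar > 0", map[string]interface{}{
		"foo": map[string]interface{}{"bar": 42},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true
}
```

As the README notes, parsing is the compute-intensive phase, so callers that reuse the same expression should parse it once and re-evaluate the resulting Evaluable instead of calling Evaluate repeatedly.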
diff --git a/vendor/github.com/PaesslerAG/gval/evaluable.go b/vendor/github.com/PaesslerAG/gval/evaluable.go deleted file mode 100644 index 5238854c31..0000000000 --- a/vendor/github.com/PaesslerAG/gval/evaluable.go +++ /dev/null @@ -1,334 +0,0 @@ -package gval - -import ( - "context" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// Evaluable evaluates given parameter -type Evaluable func(c context.Context, parameter interface{}) (interface{}, error) - -//EvalInt evaluates given parameter to an int -func (e Evaluable) EvalInt(c context.Context, parameter interface{}) (int, error) { - v, err := e(c, parameter) - if err != nil { - return 0, err - } - - f, ok := convertToFloat(v) - if !ok { - return 0, fmt.Errorf("expected number but got %v (%T)", v, v) - } - return int(f), nil -} - -//EvalFloat64 evaluates given parameter to an int -func (e Evaluable) EvalFloat64(c context.Context, parameter interface{}) (float64, error) { - v, err := e(c, parameter) - if err != nil { - return 0, err - } - - f, ok := convertToFloat(v) - if !ok { - return 0, fmt.Errorf("expected number but got %v (%T)", v, v) - } - return f, nil -} - -//EvalBool evaluates given parameter to a bool -func (e Evaluable) EvalBool(c context.Context, parameter interface{}) (bool, error) { - v, err := e(c, parameter) - if err != nil { - return false, err - } - - b, ok := convertToBool(v) - if !ok { - return false, fmt.Errorf("expected bool but got %v (%T)", v, v) - } - return b, nil -} - -//EvalString evaluates given parameter to a string -func (e Evaluable) EvalString(c context.Context, parameter interface{}) (string, error) { - o, err := e(c, parameter) - if err != nil { - return "", err - } - return fmt.Sprintf("%v", o), nil -} - -//Const Evaluable represents given constant -func (*Parser) Const(value interface{}) Evaluable { - return constant(value) -} - -func constant(value interface{}) Evaluable { - return func(c context.Context, v interface{}) (interface{}, error) { - return value, nil - } -} - -//Var Evaluable represents value at given path. -//It supports with default language VariableSelector: -// map[interface{}]interface{}, -// map[string]interface{} and -// []interface{} and via reflect -// struct fields, -// struct methods, -// slices and -// map with int or string key. -func (p *Parser) Var(path ...Evaluable) Evaluable { - if p.Language.selector == nil { - return variable(path) - } - return p.Language.selector(path) -} - -// Evaluables is a slice of Evaluable. 
-type Evaluables []Evaluable - -// EvalStrings evaluates given parameter to a string slice -func (evs Evaluables) EvalStrings(c context.Context, parameter interface{}) ([]string, error) { - strs := make([]string, len(evs)) - for i, p := range evs { - k, err := p.EvalString(c, parameter) - if err != nil { - return nil, err - } - strs[i] = k - } - return strs, nil -} - -func variable(path Evaluables) Evaluable { - return func(c context.Context, v interface{}) (interface{}, error) { - keys, err := path.EvalStrings(c, v) - if err != nil { - return nil, err - } - for i, k := range keys { - switch o := v.(type) { - case map[interface{}]interface{}: - v = o[k] - continue - case map[string]interface{}: - v = o[k] - continue - case []interface{}: - if i, err := strconv.Atoi(k); err == nil && i >= 0 && len(o) > i { - v = o[i] - continue - } - default: - var ok bool - v, ok = reflectSelect(k, o) - if !ok { - return nil, fmt.Errorf("unknown parameter %s", strings.Join(keys[:i+1], ".")) - } - } - } - return v, nil - } -} - -func reflectSelect(key string, value interface{}) (selection interface{}, ok bool) { - vv := reflect.ValueOf(value) - vvElem := resolvePotentialPointer(vv) - - switch vvElem.Kind() { - case reflect.Map: - mapKey, ok := reflectConvertTo(vv.Type().Key().Kind(), key) - if !ok { - return nil, false - } - - vvElem = vv.MapIndex(reflect.ValueOf(mapKey)) - vvElem = resolvePotentialPointer(vvElem) - - if vvElem.IsValid() { - return vvElem.Interface(), true - } - case reflect.Slice: - if i, err := strconv.Atoi(key); err == nil && i >= 0 && vv.Len() > i { - vvElem = resolvePotentialPointer(vv.Index(i)) - return vvElem.Interface(), true - } - case reflect.Struct: - field := vvElem.FieldByName(key) - if field.IsValid() { - return field.Interface(), true - } - - method := vv.MethodByName(key) - if method.IsValid() { - return method.Interface(), true - } - } - return nil, false -} - -func resolvePotentialPointer(value reflect.Value) reflect.Value { - if value.Kind() == reflect.Ptr { - return value.Elem() - } - return value -} - -func reflectConvertTo(k reflect.Kind, value string) (interface{}, bool) { - switch k { - case reflect.String: - return value, true - case reflect.Int: - if i, err := strconv.Atoi(value); err == nil { - return i, true - } - } - return nil, false -} - -func (*Parser) callFunc(fun function, args ...Evaluable) Evaluable { - return func(c context.Context, v interface{}) (ret interface{}, err error) { - a := make([]interface{}, len(args)) - for i, arg := range args { - ai, err := arg(c, v) - if err != nil { - return nil, err - } - a[i] = ai - } - return fun(a...) 
- } -} - -func (*Parser) callEvaluable(fullname string, fun Evaluable, args ...Evaluable) Evaluable { - return func(c context.Context, v interface{}) (ret interface{}, err error) { - f, err := fun(c, v) - - if err != nil { - return nil, fmt.Errorf("could not call function: %v", err) - } - - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("failed to execute function '%s': %s", fullname, r) - ret = nil - } - }() - - ff := reflect.ValueOf(f) - - if ff.Kind() != reflect.Func { - return nil, fmt.Errorf("could not call '%s' type %T", fullname, f) - } - - a := make([]reflect.Value, len(args)) - for i := range args { - arg, err := args[i](c, v) - if err != nil { - return nil, err - } - a[i] = reflect.ValueOf(arg) - } - - rr := ff.Call(a) - - r := make([]interface{}, len(rr)) - for i, e := range rr { - r[i] = e.Interface() - } - - errorInterface := reflect.TypeOf((*error)(nil)).Elem() - if len(r) > 0 && ff.Type().Out(len(r)-1).Implements(errorInterface) { - if r[len(r)-1] != nil { - err = r[len(r)-1].(error) - } - r = r[0 : len(r)-1] - } - - switch len(r) { - case 0: - return err, nil - case 1: - return r[0], err - default: - return r, err - } - } -} - -//IsConst returns if the Evaluable is a Parser.Const() value -func (e Evaluable) IsConst() bool { - pc := reflect.ValueOf(constant(nil)).Pointer() - pe := reflect.ValueOf(e).Pointer() - return pc == pe -} - -func regEx(a, b Evaluable) (Evaluable, error) { - if !b.IsConst() { - return func(c context.Context, o interface{}) (interface{}, error) { - a, err := a.EvalString(c, o) - if err != nil { - return nil, err - } - b, err := b.EvalString(c, o) - if err != nil { - return nil, err - } - matched, err := regexp.MatchString(b, a) - return matched, err - }, nil - } - s, err := b.EvalString(nil, nil) - if err != nil { - return nil, err - } - regex, err := regexp.Compile(s) - if err != nil { - return nil, err - } - return func(c context.Context, v interface{}) (interface{}, error) { - s, err := a.EvalString(c, v) - if err != nil { - return nil, err - } - return regex.MatchString(s), nil - }, nil -} - -func notRegEx(a, b Evaluable) (Evaluable, error) { - if !b.IsConst() { - return func(c context.Context, o interface{}) (interface{}, error) { - a, err := a.EvalString(c, o) - if err != nil { - return nil, err - } - b, err := b.EvalString(c, o) - if err != nil { - return nil, err - } - matched, err := regexp.MatchString(b, a) - return !matched, err - }, nil - } - s, err := b.EvalString(nil, nil) - if err != nil { - return nil, err - } - regex, err := regexp.Compile(s) - if err != nil { - return nil, err - } - return func(c context.Context, v interface{}) (interface{}, error) { - s, err := a.EvalString(c, v) - if err != nil { - return nil, err - } - return !regex.MatchString(s), nil - }, nil -} diff --git a/vendor/github.com/PaesslerAG/gval/functions.go b/vendor/github.com/PaesslerAG/gval/functions.go deleted file mode 100644 index 396040f7c9..0000000000 --- a/vendor/github.com/PaesslerAG/gval/functions.go +++ /dev/null @@ -1,73 +0,0 @@ -package gval - -import ( - "fmt" - "reflect" -) - -type function func(arguments ...interface{}) (interface{}, error) - -func toFunc(f interface{}) function { - if f, ok := f.(func(arguments ...interface{}) (interface{}, error)); ok { - return function(f) - } - return func(args ...interface{}) (interface{}, error) { - fun := reflect.ValueOf(f) - t := fun.Type() - - in, err := createCallArguments(t, args) - if err != nil { - return nil, err - } - out := fun.Call(in) - - r := make([]interface{}, len(out)) - for 
i, e := range out { - r[i] = e.Interface() - } - - err = nil - errorInterface := reflect.TypeOf((*error)(nil)).Elem() - if len(r) > 0 && t.Out(len(r)-1).Implements(errorInterface) { - if r[len(r)-1] != nil { - err = r[len(r)-1].(error) - } - r = r[0 : len(r)-1] - } - - switch len(r) { - case 0: - return nil, err - case 1: - return r[0], err - default: - return r, err - } - } -} - -func createCallArguments(t reflect.Type, args []interface{}) ([]reflect.Value, error) { - variadic := t.IsVariadic() - numIn := t.NumIn() - - if (!variadic && len(args) != numIn) || (variadic && len(args) < numIn-1) { - return nil, fmt.Errorf("invalid number of parameters") - } - - in := make([]reflect.Value, len(args)) - var inType reflect.Type - for i, arg := range args { - if !variadic || i < numIn-1 { - inType = t.In(i) - } else if i == numIn-1 { - inType = t.In(numIn - 1).Elem() - } - argVal := reflect.ValueOf(arg) - if arg == nil || !argVal.Type().AssignableTo(inType) { - return nil, fmt.Errorf("expected type %s for parameter %d but got %T", - inType.String(), i, arg) - } - in[i] = argVal - } - return in, nil -} diff --git a/vendor/github.com/PaesslerAG/gval/gval.go b/vendor/github.com/PaesslerAG/gval/gval.go deleted file mode 100644 index ef6a5fcc2d..0000000000 --- a/vendor/github.com/PaesslerAG/gval/gval.go +++ /dev/null @@ -1,262 +0,0 @@ -// Package gval provides a generic expression language. -// All functions, infix and prefix operators can be replaced by composing languages into a new one. -// -// The package contains concrete expression languages for common application in text, arithmetic, propositional logic and so on. -// They can be used as basis for a custom expression language or to evaluate expressions directly. -package gval - -import ( - "context" - "fmt" - "math" - "reflect" - "text/scanner" - "time" -) - -//Evaluate given parameter with given expression in gval full language -func Evaluate(expression string, parameter interface{}, opts ...Language) (interface{}, error) { - l := full - if len(opts) > 0 { - l = NewLanguage(append([]Language{l}, opts...)...) - } - return l.Evaluate(expression, parameter) -} - -// Full is the union of Arithmetic, Bitmask, Text, PropositionalLogic, and JSON -// Operator in: a in b is true iff value a is an element of array b -// Operator ??: a ?? b returns a if a is not false or nil, otherwise b -// Operator ?: a ? b : c returns b if bool a is true, otherwise c -// -// Function Date: Date(a) parses string a. a must match RFC3339, ISO8601, ruby date, or unix date -func Full(extensions ...Language) Language { - if len(extensions) == 0 { - return full - } - return NewLanguage(append([]Language{full}, extensions...)...) -} - -// Arithmetic contains base, plus(+), minus(-), divide(/), power(**), negative(-) -// and numerical order (<=,<,>,>=) -// -// Arithmetic operators expect float64 operands. -// Called with unfitting input, they try to convert the input to float64. -// They can parse strings and convert any type of int or float. -func Arithmetic() Language { - return arithmetic -} - -// Bitmask contains base, bitwise and(&), bitwise or(|) and bitwise not(^). -// -// Bitmask operators expect float64 operands. -// Called with unfitting input they try to convert the input to float64. -// They can parse strings and convert any type of int or float.
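For reference, the full language documented above is consumed through Evaluate; a minimal sketch against the upstream github.com/PaesslerAG/gval module, exercising the in, ?? and ?: operators (the parameter map is illustrative):

package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

func main() {
	params := map[string]interface{}{
		"role":  "admin",
		"limit": nil, // unset: the ?? fallback below kicks in
	}
	// "in" tests membership in a JSON array; "??" substitutes 10 when limit is nil.
	v, err := gval.Evaluate(`role in ["admin", "editor"] ? (limit ?? 10) : 0`, params)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 10
}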
-func Bitmask() Language { - return bitmask -} - -// Text contains base, lexical order on strings (<=,<,>,>=), -// regex match (=~) and regex not match (!~) -func Text() Language { - return text -} - -// PropositionalLogic contains base, not(!), and (&&), or (||) and Base. -// -// Propositional operator expect bool operands. -// Called with unfitting input they try to convert the input to bool. -// Numbers other than 0 and the strings "TRUE" and "true" are interpreted as true. -// 0 and the strings "FALSE" and "false" are interpreted as false. -func PropositionalLogic() Language { - return propositionalLogic -} - -// JSON contains json objects ({string:expression,...}) -// and json arrays ([expression, ...]) -func JSON() Language { - return ljson -} - -// Base contains equal (==) and not equal (!=), perentheses and general support for variables, constants and functions -// It contains true, false, (floating point) number, string ("" or ``) and char ('') constants -func Base() Language { - return base -} - -var full = NewLanguage(arithmetic, bitmask, text, propositionalLogic, ljson, - - InfixOperator("in", inArray), - - InfixShortCircuit("??", func(a interface{}) (interface{}, bool) { - return a, a != false && a != nil - }), - InfixOperator("??", func(a, b interface{}) (interface{}, error) { - if a == false || a == nil { - return b, nil - } - return a, nil - }), - - PostfixOperator("?", parseIf), - - Function("date", func(arguments ...interface{}) (interface{}, error) { - if len(arguments) != 1 { - return nil, fmt.Errorf("date() expects exactly one string argument") - } - s, ok := arguments[0].(string) - if !ok { - return nil, fmt.Errorf("date() expects exactly one string argument") - } - for _, format := range [...]string{ - time.ANSIC, - time.UnixDate, - time.RubyDate, - time.Kitchen, - time.RFC3339, - time.RFC3339Nano, - "2006-01-02", // RFC 3339 - "2006-01-02 15:04", // RFC 3339 with minutes - "2006-01-02 15:04:05", // RFC 3339 with seconds - "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone - "2006-01-02T15Z0700", // ISO8601 with hour - "2006-01-02T15:04Z0700", // ISO8601 with minutes - "2006-01-02T15:04:05Z0700", // ISO8601 with seconds - "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds - } { - ret, err := time.ParseInLocation(format, s, time.Local) - if err == nil { - return ret, nil - } - } - return nil, fmt.Errorf("date() could not parse %s", s) - }), -) - -var ljson = NewLanguage( - PrefixExtension('[', parseJSONArray), - PrefixExtension('{', parseJSONObject), -) - -var arithmetic = NewLanguage( - InfixNumberOperator("+", func(a, b float64) (interface{}, error) { return a + b, nil }), - InfixNumberOperator("-", func(a, b float64) (interface{}, error) { return a - b, nil }), - InfixNumberOperator("*", func(a, b float64) (interface{}, error) { return a * b, nil }), - InfixNumberOperator("/", func(a, b float64) (interface{}, error) { return a / b, nil }), - InfixNumberOperator("%", func(a, b float64) (interface{}, error) { return math.Mod(a, b), nil }), - InfixNumberOperator("**", func(a, b float64) (interface{}, error) { return math.Pow(a, b), nil }), - - InfixNumberOperator(">", func(a, b float64) (interface{}, error) { return a > b, nil }), - InfixNumberOperator(">=", func(a, b float64) (interface{}, error) { return a >= b, nil }), - InfixNumberOperator("<", func(a, b float64) (interface{}, error) { return a < b, nil }), - InfixNumberOperator("<=", func(a, b float64) (interface{}, error) { return a <= b, nil }), - - InfixNumberOperator("==", 
func(a, b float64) (interface{}, error) { return a == b, nil }), - InfixNumberOperator("!=", func(a, b float64) (interface{}, error) { return a != b, nil }), - - base, -) - -var bitmask = NewLanguage( - InfixNumberOperator("^", func(a, b float64) (interface{}, error) { return float64(int64(a) ^ int64(b)), nil }), - InfixNumberOperator("&", func(a, b float64) (interface{}, error) { return float64(int64(a) & int64(b)), nil }), - InfixNumberOperator("|", func(a, b float64) (interface{}, error) { return float64(int64(a) | int64(b)), nil }), - InfixNumberOperator("<<", func(a, b float64) (interface{}, error) { return float64(int64(a) << uint64(b)), nil }), - InfixNumberOperator(">>", func(a, b float64) (interface{}, error) { return float64(int64(a) >> uint64(b)), nil }), - - PrefixOperator("~", func(c context.Context, v interface{}) (interface{}, error) { - i, ok := convertToFloat(v) - if !ok { - return nil, fmt.Errorf("unexpected %T expected number", v) - } - return float64(^int64(i)), nil - }), -) - -var text = NewLanguage( - InfixTextOperator("+", func(a, b string) (interface{}, error) { return fmt.Sprintf("%v%v", a, b), nil }), - - InfixTextOperator("<", func(a, b string) (interface{}, error) { return a < b, nil }), - InfixTextOperator("<=", func(a, b string) (interface{}, error) { return a <= b, nil }), - InfixTextOperator(">", func(a, b string) (interface{}, error) { return a > b, nil }), - InfixTextOperator(">=", func(a, b string) (interface{}, error) { return a >= b, nil }), - - InfixEvalOperator("=~", regEx), - InfixEvalOperator("!~", notRegEx), - base, -) - -var propositionalLogic = NewLanguage( - PrefixOperator("!", func(c context.Context, v interface{}) (interface{}, error) { - b, ok := convertToBool(v) - if !ok { - return nil, fmt.Errorf("unexpected %T expected bool", v) - } - return !b, nil - }), - - InfixShortCircuit("&&", func(a interface{}) (interface{}, bool) { return false, a == false }), - InfixBoolOperator("&&", func(a, b bool) (interface{}, error) { return a && b, nil }), - InfixShortCircuit("||", func(a interface{}) (interface{}, bool) { return true, a == true }), - InfixBoolOperator("||", func(a, b bool) (interface{}, error) { return a || b, nil }), - - InfixBoolOperator("==", func(a, b bool) (interface{}, error) { return a == b, nil }), - InfixBoolOperator("!=", func(a, b bool) (interface{}, error) { return a != b, nil }), - - base, -) - -var base = NewLanguage( - PrefixExtension(scanner.Int, parseNumber), - PrefixExtension(scanner.Float, parseNumber), - PrefixOperator("-", func(c context.Context, v interface{}) (interface{}, error) { - i, ok := convertToFloat(v) - if !ok { - return nil, fmt.Errorf("unexpected %v(%T) expected number", v, v) - } - return -i, nil - }), - - PrefixExtension(scanner.String, parseString), - PrefixExtension(scanner.Char, parseString), - PrefixExtension(scanner.RawString, parseString), - - Constant("true", true), - Constant("false", false), - - InfixOperator("==", func(a, b interface{}) (interface{}, error) { return reflect.DeepEqual(a, b), nil }), - InfixOperator("!=", func(a, b interface{}) (interface{}, error) { return !reflect.DeepEqual(a, b), nil }), - PrefixExtension('(', parseParentheses), - - Precedence("??", 0), - - Precedence("||", 20), - Precedence("&&", 21), - - Precedence("==", 40), - Precedence("!=", 40), - Precedence(">", 40), - Precedence(">=", 40), - Precedence("<", 40), - Precedence("<=", 40), - Precedence("=~", 40), - Precedence("!~", 40), - Precedence("in", 40), - - Precedence("^", 60), - Precedence("&", 60), - 
Precedence("|", 60), - - Precedence("<<", 90), - Precedence(">>", 90), - - Precedence("+", 120), - Precedence("-", 120), - - Precedence("*", 150), - Precedence("/", 150), - Precedence("%", 150), - - Precedence("**", 200), - - PrefixMetaPrefix(scanner.Ident, parseIdent), -) diff --git a/vendor/github.com/PaesslerAG/gval/language.go b/vendor/github.com/PaesslerAG/gval/language.go deleted file mode 100644 index 582f3e2cb9..0000000000 --- a/vendor/github.com/PaesslerAG/gval/language.go +++ /dev/null @@ -1,238 +0,0 @@ -package gval - -import ( - "context" - "fmt" - "text/scanner" - "unicode" -) - -// Language is an expression language -type Language struct { - prefixes map[interface{}]prefix - operators map[string]operator - operatorSymbols map[rune]struct{} - selector func(Evaluables) Evaluable -} - -// NewLanguage returns the union of given Languages as new Language. -func NewLanguage(bases ...Language) Language { - l := newLanguage() - for _, base := range bases { - for i, e := range base.prefixes { - l.prefixes[i] = e - } - for i, e := range base.operators { - l.operators[i] = e.merge(l.operators[i]) - l.operators[i].initiate(i) - } - for i := range base.operatorSymbols { - l.operatorSymbols[i] = struct{}{} - } - if base.selector != nil { - l.selector = base.selector - } - } - return l -} - -func newLanguage() Language { - return Language{ - prefixes: map[interface{}]prefix{}, - operators: map[string]operator{}, - operatorSymbols: map[rune]struct{}{}, - } -} - -// NewEvaluable returns an Evaluable for given expression in the specified language -func (l Language) NewEvaluable(expression string) (Evaluable, error) { - p := newParser(expression, l) - - eval, err := p.ParseExpression(context.Background()) - - if err == nil && p.isCamouflaged() && p.lastScan != scanner.EOF { - err = p.camouflage - } - - if err != nil { - pos := p.scanner.Pos() - return nil, fmt.Errorf("parsing error: %s - %d:%d %s", p.scanner.Position, pos.Line, pos.Column, err) - } - return eval, nil -} - -// Evaluate given parameter with given expression -func (l Language) Evaluate(expression string, parameter interface{}) (interface{}, error) { - eval, err := l.NewEvaluable(expression) - if err != nil { - return nil, err - } - v, err := eval(context.Background(), parameter) - if err != nil { - return nil, fmt.Errorf("can not evaluate %s: %v", expression, err) - } - return v, nil -} - -// Function returns a Language with given function. -// Function has no conversion for input types. -// -// If the function returns an error it must be the last return parameter. -// -// If the function has (without the error) more then one return parameter, -// it returns them as []interface{}. 
-func Function(name string, function interface{}) Language { - l := newLanguage() - l.prefixes[name] = func(c context.Context, p *Parser) (eval Evaluable, err error) { - args := []Evaluable{} - scan := p.Scan() - switch scan { - case '(': - args, err = p.parseArguments(c) - if err != nil { - return nil, err - } - default: - p.Camouflage("function call", '(') - } - return p.callFunc(toFunc(function), args...), nil - } - return l -} - -// Constant returns a Language with given constant -func Constant(name string, value interface{}) Language { - l := newLanguage() - l.prefixes[l.makePrefixKey(name)] = func(c context.Context, p *Parser) (eval Evaluable, err error) { - return p.Const(value), nil - } - return l -} - -// PrefixExtension extends a Language -func PrefixExtension(r rune, ext func(context.Context, *Parser) (Evaluable, error)) Language { - l := newLanguage() - l.prefixes[r] = ext - return l -} - -// PrefixMetaPrefix chooses a Prefix to be executed -func PrefixMetaPrefix(r rune, ext func(context.Context, *Parser) (call string, alternative func() (Evaluable, error), err error)) Language { - l := newLanguage() - l.prefixes[r] = func(c context.Context, p *Parser) (Evaluable, error) { - call, alternative, err := ext(c, p) - if err != nil { - return nil, err - } - if prefix, ok := p.prefixes[l.makePrefixKey(call)]; ok { - return prefix(c, p) - } - return alternative() - } - return l -} - -//PrefixOperator returns a Language with given prefix -func PrefixOperator(name string, e Evaluable) Language { - l := newLanguage() - l.prefixes[l.makePrefixKey(name)] = func(c context.Context, p *Parser) (Evaluable, error) { - eval, err := p.ParseNextExpression(c) - if err != nil { - return nil, err - } - prefix := func(c context.Context, v interface{}) (interface{}, error) { - a, err := eval(c, v) - if err != nil { - return nil, err - } - return e(c, a) - } - if eval.IsConst() { - v, err := prefix(context.Background(), nil) - if err != nil { - return nil, err - } - prefix = p.Const(v) - } - return prefix, nil - } - return l -} - -// PostfixOperator extends a Language. -func PostfixOperator(name string, ext func(context.Context, *Parser, Evaluable) (Evaluable, error)) Language { - l := newLanguage() - l.operators[l.makeInfixKey(name)] = postfix{ - f: func(c context.Context, p *Parser, eval Evaluable, pre operatorPrecedence) (Evaluable, error) { - return ext(c, p, eval) - }, - } - return l -} - -// InfixOperator for two arbitrary values. -func InfixOperator(name string, f func(a, b interface{}) (interface{}, error)) Language { - return newLanguageOperator(name, &infix{arbitrary: f}) -} - -// InfixShortCircuit operator is called after the left operand is evaluated. -func InfixShortCircuit(name string, f func(a interface{}) (interface{}, bool)) Language { - return newLanguageOperator(name, &infix{shortCircuit: f}) -} - -// InfixTextOperator for two text values. -func InfixTextOperator(name string, f func(a, b string) (interface{}, error)) Language { - return newLanguageOperator(name, &infix{text: f}) -} - -// InfixNumberOperator for two number values. -func InfixNumberOperator(name string, f func(a, b float64) (interface{}, error)) Language { - return newLanguageOperator(name, &infix{number: f}) -} - -// InfixBoolOperator for two bool values. -func InfixBoolOperator(name string, f func(a, b bool) (interface{}, error)) Language { - return newLanguageOperator(name, &infix{boolean: f}) -} - -// Precedence of operator. The Operator with higher operatorPrecedence is evaluated first. 
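Alongside Function and Constant, the Infix*Operator constructors plus Precedence let a caller define entirely new operators. A sketch of a custom <> (not-equal) operator bound at the same strength as != (upstream gval assumed; the operator itself is invented for illustration):

package main

import (
	"fmt"
	"reflect"

	"github.com/PaesslerAG/gval"
)

func main() {
	lang := gval.NewLanguage(
		gval.Full(),
		gval.InfixOperator("<>", func(a, b interface{}) (interface{}, error) {
			return !reflect.DeepEqual(a, b), nil
		}),
		gval.Precedence("<>", 40), // same binding strength as == and !=
	)
	v, err := lang.Evaluate("1 + 1 <> 3", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // true: 2 differs from 3
}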
-func Precedence(name string, operatorPrecendence uint8) Language { - return newLanguageOperator(name, operatorPrecedence(operatorPrecendence)) -} - -// InfixEvalOperator operates on the raw operands. -// Therefore it cannot be combined with operators for other operand types. -func InfixEvalOperator(name string, f func(a, b Evaluable) (Evaluable, error)) Language { - return newLanguageOperator(name, directInfix{infixBuilder: f}) -} - -func newLanguageOperator(name string, op operator) Language { - op.initiate(name) - l := newLanguage() - l.operators[l.makeInfixKey(name)] = op - return l -} - -func (l *Language) makePrefixKey(key string) interface{} { - runes := []rune(key) - if len(runes) == 1 && !unicode.IsLetter(runes[0]) { - return runes[0] - } - return key -} - -func (l *Language) makeInfixKey(key string) string { - runes := []rune(key) - for _, r := range runes { - l.operatorSymbols[r] = struct{}{} - } - return key -} - -// VariableSelector returns a Language which uses given variable selector. -// It must be combined with a Language that uses the vatiable selector. E.g. gval.Base(). -func VariableSelector(selector func(path Evaluables) Evaluable) Language { - l := newLanguage() - l.selector = selector - return l -} diff --git a/vendor/github.com/PaesslerAG/gval/operator.go b/vendor/github.com/PaesslerAG/gval/operator.go deleted file mode 100644 index 6c34744ba7..0000000000 --- a/vendor/github.com/PaesslerAG/gval/operator.go +++ /dev/null @@ -1,314 +0,0 @@ -package gval - -import ( - "context" - "fmt" - "reflect" - "strconv" -) - -type stage struct { - Evaluable - infixBuilder - operatorPrecedence -} - -type stageStack []stage //operatorPrecedence in stacktStage is continuously, monotone ascending - -func (s *stageStack) push(b stage) error { - for len(*s) > 0 && s.peek().operatorPrecedence >= b.operatorPrecedence { - a := s.pop() - eval, err := a.infixBuilder(a.Evaluable, b.Evaluable) - if err != nil { - return err - } - if a.IsConst() && b.IsConst() { - v, err := eval(nil, nil) - if err != nil { - return err - } - b.Evaluable = constant(v) - continue - } - b.Evaluable = eval - } - *s = append(*s, b) - return nil -} - -func (s *stageStack) peek() stage { - return (*s)[len(*s)-1] -} - -func (s *stageStack) pop() stage { - a := s.peek() - (*s) = (*s)[:len(*s)-1] - return a -} - -type infixBuilder func(a, b Evaluable) (Evaluable, error) - -func (l Language) isSymbolOperation(r rune) bool { - _, in := l.operatorSymbols[r] - return in -} - -func (op *infix) initiate(name string) { - f := func(a, b interface{}) (interface{}, error) { - return nil, fmt.Errorf("invalid operation (%T) %s (%T)", a, name, b) - } - if op.arbitrary != nil { - f = op.arbitrary - } - for _, typeConvertion := range []bool{true, false} { - if op.text != nil && (!typeConvertion || op.arbitrary == nil) { - f = getStringOpFunc(op.text, f, typeConvertion) - } - if op.boolean != nil { - f = getBoolOpFunc(op.boolean, f, typeConvertion) - } - if op.number != nil { - f = getFloatOpFunc(op.number, f, typeConvertion) - } - } - if op.shortCircuit == nil { - op.builder = func(a, b Evaluable) (Evaluable, error) { - return func(c context.Context, x interface{}) (interface{}, error) { - a, err := a(c, x) - if err != nil { - return nil, err - } - b, err := b(c, x) - if err != nil { - return nil, err - } - return f(a, b) - }, nil - } - return - } - shortF := op.shortCircuit - op.builder = func(a, b Evaluable) (Evaluable, error) { - return func(c context.Context, x interface{}) (interface{}, error) { - a, err := a(c, x) - if err != 
nil { - return nil, err - } - if r, ok := shortF(a); ok { - return r, nil - } - b, err := b(c, x) - if err != nil { - return nil, err - } - return f(a, b) - }, nil - } - return -} - -type opFunc func(a, b interface{}) (interface{}, error) - -func getStringOpFunc(s func(a, b string) (interface{}, error), f opFunc, typeConversion bool) opFunc { - if typeConversion { - return func(a, b interface{}) (interface{}, error) { - if a != nil && b != nil { - return s(fmt.Sprintf("%v", a), fmt.Sprintf("%v", b)) - } - return f(a, b) - } - } - return func(a, b interface{}) (interface{}, error) { - s1, k := a.(string) - s2, l := b.(string) - if k && l { - return s(s1, s2) - } - return f(a, b) - } -} -func convertToBool(o interface{}) (bool, bool) { - if b, ok := o.(bool); ok { - return b, true - } - v := reflect.ValueOf(o) - for o != nil && v.Kind() == reflect.Ptr { - v = v.Elem() - o = v.Interface() - } - if o == false || o == nil || o == "false" || o == "FALSE" { - return false, true - } - if o == true || o == "true" || o == "TRUE" { - return true, true - } - if f, ok := convertToFloat(o); ok { - return f != 0., true - } - return false, false -} -func getBoolOpFunc(o func(a, b bool) (interface{}, error), f opFunc, typeConversion bool) opFunc { - if typeConversion { - return func(a, b interface{}) (interface{}, error) { - x, k := convertToBool(a) - y, l := convertToBool(b) - if k && l { - return o(x, y) - } - return f(a, b) - } - } - return func(a, b interface{}) (interface{}, error) { - x, k := a.(bool) - y, l := b.(bool) - if k && l { - return o(x, y) - } - return f(a, b) - } -} -func convertToFloat(o interface{}) (float64, bool) { - if i, ok := o.(float64); ok { - return i, true - } - v := reflect.ValueOf(o) - for o != nil && v.Kind() == reflect.Ptr { - v = v.Elem() - o = v.Interface() - } - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return float64(v.Uint()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - } - if s, ok := o.(string); ok { - f, err := strconv.ParseFloat(s, 64) - if err == nil { - return f, true - } - } - return 0, false -} -func getFloatOpFunc(o func(a, b float64) (interface{}, error), f opFunc, typeConversion bool) opFunc { - if typeConversion { - return func(a, b interface{}) (interface{}, error) { - x, k := convertToFloat(a) - y, l := convertToFloat(b) - if k && l { - return o(x, y) - } - - return f(a, b) - } - } - return func(a, b interface{}) (interface{}, error) { - x, k := a.(float64) - y, l := b.(float64) - if k && l { - return o(x, y) - } - - return f(a, b) - } -} - -type operator interface { - merge(operator) operator - precedence() operatorPrecedence - initiate(name string) -} - -type operatorPrecedence uint8 - -func (pre operatorPrecedence) merge(op operator) operator { - if op, ok := op.(operatorPrecedence); ok { - if op > pre { - return op - } - return pre - } - if op == nil { - return pre - } - return op.merge(pre) -} - -func (pre operatorPrecedence) precedence() operatorPrecedence { - return pre -} - -func (pre operatorPrecedence) initiate(name string) {} - -type infix struct { - operatorPrecedence - number func(a, b float64) (interface{}, error) - boolean func(a, b bool) (interface{}, error) - text func(a, b string) (interface{}, error) - arbitrary func(a, b interface{}) (interface{}, error) - shortCircuit func(a interface{}) (interface{}, bool) - builder infixBuilder -} - 
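The convertToFloat and convertToBool helpers above supply the coercion fallbacks wired into every merged operator. Two illustrative evaluations, again assuming upstream gval:

package main

import (
	"fmt"

	"github.com/PaesslerAG/gval"
)

func main() {
	// convertToFloat: numeric strings and any int/uint/float kind become float64.
	n, _ := gval.Evaluate("x * 2", map[string]interface{}{"x": "21"})
	// convertToBool: "FALSE"/"false"/0 read as false, "TRUE"/"true"/non-zero as true.
	b, _ := gval.Evaluate("!flag", map[string]interface{}{"flag": "FALSE"})
	fmt.Println(n, b) // 42 true
}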
-func (op infix) merge(op2 operator) operator { - switch op2 := op2.(type) { - case *infix: - if op2.number != nil { - op.number = op2.number - } - if op2.boolean != nil { - op.boolean = op2.boolean - } - if op2.text != nil { - op.text = op2.text - } - if op2.arbitrary != nil { - op.arbitrary = op2.arbitrary - } - if op2.shortCircuit != nil { - op.shortCircuit = op2.shortCircuit - } - } - if op2 != nil && op2.precedence() > op.operatorPrecedence { - op.operatorPrecedence = op2.precedence() - } - return &op -} - -type directInfix struct { - operatorPrecedence - infixBuilder -} - -func (op directInfix) merge(op2 operator) operator { - switch op2 := op2.(type) { - case operatorPrecedence: - op.operatorPrecedence = op2 - } - if op2 != nil && op2.precedence() > op.operatorPrecedence { - op.operatorPrecedence = op2.precedence() - } - return op -} - -type prefix func(context.Context, *Parser) (Evaluable, error) - -type postfix struct { - operatorPrecedence - f func(context.Context, *Parser, Evaluable, operatorPrecedence) (Evaluable, error) -} - -func (op postfix) merge(op2 operator) operator { - switch op2 := op2.(type) { - case postfix: - if op2.f != nil { - op.f = op2.f - } - } - if op2 != nil && op2.precedence() > op.operatorPrecedence { - op.operatorPrecedence = op2.precedence() - } - return op -} diff --git a/vendor/github.com/PaesslerAG/gval/parse.go b/vendor/github.com/PaesslerAG/gval/parse.go deleted file mode 100644 index 2863b8510d..0000000000 --- a/vendor/github.com/PaesslerAG/gval/parse.go +++ /dev/null @@ -1,303 +0,0 @@ -package gval - -import ( - "context" - "fmt" - "reflect" - "strconv" - "text/scanner" -) - -//ParseExpression scans an expression into an Evaluable. -func (p *Parser) ParseExpression(c context.Context) (eval Evaluable, err error) { - stack := stageStack{} - for { - eval, err = p.ParseNextExpression(c) - if err != nil { - return nil, err - } - - if stage, err := p.parseOperator(c, &stack, eval); err != nil { - return nil, err - } else if err = stack.push(stage); err != nil { - return nil, err - } - - if stack.peek().infixBuilder == nil { - return stack.pop().Evaluable, nil - } - } -} - -//ParseNextExpression scans the expression ignoring following operators -func (p *Parser) ParseNextExpression(c context.Context) (eval Evaluable, err error) { - scan := p.Scan() - ex, ok := p.prefixes[scan] - if !ok { - return nil, p.Expected("extensions") - } - return ex(c, p) -} - -func parseString(c context.Context, p *Parser) (Evaluable, error) { - s, err := strconv.Unquote(p.TokenText()) - if err != nil { - return nil, fmt.Errorf("could not parse string: %s", err) - } - return p.Const(s), nil -} - -func parseNumber(c context.Context, p *Parser) (Evaluable, error) { - n, err := strconv.ParseFloat(p.TokenText(), 64) - if err != nil { - return nil, err - } - return p.Const(n), nil -} - -func parseParentheses(c context.Context, p *Parser) (Evaluable, error) { - eval, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - switch p.Scan() { - case ')': - return eval, nil - default: - return nil, p.Expected("parentheses", ')') - } -} - -func (p *Parser) parseOperator(c context.Context, stack *stageStack, eval Evaluable) (st stage, err error) { - for { - scan := p.Scan() - op := p.TokenText() - mustOp := false - if p.isSymbolOperation(scan) { - scan = p.Peek() - for p.isSymbolOperation(scan) { - mustOp = true - op += string(scan) - p.Next() - scan = p.Peek() - } - } else if scan != scanner.Ident { - p.Camouflage("operator") - return stage{Evaluable: eval}, nil - } - 
operator, _ := p.operators[op] - switch operator := operator.(type) { - case *infix: - return stage{ - Evaluable: eval, - infixBuilder: operator.builder, - operatorPrecedence: operator.operatorPrecedence, - }, nil - case directInfix: - return stage{ - Evaluable: eval, - infixBuilder: operator.infixBuilder, - operatorPrecedence: operator.operatorPrecedence, - }, nil - case postfix: - if err = stack.push(stage{ - operatorPrecedence: operator.operatorPrecedence, - Evaluable: eval, - }); err != nil { - return stage{}, err - } - eval, err = operator.f(c, p, stack.pop().Evaluable, operator.operatorPrecedence) - if err != nil { - return - } - continue - } - - if !mustOp { - p.Camouflage("operator") - return stage{Evaluable: eval}, nil - } - return stage{}, fmt.Errorf("unknown operator %s", op) - } -} - -func parseIdent(c context.Context, p *Parser) (call string, alternative func() (Evaluable, error), err error) { - token := p.TokenText() - return token, - func() (Evaluable, error) { - fullname := token - - keys := []Evaluable{p.Const(token)} - for { - scan := p.Scan() - switch scan { - case '.': - scan = p.Scan() - switch scan { - case scanner.Ident: - token = p.TokenText() - keys = append(keys, p.Const(token)) - default: - return nil, p.Expected("field", scanner.Ident) - } - case '(': - args, err := p.parseArguments(c) - if err != nil { - return nil, err - } - return p.callEvaluable(fullname, p.Var(keys...), args...), nil - case '[': - key, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - switch p.Scan() { - case ']': - keys = append(keys, key) - default: - return nil, p.Expected("array key", ']') - } - default: - p.Camouflage("variable", '.', '(', '[') - return p.Var(keys...), nil - } - } - }, nil - -} - -func (p *Parser) parseArguments(c context.Context) (args []Evaluable, err error) { - if p.Scan() == ')' { - return - } - p.Camouflage("scan arguments", ')') - for { - arg, err := p.ParseExpression(c) - args = append(args, arg) - if err != nil { - return nil, err - } - switch p.Scan() { - case ')': - return args, nil - case ',': - default: - return nil, p.Expected("arguments", ')', ',') - } - } -} - -func inArray(a, b interface{}) (interface{}, error) { - col, ok := b.([]interface{}) - if !ok { - return nil, fmt.Errorf("expected type []interface{} for in operator but got %T", b) - } - for _, value := range col { - if reflect.DeepEqual(a, value) { - return true, nil - } - } - return false, nil -} - -func parseIf(c context.Context, p *Parser, e Evaluable) (Evaluable, error) { - a, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - b := p.Const(nil) - switch p.Scan() { - case ':': - b, err = p.ParseExpression(c) - if err != nil { - return nil, err - } - case scanner.EOF: - default: - return nil, p.Expected("<> ? 
<> : <>", ':', scanner.EOF) - } - return func(c context.Context, v interface{}) (interface{}, error) { - x, err := e(c, v) - if err != nil { - return nil, err - } - if x == false || x == nil { - return b(c, v) - } - return a(c, v) - }, nil -} - -func parseJSONArray(c context.Context, p *Parser) (Evaluable, error) { - evals := []Evaluable{} - for { - switch p.Scan() { - default: - p.Camouflage("array", ',', ']') - eval, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - evals = append(evals, eval) - case ',': - case ']': - return func(c context.Context, v interface{}) (interface{}, error) { - vs := make([]interface{}, len(evals)) - for i, e := range evals { - eval, err := e(c, v) - if err != nil { - return nil, err - } - vs[i] = eval - } - - return vs, nil - }, nil - } - } -} - -func parseJSONObject(c context.Context, p *Parser) (Evaluable, error) { - type kv struct { - key Evaluable - value Evaluable - } - evals := []kv{} - for { - switch p.Scan() { - default: - p.Camouflage("object", ',', '}') - key, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - if p.Scan() != ':' { - if err != nil { - return nil, p.Expected("object", ':') - } - } - value, err := p.ParseExpression(c) - if err != nil { - return nil, err - } - evals = append(evals, kv{key, value}) - case ',': - case '}': - return func(c context.Context, v interface{}) (interface{}, error) { - vs := map[string]interface{}{} - for _, e := range evals { - value, err := e.value(c, v) - if err != nil { - return nil, err - } - key, err := e.key.EvalString(c, v) - if err != nil { - return nil, err - } - vs[key] = value - } - return vs, nil - }, nil - } - } -} diff --git a/vendor/github.com/PaesslerAG/gval/parser.go b/vendor/github.com/PaesslerAG/gval/parser.go deleted file mode 100644 index 54b1004d60..0000000000 --- a/vendor/github.com/PaesslerAG/gval/parser.go +++ /dev/null @@ -1,117 +0,0 @@ -package gval - -import ( - "bytes" - "fmt" - "strings" - "text/scanner" - "unicode" -) - -//Parser parses expressions in a Language into an Evaluable -type Parser struct { - scanner scanner.Scanner - Language - lastScan rune - camouflage error -} - -func newParser(expression string, l Language) *Parser { - sc := scanner.Scanner{} - sc.Init(strings.NewReader(expression)) - sc.Error = func(*scanner.Scanner, string) { return } - sc.IsIdentRune = func(r rune, pos int) bool { return unicode.IsLetter(r) || r == '_' || (pos > 0 && unicode.IsDigit(r)) } - sc.Filename = expression + "\t" - return &Parser{scanner: sc, Language: l} -} - -// Scan reads the next token or Unicode character from source and returns it. -// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set. -// It returns scanner.EOF at the end of the source. -func (p *Parser) Scan() rune { - if p.isCamouflaged() { - p.camouflage = nil - return p.lastScan - } - p.camouflage = nil - p.lastScan = p.scanner.Scan() - return p.lastScan -} - -func (p *Parser) isCamouflaged() bool { - return p.camouflage != nil && p.camouflage != errCamouflageAfterNext -} - -// Camouflage rewind the last Scan(). The Parser holds the camouflage error until -// the next Scan() -// Do not call Rewind() on a camouflaged Parser -func (p *Parser) Camouflage(unit string, expected ...rune) { - if p.isCamouflaged() { - panic(fmt.Errorf("can only Camouflage() after Scan(): %v", p.camouflage)) - } - p.camouflage = p.Expected(unit, expected...) - return -} - -// Peek returns the next Unicode character in the source without advancing -// the scanner. 
It returns EOF if the scanner's position is at the last -// character of the source. -// Do not call Peek() on a camouflaged Parser -func (p *Parser) Peek() rune { - if p.isCamouflaged() { - panic("can not Peek() on camouflaged Parser") - } - return p.scanner.Peek() -} - -var errCamouflageAfterNext = fmt.Errorf("Camouflage() after Next()") - -// Next reads and returns the next Unicode character. -// It returns EOF at the end of the source. -// Do not call Next() on a camouflaged Parser -func (p *Parser) Next() rune { - if p.isCamouflaged() { - panic("can not Next() on camouflaged Parser") - } - p.camouflage = errCamouflageAfterNext - return p.scanner.Next() -} - -// TokenText returns the string corresponding to the most recently scanned token. -// Valid after calling Scan(). -func (p *Parser) TokenText() string { - return p.scanner.TokenText() -} - -//Expected returns an error signaling an unexpected Scan() result -func (p *Parser) Expected(unit string, expected ...rune) error { - return unexpectedRune{unit, expected, p.lastScan} -} - -type unexpectedRune struct { - unit string - expected []rune - got rune -} - -func (err unexpectedRune) Error() string { - exp := bytes.Buffer{} - runes := err.expected - switch len(runes) { - default: - for _, r := range runes[:len(runes)-2] { - exp.WriteString(scanner.TokenString(r)) - exp.WriteString(", ") - } - fallthrough - case 2: - exp.WriteString(scanner.TokenString(runes[len(runes)-2])) - exp.WriteString(" or ") - fallthrough - case 1: - exp.WriteString(scanner.TokenString(runes[len(runes)-1])) - case 0: - return fmt.Sprintf("unexpected %s while scanning %s", scanner.TokenString(err.got), err.unit) - } - return fmt.Sprintf("unexpected %s while scanning %s expected %s", scanner.TokenString(err.got), err.unit, exp.String()) -} diff --git a/vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png b/vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png deleted file mode 100644 index 7c23b52b76..0000000000 Binary files a/vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png and /dev/null differ diff --git a/vendor/github.com/PaesslerAG/jsonpath/.gitignore b/vendor/github.com/PaesslerAG/jsonpath/.gitignore deleted file mode 100644 index 98576e3004..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -coverage.out - -manual_test.go -*.out -*.err - -.vscode \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/jsonpath/.travis.yml b/vendor/github.com/PaesslerAG/jsonpath/.travis.yml deleted file mode 100644 index 3790c17601..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -script: ./test.sh - -go: - - 1.9 diff --git a/vendor/github.com/PaesslerAG/jsonpath/LICENSE b/vendor/github.com/PaesslerAG/jsonpath/LICENSE deleted file mode 100644 index 0716dbca1e..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2017, Paessler AG -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/jsonpath/README.md b/vendor/github.com/PaesslerAG/jsonpath/README.md deleted file mode 100644 index 0919a775c0..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/README.md +++ /dev/null @@ -1,11 +0,0 @@ -JSONPath -==== - -[![Build Status](https://api.travis-ci.org/PaesslerAG/jsonpath.svg?branch=master)](https://travis-ci.org/PaesslerAG/jsonpath) -[![Godoc](https://godoc.org/github.com/PaesslerAG/jsonpath?status.png)](https://godoc.org/github.com/PaesslerAG/jsonpath) - -JSONPath is a complete implementation of [http://goessner.net/articles/JsonPath/](http://goessner.net/articles/JsonPath/). -JSONPath can be combined with a script language. In many web samples it's combined with javascript. This framework comes without a script language but can be easily extended with one. See [example](https://godoc.org/github.com/PaesslerAG/jsonpath#example-package--Gval). - -It is based on [Gval](https://github.com/PaesslerAG/gval) and can be combined with the modular expression languages based on gval. -So for script features like multiply, length, regex or many more take a look at the documentation in the [GoDoc](https://godoc.org/github.com/PaesslerAG/jsonpath). \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go b/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go deleted file mode 100644 index cada95c9a7..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package jsonpath is an implementation of http://goessner.net/articles/JsonPath/ -// If a JSONPath contains one of -// [key1, key2 ...], .., *, [min:max], [min:max:step], (? expression) -// all matchs are listed in an []interface{} -// -// The package comes with an extension of JSONPath to access the wildcard values of a match. -// If the JSONPath is used inside of a JSON object, you can use placeholder '#' or '#i' with natural number i -// to access all wildcards values or the ith wildcard -// -// This package can be extended with gval modules for script features like multiply, length, regex or many more. -// So take a look at github.com/PaesslerAG/gval. 
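The jsonpath package being removed here evaluates goessner-style paths against JSON decoded into interface{} values. A minimal sketch of the Get helper it defines (the document is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/PaesslerAG/jsonpath"
)

func main() {
	var v interface{}
	data := `{"store": {"book": [{"title": "Moby Dick"}, {"title": "Sword of Honour"}]}}`
	if err := json.Unmarshal([]byte(data), &v); err != nil {
		panic(err)
	}
	// [*] is a wildcard; all matches come back as an []interface{}.
	titles, err := jsonpath.Get("$.store.book[*].title", v)
	if err != nil {
		panic(err)
	}
	fmt.Println(titles) // [Moby Dick Sword of Honour]
}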
-package jsonpath - -import ( - "context" - - "github.com/PaesslerAG/gval" -) - -// New returns an selector for given JSONPath -func New(path string) (gval.Evaluable, error) { - return lang.NewEvaluable(path) -} - -//Get executes given JSONPath on given value -func Get(path string, value interface{}) (interface{}, error) { - eval, err := lang.NewEvaluable(path) - if err != nil { - return nil, err - } - return eval(context.Background(), value) -} - -var lang = gval.NewLanguage( - gval.Base(), - gval.PrefixExtension('$', parseRootPath), - gval.PrefixExtension('@', parseCurrentPath), -) - -//Language is the JSONPath Language -func Language() gval.Language { - return lang -} - -var placeholderExtension = gval.NewLanguage( - lang, - gval.PrefixExtension('{', parseJSONObject), - gval.PrefixExtension('#', parsePlaceholder), -) - -//PlaceholderExtension is the JSONPath Language with placeholder -func PlaceholderExtension() gval.Language { - return placeholderExtension -} diff --git a/vendor/github.com/PaesslerAG/jsonpath/parse.go b/vendor/github.com/PaesslerAG/jsonpath/parse.go deleted file mode 100644 index 18a4cb37cb..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/parse.go +++ /dev/null @@ -1,204 +0,0 @@ -package jsonpath - -import ( - "context" - "fmt" - "math" - "text/scanner" - - "github.com/PaesslerAG/gval" -) - -type parser struct { - *gval.Parser - path path -} - -func parseRootPath(ctx context.Context, gParser *gval.Parser) (r gval.Evaluable, err error) { - p := newParser(gParser) - return p.parse(ctx) -} - -func parseCurrentPath(ctx context.Context, gParser *gval.Parser) (r gval.Evaluable, err error) { - p := newParser(gParser) - p.appendPlainSelector(currentElementSelector()) - return p.parse(ctx) -} - -func newParser(p *gval.Parser) *parser { - return &parser{Parser: p, path: plainPath{}} -} - -func (p *parser) parse(c context.Context) (r gval.Evaluable, err error) { - err = p.parsePath(c) - - if err != nil { - return nil, err - } - return p.path.evaluate, nil -} - -func (p *parser) parsePath(c context.Context) error { - switch p.Scan() { - case '.': - return p.parseSelect(c) - case '[': - keys, seperator, err := p.parseBracket(c) - - if err != nil { - return err - } - - switch seperator { - case ':': - if len(keys) > 3 { - return fmt.Errorf("range query has at least the parameter [min:max:step]") - } - keys = append(keys, []gval.Evaluable{ - p.Const(0), p.Const(float64(math.MaxInt32)), p.Const(1)}[len(keys):]...) 
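The bracket parser above recognizes ranges ([min:max:step]), filters ([?...]) and multi-selects. A sketch of the first two, assuming upstream jsonpath and an illustrative document; error returns from Get are elided for brevity:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/PaesslerAG/jsonpath"
)

func main() {
	var v interface{}
	data := `{"book": [{"title": "A", "price": 8}, {"title": "B", "price": 13}, {"title": "C", "price": 5}]}`
	if err := json.Unmarshal([]byte(data), &v); err != nil {
		panic(err)
	}
	// Range: indices 0 and 1 (max is exclusive, step defaults to 1).
	firstTwo, _ := jsonpath.Get("$.book[0:2].title", v)
	// Filter: @ is the current element while the bracket expression runs.
	cheap, _ := jsonpath.Get("$.book[?(@.price < 10)].title", v)
	fmt.Println(firstTwo, cheap) // [A B] [A C]
}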
- p.appendAmbiguousSelector(rangeSelector(keys[0], keys[1], keys[2])) - case '?': - if len(keys) != 1 { - return fmt.Errorf("filter needs exactly one key") - } - p.appendAmbiguousSelector(filterSelector(keys[0])) - default: - if len(keys) == 1 { - p.appendPlainSelector(directSelector(keys[0])) - } else { - p.appendAmbiguousSelector(multiSelector(keys)) - } - } - return p.parsePath(c) - case '(': - return p.parseScript(c) - default: - p.Camouflage("jsonpath", '.', '[', '(') - return nil - } -} - -func (p *parser) parseSelect(c context.Context) error { - scan := p.Scan() - switch scan { - case scanner.Ident: - p.appendPlainSelector(directSelector(p.Const(p.TokenText()))) - return p.parsePath(c) - case '.': - p.appendAmbiguousSelector(mapperSelector()) - return p.parseMapper(c) - case '*': - p.appendAmbiguousSelector(starSelector()) - return p.parsePath(c) - default: - return p.Expected("JSON select", scanner.Ident, '.', '*') - } -} - -func (p *parser) parseBracket(c context.Context) (keys []gval.Evaluable, seperator rune, err error) { - for { - scan := p.Scan() - skipScan := false - switch scan { - case '?': - skipScan = true - case ':': - i := float64(0) - if len(keys) == 1 { - i = math.MaxInt32 - } - keys = append(keys, p.Const(i)) - skipScan = true - case '*': - if p.Scan() != ']' { - return nil, 0, p.Expected("JSON bracket star", ']') - } - return []gval.Evaluable{}, 0, nil - case ']': - if seperator == ':' { - skipScan = true - break - } - fallthrough - default: - p.Camouflage("jsonpath brackets") - key, err := p.ParseExpression(c) - if err != nil { - return nil, 0, err - } - keys = append(keys, key) - } - if !skipScan { - scan = p.Scan() - } - if seperator == 0 { - seperator = scan - } - switch scan { - case ':', ',': - case ']': - return - case '?': - if len(keys) != 0 { - return nil, 0, p.Expected("JSON filter", ']') - } - default: - return nil, 0, p.Expected("JSON bracket separator", ':', ',') - } - if seperator != scan { - return nil, 0, fmt.Errorf("mixed %v and %v in JSON bracket", seperator, scan) - } - } -} - -func (p *parser) parseMapper(c context.Context) error { - scan := p.Scan() - switch scan { - case scanner.Ident: - p.appendPlainSelector(directSelector(p.Const(p.TokenText()))) - case '[': - keys, seperator, err := p.parseBracket(c) - - if err != nil { - return err - } - switch seperator { - case ':': - return fmt.Errorf("mapper can not be combined with range query") - case '?': - if len(keys) != 1 { - return fmt.Errorf("filter needs exactly one key") - } - p.appendAmbiguousSelector(filterSelector(keys[0])) - default: - p.appendAmbiguousSelector(multiSelector(keys)) - } - case '*': - p.appendAmbiguousSelector(starSelector()) - case '(': - return p.parseScript(c) - default: - return p.Expected("JSON mapper", '[', scanner.Ident, '*') - } - return p.parsePath(c) -} - -func (p *parser) parseScript(c context.Context) error { - script, err := p.ParseExpression(c) - if err != nil { - return err - } - if p.Scan() != ')' { - return p.Expected("jsnopath script", ')') - } - p.appendPlainSelector(newScript(script)) - return p.parsePath(c) -} - -func (p *parser) appendPlainSelector(next plainSelector) { - p.path = p.path.withPlainSelector(next) -} - -func (p *parser) appendAmbiguousSelector(next ambiguousSelector) { - p.path = p.path.withAmbiguousSelector(next) -} diff --git a/vendor/github.com/PaesslerAG/jsonpath/path.go b/vendor/github.com/PaesslerAG/jsonpath/path.go deleted file mode 100644 index b8e784d842..0000000000 --- a/vendor/github.com/PaesslerAG/jsonpath/path.go +++ 
/dev/null
@@ -1,103 +0,0 @@
-package jsonpath
-
-import "context"
-
-type path interface {
-	evaluate(c context.Context, parameter interface{}) (interface{}, error)
-	visitMatchs(c context.Context, r interface{}, visit pathMatcher)
-	withPlainSelector(plainSelector) path
-	withAmbiguousSelector(ambiguousSelector) path
-}
-
-type plainPath []plainSelector
-
-type ambiguousMatcher func(key, v interface{})
-
-func (p plainPath) evaluate(ctx context.Context, root interface{}) (interface{}, error) {
-	return p.evaluatePath(ctx, root, root)
-}
-
-func (p plainPath) evaluatePath(ctx context.Context, root, value interface{}) (interface{}, error) {
-	var err error
-	for _, sel := range p {
-		value, err = sel(ctx, root, value)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return value, nil
-}
-
-func (p plainPath) matcher(ctx context.Context, r interface{}, match ambiguousMatcher) ambiguousMatcher {
-	if len(p) == 0 {
-		return match
-	}
-	return func(k, v interface{}) {
-		res, err := p.evaluatePath(ctx, r, v)
-		if err == nil {
-			match(k, res)
-		}
-	}
-}
-
-func (p plainPath) visitMatchs(ctx context.Context, r interface{}, visit pathMatcher) {
-	res, err := p.evaluatePath(ctx, r, r)
-	if err == nil {
-		visit(nil, res)
-	}
-}
-
-func (p plainPath) withPlainSelector(selector plainSelector) path {
-	return append(p, selector)
-}
-func (p plainPath) withAmbiguousSelector(selector ambiguousSelector) path {
-	return &ambiguousPath{
-		parent: p,
-		branch: selector,
-	}
-}
-
-type ambiguousPath struct {
-	parent path
-	branch ambiguousSelector
-	ending plainPath
-}
-
-func (p *ambiguousPath) evaluate(ctx context.Context, parameter interface{}) (interface{}, error) {
-	matchs := []interface{}{}
-	p.visitMatchs(ctx, parameter, func(keys []interface{}, match interface{}) {
-		matchs = append(matchs, match)
-	})
-	return matchs, nil
-}
-
-func (p *ambiguousPath) visitMatchs(ctx context.Context, r interface{}, visit pathMatcher) {
-	p.parent.visitMatchs(ctx, r, func(keys []interface{}, v interface{}) {
-		p.branch(ctx, r, v, p.ending.matcher(ctx, r, visit.matcher(keys)))
-	})
-}
-
-func (p *ambiguousPath) branchMatcher(ctx context.Context, r interface{}, m ambiguousMatcher) ambiguousMatcher {
-	return func(k, v interface{}) {
-		p.branch(ctx, r, v, m)
-	}
-}
-
-func (p *ambiguousPath) withPlainSelector(selector plainSelector) path {
-	p.ending = append(p.ending, selector)
-	return p
-}
-func (p *ambiguousPath) withAmbiguousSelector(selector ambiguousSelector) path {
-	return &ambiguousPath{
-		parent: p,
-		branch: selector,
-	}
-}
-
-type pathMatcher func(keys []interface{}, match interface{})
-
-func (m pathMatcher) matcher(keys []interface{}) ambiguousMatcher {
-	return func(key, match interface{}) {
-		m(append(keys, key), match)
-	}
-}
diff --git a/vendor/github.com/PaesslerAG/jsonpath/placeholder.go b/vendor/github.com/PaesslerAG/jsonpath/placeholder.go
deleted file mode 100644
index d1cd063f4c..0000000000
--- a/vendor/github.com/PaesslerAG/jsonpath/placeholder.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package jsonpath
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"strconv"
-	"text/scanner"
-
-	"github.com/PaesslerAG/gval"
-)
-
-type keyValueVisitor func(key string, value interface{})
-
-type jsonObject interface {
-	visitElements(c context.Context, v interface{}, visit keyValueVisitor) error
-}
-
-type jsonObjectSlice []jsonObject
-
-type keyValuePair struct {
-	key   gval.Evaluable
-	value gval.Evaluable
-}
-
-type keyValueMatcher struct {
-	key     gval.Evaluable
-	matcher func(c context.Context, r interface{}, visit pathMatcher)
-}
-
-func parseJSONObject(ctx context.Context, p *gval.Parser) (gval.Evaluable, error) {
-	evals := jsonObjectSlice{}
-	for {
-		switch p.Scan() {
-		default:
-			hasWildcard := false
-
-			p.Camouflage("object", ',', '}')
-			key, err := p.ParseExpression(context.WithValue(ctx, hasPlaceholdersContextKey{}, &hasWildcard))
-			if err != nil {
-				return nil, err
-			}
-			if p.Scan() != ':' {
-				if err != nil {
-					return nil, p.Expected("object", ':')
-				}
-			}
-			e, err := parseJSONObjectElement(ctx, p, hasWildcard, key)
-			if err != nil {
-				return nil, err
-			}
-			evals.addElements(e)
-		case ',':
-		case '}':
-			return evals.evaluable, nil
-		}
-	}
-}
-
-func parseJSONObjectElement(ctx context.Context, gParser *gval.Parser, hasWildcard bool, key gval.Evaluable) (jsonObject, error) {
-	if hasWildcard {
-		p := newParser(gParser)
-		switch gParser.Scan() {
-		case '$':
-		case '@':
-			p.appendPlainSelector(currentElementSelector())
-		default:
-			return nil, p.Expected("JSONPath key and value")
-		}
-
-		if err := p.parsePath(ctx); err != nil {
-			return nil, err
-		}
-		return keyValueMatcher{key, p.path.visitMatchs}, nil
-	}
-	value, err := gParser.ParseExpression(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return keyValuePair{key, value}, nil
-}
-
-func (kv keyValuePair) visitElements(c context.Context, v interface{}, visit keyValueVisitor) error {
-	value, err := kv.value(c, v)
-	if err != nil {
-		return err
-	}
-	key, err := kv.key.EvalString(c, v)
-	if err != nil {
-		return err
-	}
-	visit(key, value)
-	return nil
-}
-
-func (kv keyValueMatcher) visitElements(c context.Context, v interface{}, visit keyValueVisitor) (err error) {
-	kv.matcher(c, v, func(keys []interface{}, match interface{}) {
-		key, er := kv.key.EvalString(context.WithValue(c, placeholdersContextKey{}, keys), v)
-		if er != nil {
-			err = er
-		}
-		visit(key, match)
-	})
-	return
-}
-
-func (j *jsonObjectSlice) addElements(e jsonObject) {
-	*j = append(*j, e)
-}
-
-func (j jsonObjectSlice) evaluable(c context.Context, v interface{}) (interface{}, error) {
-	vs := map[string]interface{}{}
-
-	err := j.visitElements(c, v, func(key string, value interface{}) { vs[key] = value })
-	if err != nil {
-		return nil, err
-	}
-	return vs, nil
-}
-
-func (j jsonObjectSlice) visitElements(ctx context.Context, v interface{}, visit keyValueVisitor) (err error) {
-	for _, e := range j {
-		if err := e.visitElements(ctx, v, visit); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func parsePlaceholder(c context.Context, p *gval.Parser) (gval.Evaluable, error) {
-	hasWildcard := c.Value(hasPlaceholdersContextKey{})
-	if hasWildcard == nil {
-		return nil, fmt.Errorf("JSONPath placeholder must only be used in an JSON object key")
-	}
-	*(hasWildcard.(*bool)) = true
-	switch p.Scan() {
-	case scanner.Int:
-		id, err := strconv.Atoi(p.TokenText())
-		if err != nil {
-			return nil, err
-		}
-		return placeholder(id).evaluable, nil
-	default:
-		p.Camouflage("JSONPath placeholder")
-		return allPlaceholders.evaluable, nil
-	}
-}
-
-type hasPlaceholdersContextKey struct{}
-
-type placeholdersContextKey struct{}
-
-type placeholder int
-
-const allPlaceholders = placeholder(-1)
-
-func (key placeholder) evaluable(c context.Context, v interface{}) (interface{}, error) {
-	wildcards, ok := c.Value(placeholdersContextKey{}).([]interface{})
-	if !ok || len(wildcards) <= int(key) {
-		return nil, fmt.Errorf("JSONPath placeholder #%d is not available", key)
-	}
-	if key == allPlaceholders {
-		sb := bytes.Buffer{}
-		sb.WriteString("$")
-		quoteWildcardValues(&sb, wildcards)
-		return sb.String(), nil
-	}
-	return wildcards[int(key)], nil
-}
-
-func quoteWildcardValues(sb *bytes.Buffer, wildcards []interface{}) {
-	for _, w := range wildcards {
-		if wildcards, ok := w.([]interface{}); ok {
-			quoteWildcardValues(sb, wildcards)
-			continue
-		}
-		sb.WriteString(fmt.Sprintf("[%v]",
-			strconv.Quote(fmt.Sprint(w)),
-		))
-	}
-}
diff --git a/vendor/github.com/PaesslerAG/jsonpath/selector.go b/vendor/github.com/PaesslerAG/jsonpath/selector.go
deleted file mode 100644
index 46670c249a..0000000000
--- a/vendor/github.com/PaesslerAG/jsonpath/selector.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package jsonpath
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-
-	"github.com/PaesslerAG/gval"
-)
-
-//plainSelector evaluate exactly one result
-type plainSelector func(c context.Context, r, v interface{}) (interface{}, error)
-
-//ambiguousSelector evaluate wildcard
-type ambiguousSelector func(c context.Context, r, v interface{}, match ambiguousMatcher)
-
-//@
-func currentElementSelector() plainSelector {
-	return func(c context.Context, r, v interface{}) (interface{}, error) {
-		return c.Value(currentElement{}), nil
-	}
-}
-
-type currentElement struct{}
-
-func currentContext(c context.Context, v interface{}) context.Context {
-	return context.WithValue(c, currentElement{}, v)
-}
-
-//.x, [x]
-func directSelector(key gval.Evaluable) plainSelector {
-	return func(c context.Context, r, v interface{}) (interface{}, error) {
-
-		e, _, err := selectValue(c, key, r, v)
-		if err != nil {
-			return nil, err
-		}
-
-		return e, nil
-	}
-}
-
-// * / [*]
-func starSelector() ambiguousSelector {
-	return func(c context.Context, r, v interface{}, match ambiguousMatcher) {
-		visitAll(v, func(key string, val interface{}) { match(key, val) })
-	}
-}
-
-// [x, ...]
-func multiSelector(keys []gval.Evaluable) ambiguousSelector {
-	if len(keys) == 0 {
-		return starSelector()
-	}
-	return func(c context.Context, r, v interface{}, match ambiguousMatcher) {
-		for _, k := range keys {
-			e, wildcard, err := selectValue(c, k, r, v)
-			if err != nil {
-				continue
-			}
-			match(wildcard, e)
-		}
-	}
-}
-
-func selectValue(c context.Context, key gval.Evaluable, r, v interface{}) (value interface{}, jkey string, err error) {
-	c = currentContext(c, v)
-	switch o := v.(type) {
-	case []interface{}:
-		i, err := key.EvalInt(c, r)
-		if err != nil {
-			return nil, "", fmt.Errorf("could not select value, invalid key: %s", err)
-		}
-		if i < 0 || i >= len(o) {
-			return nil, "", fmt.Errorf("index %d out of bounds", i)
-		}
-		return o[i], strconv.Itoa(i), nil
-	case map[string]interface{}:
-		k, err := key.EvalString(c, r)
-		if err != nil {
-			return nil, "", fmt.Errorf("could not select value, invalid key: %s", err)
-		}
-
-		if r, ok := o[k]; ok {
-			return r, k, nil
-		}
-		return nil, "", fmt.Errorf("unknown key %s", k)
-
-	default:
-		return nil, "", fmt.Errorf("unsupported value type %T for select, expected map[string]interface{} or []interface{}", o)
-	}
-}
-
-//..
-func mapperSelector() ambiguousSelector {
	return mapper
-}
-
-func mapper(c context.Context, r, v interface{}, match ambiguousMatcher) {
-	match([]interface{}{}, v)
-	visitAll(v, func(wildcard string, v interface{}) {
-		mapper(c, r, v, func(key interface{}, v interface{}) {
-			match(append([]interface{}{wildcard}, key.([]interface{})...), v)
-		})
-	})
-}
-
-func visitAll(v interface{}, visit func(key string, v interface{})) {
-	switch v := v.(type) {
-	case []interface{}:
-		for i, e := range v {
-			k := strconv.Itoa(i)
-			visit(k, e)
-		}
-	case map[string]interface{}:
-		for k, e := range v {
-			visit(k, e)
-		}
-	}
-
-}
-
-//[? ]
-func filterSelector(filter gval.Evaluable) ambiguousSelector {
-	return func(c context.Context, r, v interface{}, match ambiguousMatcher) {
-		visitAll(v, func(wildcard string, v interface{}) {
-			condition, err := filter.EvalBool(currentContext(c, v), r)
-			if err != nil {
-				return
-			}
-			if condition {
-				match(wildcard, v)
-			}
-		})
-	}
-}
-
-//[::]
-func rangeSelector(min, max, step gval.Evaluable) ambiguousSelector {
-	return func(c context.Context, r, v interface{}, match ambiguousMatcher) {
-		cs, ok := v.([]interface{})
-		if !ok {
-			return
-		}
-
-		c = currentContext(c, v)
-
-		min, err := min.EvalInt(c, r)
-		if err != nil {
-			return
-		}
-		max, err := max.EvalInt(c, r)
-		if err != nil {
-			return
-		}
-		step, err := step.EvalInt(c, r)
-		if err != nil {
-			return
-		}
-
-		if min > max {
-			return
-		}
-
-		n := len(cs)
-		min = negmax(min, n)
-		max = negmax(max, n)
-
-		if step == 0 {
-			step = 1
-		}
-
-		if step > 0 {
-			for i := min; i < max; i += step {
-				match(strconv.Itoa(i), cs[i])
-			}
-		} else {
-			for i := max - 1; i >= min; i += step {
-				match(strconv.Itoa(i), cs[i])
-			}
-		}
-
-	}
-}
-
-func negmax(n, max int) int {
-	if n < 0 {
-		n = max + n
-		if n < 0 {
-			n = 0
-		}
-	} else if n > max {
-		return max
-	}
-	return n
-}
-
-// ()
-func newScript(script gval.Evaluable) plainSelector {
-	return func(c context.Context, r, v interface{}) (interface{}, error) {
-		return script(currentContext(c, v), r)
-	}
-}
diff --git a/vendor/github.com/PaesslerAG/jsonpath/test.sh b/vendor/github.com/PaesslerAG/jsonpath/test.sh
deleted file mode 100644
index 09ae6b98e5..0000000000
--- a/vendor/github.com/PaesslerAG/jsonpath/test.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-
-# Script that runs tests, code coverage, and benchmarks all at once.
-
-JSONPath_PATH=$HOME/gopath/src/github.com/PaesslerAG/jsonpath
-
-# run the actual tests.
-cd "${JSONPath_PATH}"
-go test -bench=. -benchmem -coverprofile coverage.out
-status=$?
-
-if [ "${status}" != 0 ];
-then
-	exit $status
-fi
diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/.travis.yml b/vendor/github.com/ReneKroon/ttlcache/v2/.travis.yml
deleted file mode 100644
index 06007d3691..0000000000
--- a/vendor/github.com/ReneKroon/ttlcache/v2/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-language: go
-
-go:
-  - "1.17.x"
-  - "1.16.x"
-
-git:
-  depth: 1
-
-install:
-  - go install -race std
-  - go install golang.org/x/tools/cmd/cover
-  - go install golang.org/x/lint/golint
-  - export PATH=$HOME/gopath/bin:$PATH
-  - go get golang.org/x/tools/cmd/cover
-  - go get github.com/mattn/goveralls
-
-script:
-  - golint .
-  - go test -cover -race -count=1 -timeout=30s -run .
-  - go test -covermode=count -coverprofile=coverage.out -timeout=90s -run .
-  - if test ! -z "$COVERALLS_TOKEN"; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
-  - cd bench; go test -run=Bench.* -bench=. -benchmem; cd ..
\ No newline at end of file
diff --git a/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
new file mode 100644
index 0000000000..0c44dcefe3
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go
new file mode 100644
index 0000000000..1d47c93aae
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/alibabacloud-gateway-spi/client/client.go
@@ -0,0 +1,305 @@
+// This file is auto-generated, don't edit it. Thanks.
+package client
+
+import (
+	"io"
+
+	"github.com/alibabacloud-go/tea/tea"
+	credential "github.com/aliyun/credentials-go/credentials"
+)
+
+type InterceptorContext struct {
+	Request *InterceptorContextRequest `json:"request,omitempty" xml:"request,omitempty" require:"true" type:"Struct"`
+	Configuration *InterceptorContextConfiguration `json:"configuration,omitempty" xml:"configuration,omitempty" require:"true" type:"Struct"`
+	Response *InterceptorContextResponse `json:"response,omitempty" xml:"response,omitempty" require:"true" type:"Struct"`
+}
+
+func (s InterceptorContext) String() string {
+	return tea.Prettify(s)
+}
+
+func (s InterceptorContext) GoString() string {
+	return s.String()
+}
+
+func (s *InterceptorContext) SetRequest(v *InterceptorContextRequest) *InterceptorContext {
+	s.Request = v
+	return s
+}
+
+func (s *InterceptorContext) SetConfiguration(v *InterceptorContextConfiguration) *InterceptorContext {
+	s.Configuration = v
+	return s
+}
+
+func (s *InterceptorContext) SetResponse(v *InterceptorContextResponse) *InterceptorContext {
+	s.Response = v
+	return s
+}
+
+type InterceptorContextRequest struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"`
+	Query map[string]*string `json:"query,omitempty" xml:"query,omitempty"`
+	Body interface{} `json:"body,omitempty" xml:"body,omitempty"`
+	Stream io.Reader `json:"stream,omitempty" xml:"stream,omitempty"`
+	HostMap map[string]*string `json:"hostMap,omitempty" xml:"hostMap,omitempty"`
+	Pathname *string `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"`
+	ProductId *string `json:"productId,omitempty" xml:"productId,omitempty" require:"true"`
+	Action *string `json:"action,omitempty" xml:"action,omitempty" require:"true"`
+	Version *string `json:"version,omitempty" xml:"version,omitempty" require:"true"`
+	Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"`
+	Method *string `json:"method,omitempty" xml:"method,omitempty" require:"true"`
+	AuthType *string `json:"authType,omitempty" xml:"authType,omitempty" require:"true"`
+	BodyType *string `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"`
+	ReqBodyType *string `json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"`
+	Style *string `json:"style,omitempty" xml:"style,omitempty"`
+	Credential credential.Credential `json:"credential,omitempty" xml:"credential,omitempty" require:"true"`
+	SignatureVersion *string `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"`
+	SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"`
+	UserAgent *string `json:"userAgent,omitempty" xml:"userAgent,omitempty" require:"true"`
+}
+
+func (s InterceptorContextRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s InterceptorContextRequest) GoString() string {
+	return s.String()
+}
+
+func (s *InterceptorContextRequest) SetHeaders(v map[string]*string) *InterceptorContextRequest {
+	s.Headers = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetQuery(v map[string]*string) *InterceptorContextRequest {
+	s.Query = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetBody(v interface{}) *InterceptorContextRequest {
+	s.Body = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetStream(v io.Reader) *InterceptorContextRequest {
+	s.Stream = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetHostMap(v map[string]*string) *InterceptorContextRequest {
+	s.HostMap = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetPathname(v string) *InterceptorContextRequest {
+	s.Pathname = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetProductId(v string) *InterceptorContextRequest {
+	s.ProductId = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetAction(v string) *InterceptorContextRequest {
+	s.Action = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetVersion(v string) *InterceptorContextRequest {
+	s.Version = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetProtocol(v string) *InterceptorContextRequest {
+	s.Protocol = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetMethod(v string) *InterceptorContextRequest {
+	s.Method = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetAuthType(v string) *InterceptorContextRequest {
+	s.AuthType = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetBodyType(v string) *InterceptorContextRequest {
+	s.BodyType = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetReqBodyType(v string) *InterceptorContextRequest {
+	s.ReqBodyType = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetStyle(v string) *InterceptorContextRequest {
+	s.Style = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetCredential(v credential.Credential) *InterceptorContextRequest {
+	s.Credential = v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetSignatureVersion(v string) *InterceptorContextRequest {
+	s.SignatureVersion = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetSignatureAlgorithm(v string) *InterceptorContextRequest {
+	s.SignatureAlgorithm = &v
+	return s
+}
+
+func (s *InterceptorContextRequest) SetUserAgent(v string) *InterceptorContextRequest {
+	s.UserAgent = &v
+	return s
+}
+
+type InterceptorContextConfiguration struct {
+	RegionId *string `json:"regionId,omitempty" xml:"regionId,omitempty" require:"true"`
+	Endpoint *string `json:"endpoint,omitempty" xml:"endpoint,omitempty"`
+	EndpointRule *string `json:"endpointRule,omitempty" xml:"endpointRule,omitempty"`
+	EndpointMap map[string]*string `json:"endpointMap,omitempty" xml:"endpointMap,omitempty"`
+	EndpointType *string `json:"endpointType,omitempty" xml:"endpointType,omitempty"`
+	Network *string `json:"network,omitempty" xml:"network,omitempty"`
+	Suffix *string `json:"suffix,omitempty" xml:"suffix,omitempty"`
+}
+
+func (s InterceptorContextConfiguration) String() string {
+	return tea.Prettify(s)
+}
+
+func (s InterceptorContextConfiguration) GoString() string {
+	return s.String()
+}
+
+func (s *InterceptorContextConfiguration) SetRegionId(v string) *InterceptorContextConfiguration {
+	s.RegionId = &v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetEndpoint(v string) *InterceptorContextConfiguration {
+	s.Endpoint = &v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetEndpointRule(v string) *InterceptorContextConfiguration {
+	s.EndpointRule = &v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetEndpointMap(v map[string]*string) *InterceptorContextConfiguration {
+	s.EndpointMap = v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetEndpointType(v string) *InterceptorContextConfiguration {
+	s.EndpointType = &v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetNetwork(v string) *InterceptorContextConfiguration {
+	s.Network = &v
+	return s
+}
+
+func (s *InterceptorContextConfiguration) SetSuffix(v string) *InterceptorContextConfiguration {
+	s.Suffix = &v
+	return s
+}
+
+type InterceptorContextResponse struct {
+	StatusCode *int `json:"statusCode,omitempty" xml:"statusCode,omitempty"`
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"`
+	Body io.Reader `json:"body,omitempty" xml:"body,omitempty"`
+	DeserializedBody interface{} `json:"deserializedBody,omitempty" xml:"deserializedBody,omitempty"`
+}
+
+func (s InterceptorContextResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s InterceptorContextResponse) GoString() string {
+	return s.String()
+}
+
+func (s *InterceptorContextResponse) SetStatusCode(v int) *InterceptorContextResponse {
+	s.StatusCode = &v
+	return s
+}
+
+func (s *InterceptorContextResponse) SetHeaders(v map[string]*string) *InterceptorContextResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *InterceptorContextResponse) SetBody(v io.Reader) *InterceptorContextResponse {
+	s.Body = v
+	return s
+}
+
+func (s *InterceptorContextResponse) SetDeserializedBody(v interface{}) *InterceptorContextResponse {
+	s.DeserializedBody = v
+	return s
+}
+
+type AttributeMap struct {
+	Attributes map[string]interface{} `json:"attributes,omitempty" xml:"attributes,omitempty" require:"true"`
+	Key map[string]*string `json:"key,omitempty" xml:"key,omitempty" require:"true"`
+}
+
+func (s AttributeMap) String() string {
+	return tea.Prettify(s)
+}
+
+func (s AttributeMap) GoString() string {
+	return s.String()
+}
+
+func (s *AttributeMap) SetAttributes(v map[string]interface{}) *AttributeMap {
+	s.Attributes = v
+	return s
+}
+
+func (s *AttributeMap) SetKey(v map[string]*string) *AttributeMap {
+	s.Key = v
+	return s
+}
+
+type ClientInterface interface {
+	ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) error
+	ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) error
+	ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) error
+}
+
+type Client struct {
+}
+
+func NewClient() (*Client, error) {
+	client := new(Client)
+	err := client.Init()
+	return client, err
+}
+
+func (client *Client) Init() (_err error) {
+	return nil
+}
+
+func (client *Client) ModifyConfiguration(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
+	panic("No Support!")
+}
+
+func (client *Client) ModifyRequest(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
+	panic("No Support!")
+}
+
+func (client *Client) ModifyResponse(context *InterceptorContext, attributeMap *AttributeMap) (_err error) {
+	panic("No Support!")
+}
diff --git a/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go b/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go
new file mode 100644
index 0000000000..d2179d673b
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/cr-20160607/client/client.go
@@ -0,0 +1,2513 @@
+// This file is auto-generated, don't edit it. Thanks.
+/**
+ *
+ */
+package client
+
+import (
+	openapi "github.com/alibabacloud-go/darabonba-openapi/client"
+	endpointutil "github.com/alibabacloud-go/endpoint-util/service"
+	openapiutil "github.com/alibabacloud-go/openapi-util/service"
+	util "github.com/alibabacloud-go/tea-utils/service"
+	"github.com/alibabacloud-go/tea/tea"
+)
+
+type CancelRepoBuildResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CancelRepoBuildResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CancelRepoBuildResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CancelRepoBuildResponse) SetHeaders(v map[string]*string) *CancelRepoBuildResponse {
+	s.Headers = v
+	return s
+}
+
+type CreateNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CreateNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CreateNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CreateNamespaceResponse) SetHeaders(v map[string]*string) *CreateNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type CreateRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CreateRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CreateRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CreateRepoResponse) SetHeaders(v map[string]*string) *CreateRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type CreateRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CreateRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CreateRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CreateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *CreateRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type CreateRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CreateRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CreateRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CreateRepoWebhookResponse) SetHeaders(v map[string]*string) *CreateRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type CreateUserInfoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s CreateUserInfoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s CreateUserInfoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *CreateUserInfoResponse) SetHeaders(v map[string]*string) *CreateUserInfoResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteImageResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteImageResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteImageResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteImageResponse) SetHeaders(v map[string]*string) *DeleteImageResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteNamespaceResponse) SetHeaders(v map[string]*string) *DeleteNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoResponse) SetHeaders(v map[string]*string) *DeleteRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoBuildRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type DeleteRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s DeleteRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s DeleteRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *DeleteRepoWebhookResponse) SetHeaders(v map[string]*string) *DeleteRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type GetAuthorizationTokenResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetAuthorizationTokenResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetAuthorizationTokenResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetAuthorizationTokenResponse) SetHeaders(v map[string]*string) *GetAuthorizationTokenResponse {
+	s.Headers = v
+	return s
+}
+
+type GetImageLayerResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetImageLayerResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageLayerResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageLayerResponse) SetHeaders(v map[string]*string) *GetImageLayerResponse {
+	s.Headers = v
+	return s
+}
+
+type GetImageManifestRequest struct {
+	SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"`
+}
+
+func (s GetImageManifestRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageManifestRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageManifestRequest) SetSchemaVersion(v int32) *GetImageManifestRequest {
+	s.SchemaVersion = &v
+	return s
+}
+
+type GetImageManifestResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetImageManifestResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetImageManifestResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetImageManifestResponse) SetHeaders(v map[string]*string) *GetImageManifestResponse {
+	s.Headers = v
+	return s
+}
+
+type GetNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceResponse) SetHeaders(v map[string]*string) *GetNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type GetNamespaceListRequest struct {
+	Authorize *string `json:"Authorize,omitempty" xml:"Authorize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetNamespaceListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceListRequest) SetAuthorize(v string) *GetNamespaceListRequest {
+	s.Authorize = &v
+	return s
+}
+
+func (s *GetNamespaceListRequest) SetStatus(v string) *GetNamespaceListRequest {
+	s.Status = &v
+	return s
+}
+
+type GetNamespaceListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetNamespaceListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetNamespaceListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetNamespaceListResponse) SetHeaders(v map[string]*string) *GetNamespaceListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRegionRequest struct {
+	Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"`
+}
+
+func (s GetRegionRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionRequest) SetDomain(v string) *GetRegionRequest {
+	s.Domain = &v
+	return s
+}
+
+type GetRegionResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRegionResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionResponse) SetHeaders(v map[string]*string) *GetRegionResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRegionListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRegionListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRegionListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRegionListResponse) SetHeaders(v map[string]*string) *GetRegionListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoResponse) SetHeaders(v map[string]*string) *GetRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s GetRepoBuildListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildListRequest) SetPage(v int32) *GetRepoBuildListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoBuildListRequest) SetPageSize(v int32) *GetRepoBuildListRequest {
+	s.PageSize = &v
+	return s
+}
+
+type GetRepoBuildListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildListResponse) SetHeaders(v map[string]*string) *GetRepoBuildListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildRuleListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildRuleListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildRuleListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildRuleListResponse) SetHeaders(v map[string]*string) *GetRepoBuildRuleListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoBuildStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoBuildStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoBuildStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoBuildStatusResponse) SetHeaders(v map[string]*string) *GetRepoBuildStatusResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetRepoListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListRequest) SetPage(v int32) *GetRepoListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoListRequest) SetPageSize(v int32) *GetRepoListRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoListRequest) SetStatus(v string) *GetRepoListRequest {
+	s.Status = &v
+	return s
+}
+
+type GetRepoListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListResponse) SetHeaders(v map[string]*string) *GetRepoListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoListByNamespaceRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s GetRepoListByNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListByNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListByNamespaceRequest) SetPage(v int32) *GetRepoListByNamespaceRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoListByNamespaceRequest) SetPageSize(v int32) *GetRepoListByNamespaceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoListByNamespaceRequest) SetStatus(v string) *GetRepoListByNamespaceRequest {
+	s.Status = &v
+	return s
+}
+
+type GetRepoListByNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoListByNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoListByNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoListByNamespaceResponse) SetHeaders(v map[string]*string) *GetRepoListByNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagResponseBody struct {
+	Digest *string `json:"digest,omitempty" xml:"digest,omitempty"`
+	ImageCreate *int64 `json:"imageCreate,omitempty" xml:"imageCreate,omitempty"`
+	ImageId *string `json:"imageId,omitempty" xml:"imageId,omitempty"`
+	ImageSize *int64 `json:"imageSize,omitempty" xml:"imageSize,omitempty"`
+	ImageUpdate *int64 `json:"imageUpdate,omitempty" xml:"imageUpdate,omitempty"`
+	RequestId *string `json:"requestId,omitempty" xml:"requestId,omitempty"`
+	Status *string `json:"status,omitempty" xml:"status,omitempty"`
+	Tag *string `json:"tag,omitempty" xml:"tag,omitempty"`
+}
+
+func (s GetRepoTagResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponseBody) SetDigest(v string) *GetRepoTagResponseBody {
+	s.Digest = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageCreate(v int64) *GetRepoTagResponseBody {
+	s.ImageCreate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageId(v string) *GetRepoTagResponseBody {
+	s.ImageId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageSize(v int64) *GetRepoTagResponseBody {
+	s.ImageSize = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetImageUpdate(v int64) *GetRepoTagResponseBody {
+	s.ImageUpdate = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetRequestId(v string) *GetRepoTagResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetStatus(v string) *GetRepoTagResponseBody {
+	s.Status = &v
+	return s
+}
+
+func (s *GetRepoTagResponseBody) SetTag(v string) *GetRepoTagResponseBody {
+	s.Tag = &v
+	return s
+}
+
+type GetRepoTagResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepoTagResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagResponse) SetHeaders(v map[string]*string) *GetRepoTagResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepoTagResponse) SetBody(v *GetRepoTagResponseBody) *GetRepoTagResponse {
+	s.Body = v
+	return s
+}
+
+type GetRepoTagScanListRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"`
+}
+
+func (s GetRepoTagScanListRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanListRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanListRequest) SetPage(v int32) *GetRepoTagScanListRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoTagScanListRequest) SetPageSize(v int32) *GetRepoTagScanListRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *GetRepoTagScanListRequest) SetSeverity(v string) *GetRepoTagScanListRequest {
+	s.Severity = &v
+	return s
+}
+
+type GetRepoTagScanListResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanListResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanListResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanListResponse) SetHeaders(v map[string]*string) *GetRepoTagScanListResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagScanStatusResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanStatusResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanStatusResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanStatusResponse) SetHeaders(v map[string]*string) *GetRepoTagScanStatusResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagScanSummaryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagScanSummaryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagScanSummaryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagScanSummaryResponse) SetHeaders(v map[string]*string) *GetRepoTagScanSummaryResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoTagsRequest struct {
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s GetRepoTagsRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagsRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagsRequest) SetPage(v int32) *GetRepoTagsRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *GetRepoTagsRequest) SetPageSize(v int32) *GetRepoTagsRequest {
+	s.PageSize = &v
+	return s
+}
+
+type GetRepoTagsResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoTagsResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoTagsResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoTagsResponse) SetHeaders(v map[string]*string) *GetRepoTagsResponse {
+	s.Headers = v
+	return s
+}
+
+type GetRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepoWebhookResponse) SetHeaders(v map[string]*string) *GetRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type GetResourceQuotaResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s GetResourceQuotaResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetResourceQuotaResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetResourceQuotaResponse) SetHeaders(v map[string]*string) *GetResourceQuotaResponse {
+	s.Headers = v
+	return s
+}
+
+type StartImageScanResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s StartImageScanResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s StartImageScanResponse) GoString() string {
+	return s.String()
+}
+
+func (s *StartImageScanResponse) SetHeaders(v map[string]*string) *StartImageScanResponse {
+	s.Headers = v
+	return s
+}
+
+type StartRepoBuildByRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s StartRepoBuildByRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s StartRepoBuildByRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *StartRepoBuildByRuleResponse) SetHeaders(v map[string]*string) *StartRepoBuildByRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateNamespaceResponse) SetHeaders(v map[string]*string) *UpdateNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoResponse) SetHeaders(v map[string]*string) *UpdateRepoResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoBuildRuleResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoBuildRuleResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoBuildRuleResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *UpdateRepoBuildRuleResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateRepoWebhookResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateRepoWebhookResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateRepoWebhookResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateRepoWebhookResponse) SetHeaders(v map[string]*string) *UpdateRepoWebhookResponse {
+	s.Headers = v
+	return s
+}
+
+type UpdateUserInfoResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+}
+
+func (s UpdateUserInfoResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s UpdateUserInfoResponse) GoString() string {
+	return s.String()
+}
+
+func (s *UpdateUserInfoResponse) SetHeaders(v map[string]*string) *UpdateUserInfoResponse {
+	s.Headers = v
+	return s
+}
+
+type Client struct {
+	openapi.Client
+}
+
+func NewClient(config *openapi.Config) (*Client, error) {
+	client := new(Client)
+	err := client.Init(config)
+	return client, err
+}
+
+func (client *Client) Init(config *openapi.Config) (_err error) {
+	_err = client.Client.Init(config)
+	if _err != nil {
+		return _err
+	}
+	client.EndpointRule = tea.String("regional")
+	_err = client.CheckConfig(config)
+	if _err != nil {
+		return _err
+	}
+	client.Endpoint, _err = client.GetEndpoint(tea.String("cr"), client.RegionId, client.EndpointRule, client.Network, client.Suffix, client.EndpointMap, client.Endpoint)
+	if _err != nil {
+		return _err
+	}
+
+	return nil
+}
+
+func (client *Client) GetEndpoint(productId *string, regionId *string, endpointRule *string, network *string, suffix *string, endpointMap map[string]*string, endpoint *string) (_result *string, _err error) {
+	if !tea.BoolValue(util.Empty(endpoint)) {
+		_result = endpoint
+		return _result, _err
+	}
+
+	if !tea.BoolValue(util.IsUnset(endpointMap)) && !tea.BoolValue(util.Empty(endpointMap[tea.StringValue(regionId)])) {
+		_result = endpointMap[tea.StringValue(regionId)]
+		return _result, _err
+	}
+
+	_body, _err := endpointutil.GetEndpointRules(productId, regionId, endpointRule, network, suffix)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuild(RepoNamespace *string, RepoName *string, BuildId *string) (_result *CancelRepoBuildResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CancelRepoBuildResponse{}
+	_body, _err := client.CancelRepoBuildWithOptions(RepoNamespace, RepoName, BuildId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuildWithOptions(RepoNamespace *string, RepoName *string, BuildId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CancelRepoBuildResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildId = openapiutil.GetEncodeParam(BuildId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CancelRepoBuild"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build/" + tea.StringValue(BuildId) + "/cancel"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CancelRepoBuildResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateNamespace() (_result *CreateNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CreateNamespaceWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateNamespaceWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateNamespaceResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepo() (_result *CreateRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoResponse{}
+	_body, _err := client.CreateRepoWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRule(RepoNamespace *string, RepoName *string) (_result *CreateRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CreateRepoBuildRuleWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoBuildRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoBuildRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWebhook(RepoNamespace *string, RepoName *string) (_result *CreateRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateRepoWebhookResponse{}
+	_body, _err := client.CreateRepoWebhookWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateRepoWebhookResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoWebhook"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateRepoWebhookResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateUserInfo() (_result *CreateUserInfoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &CreateUserInfoResponse{}
+	_body, _err := client.CreateUserInfoWithOptions(headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateUserInfoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *CreateUserInfoResponse, _err error) {
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateUserInfo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/users"),
+		Method: tea.String("PUT"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &CreateUserInfoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteImage(RepoNamespace *string, RepoName *string, Tag *string) (_result *DeleteImageResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteImageResponse{}
+	_body, _err := client.DeleteImageWithOptions(RepoNamespace, RepoName, Tag, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteImageWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteImageResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	Tag = openapiutil.GetEncodeParam(Tag)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteImage"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteImageResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespace(Namespace *string) (_result *DeleteNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.DeleteNamespaceWithOptions(Namespace, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteNamespaceResponse, _err error) {
+	Namespace = openapiutil.GetEncodeParam(Namespace)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteNamespace"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/namespace/" + tea.StringValue(Namespace)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepo(RepoNamespace *string, RepoName *string) (_result *DeleteRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoResponse{}
+	_body, _err := client.DeleteRepoWithOptions(RepoNamespace, RepoName, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteRepoResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepo"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.DeleteRepoBuildRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace)
+	RepoName = openapiutil.GetEncodeParam(RepoName)
+	BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId)
+	req := &openapi.OpenApiRequest{
+		Headers: headers,
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoBuildRule"),
+		Version: tea.String("2016-06-07"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId)),
+		Method: tea.String("DELETE"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("ROA"),
+		ReqBodyType: tea.String("json"),
+		BodyType: tea.String("none"),
+	}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWebhook(RepoNamespace *string, RepoName *string, WebhookId *string) (_result *DeleteRepoWebhookResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	headers := make(map[string]*string)
+	_result = &DeleteRepoWebhookResponse{}
+	_body, _err := client.DeleteRepoWebhookWithOptions(RepoNamespace, RepoName, WebhookId, headers, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, WebhookId *string, headers map[string]*string, runtime
*util.RuntimeOptions) (_result *DeleteRepoWebhookResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + WebhookId = openapiutil.GetEncodeParam(WebhookId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("DeleteRepoWebhook"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks/" + tea.StringValue(WebhookId)), + Method: tea.String("DELETE"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &DeleteRepoWebhookResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetAuthorizationToken() (_result *GetAuthorizationTokenResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetAuthorizationTokenResponse{} + _body, _err := client.GetAuthorizationTokenWithOptions(headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetAuthorizationTokenWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetAuthorizationTokenResponse, _err error) { + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetAuthorizationToken"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/tokens"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetAuthorizationTokenResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetImageLayer(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetImageLayerResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetImageLayerResponse{} + _body, _err := client.GetImageLayerWithOptions(RepoNamespace, RepoName, Tag, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetImageLayerWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetImageLayerResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetImageLayer"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/layers"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetImageLayerResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err 
= tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetImageManifest(RepoNamespace *string, RepoName *string, Tag *string, request *GetImageManifestRequest) (_result *GetImageManifestResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetImageManifestResponse{} + _body, _err := client.GetImageManifestWithOptions(RepoNamespace, RepoName, Tag, request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetImageManifestWithOptions(RepoNamespace *string, RepoName *string, Tag *string, request *GetImageManifestRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetImageManifestResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.SchemaVersion)) { + query["SchemaVersion"] = request.SchemaVersion + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetImageManifest"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/manifest"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetImageManifestResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetNamespace(Namespace *string) (_result *GetNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetNamespaceResponse{} + _body, _err := client.GetNamespaceWithOptions(Namespace, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetNamespaceResponse, _err error) { + Namespace = openapiutil.GetEncodeParam(Namespace) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetNamespace"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/namespace/" + tea.StringValue(Namespace)), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetNamespaceList(request *GetNamespaceListRequest) (_result *GetNamespaceListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetNamespaceListResponse{} + _body, _err := client.GetNamespaceListWithOptions(request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, 
_err +} + +func (client *Client) GetNamespaceListWithOptions(request *GetNamespaceListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetNamespaceListResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Authorize)) { + query["Authorize"] = request.Authorize + } + + if !tea.BoolValue(util.IsUnset(request.Status)) { + query["Status"] = request.Status + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetNamespaceList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/namespace"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetNamespaceListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRegion(request *GetRegionRequest) (_result *GetRegionResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRegionResponse{} + _body, _err := client.GetRegionWithOptions(request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRegionWithOptions(request *GetRegionRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRegionResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Domain)) { + query["Domain"] = request.Domain + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRegion"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/regions"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRegionResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRegionList() (_result *GetRegionListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRegionListResponse{} + _body, _err := client.GetRegionListWithOptions(headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRegionListWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRegionListResponse, _err error) { + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRegionList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/regions"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRegionListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return 
_result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepo(RepoNamespace *string, RepoName *string) (_result *GetRepoResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoResponse{} + _body, _err := client.GetRepoWithOptions(RepoNamespace, RepoName, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRepo"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoBuildList(RepoNamespace *string, RepoName *string, request *GetRepoBuildListRequest) (_result *GetRepoBuildListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoBuildListResponse{} + _body, _err := client.GetRepoBuildListWithOptions(RepoNamespace, RepoName, request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoBuildListWithOptions(RepoNamespace *string, RepoName *string, request *GetRepoBuildListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildListResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Page)) { + query["Page"] = request.Page + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoBuildList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoBuildListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoBuildRuleList(RepoNamespace *string, RepoName *string) (_result *GetRepoBuildRuleListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoBuildRuleListResponse{} + _body, _err := 
client.GetRepoBuildRuleListWithOptions(RepoNamespace, RepoName, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoBuildRuleListWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildRuleListResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRepoBuildRuleList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoBuildRuleListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoBuildStatus(RepoNamespace *string, RepoName *string, BuildId *string) (_result *GetRepoBuildStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoBuildStatusResponse{} + _body, _err := client.GetRepoBuildStatusWithOptions(RepoNamespace, RepoName, BuildId, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoBuildStatusWithOptions(RepoNamespace *string, RepoName *string, BuildId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoBuildStatusResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + BuildId = openapiutil.GetEncodeParam(BuildId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRepoBuildStatus"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/build/" + tea.StringValue(BuildId) + "/status"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoBuildStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoList(request *GetRepoListRequest) (_result *GetRepoListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoListResponse{} + _body, _err := client.GetRepoListWithOptions(request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoListWithOptions(request *GetRepoListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoListResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Page)) { + query["Page"] = request.Page + } + + if 
!tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.Status)) { + query["Status"] = request.Status + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoListByNamespace(RepoNamespace *string, request *GetRepoListByNamespaceRequest) (_result *GetRepoListByNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoListByNamespaceResponse{} + _body, _err := client.GetRepoListByNamespaceWithOptions(RepoNamespace, request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoListByNamespaceWithOptions(RepoNamespace *string, request *GetRepoListByNamespaceRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoListByNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Page)) { + query["Page"] = request.Page + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.Status)) { + query["Status"] = request.Status + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoListByNamespace"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace)), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoListByNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTag(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoTagResponse{} + _body, _err := client.GetRepoTagWithOptions(RepoNamespace, RepoName, Tag, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: 
tea.String("GetRepoTag"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag)), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanList(RepoNamespace *string, RepoName *string, Tag *string, request *GetRepoTagScanListRequest) (_result *GetRepoTagScanListResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoTagScanListResponse{} + _body, _err := client.GetRepoTagScanListWithOptions(RepoNamespace, RepoName, Tag, request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanListWithOptions(RepoNamespace *string, RepoName *string, Tag *string, request *GetRepoTagScanListRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanListResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Page)) { + query["Page"] = request.Page + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.Severity)) { + query["Severity"] = request.Severity + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagScanList"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanResult"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoTagScanListResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanStatus(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagScanStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.GetRepoTagScanStatusWithOptions(RepoNamespace, RepoName, Tag, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanStatusWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanStatusResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + 
params := &openapi.Params{ + Action: tea.String("GetRepoTagScanStatus"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanStatus"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanSummary(RepoNamespace *string, RepoName *string, Tag *string) (_result *GetRepoTagScanSummaryResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.GetRepoTagScanSummaryWithOptions(RepoNamespace, RepoName, Tag, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanSummaryWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagScanSummaryResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagScanSummary"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scanCount"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTags(RepoNamespace *string, RepoName *string, request *GetRepoTagsRequest) (_result *GetRepoTagsResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoTagsResponse{} + _body, _err := client.GetRepoTagsWithOptions(RepoNamespace, RepoName, request, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagsWithOptions(RepoNamespace *string, RepoName *string, request *GetRepoTagsRequest, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoTagsResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Page)) { + query["Page"] = request.Page + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + req := &openapi.OpenApiRequest{ + Headers: headers, + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTags"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: 
tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoTagsResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoWebhook(RepoNamespace *string, RepoName *string) (_result *GetRepoWebhookResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetRepoWebhookResponse{} + _body, _err := client.GetRepoWebhookWithOptions(RepoNamespace, RepoName, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetRepoWebhookResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetRepoWebhook"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetRepoWebhookResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetResourceQuota(ResourceName *string) (_result *GetResourceQuotaResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &GetResourceQuotaResponse{} + _body, _err := client.GetResourceQuotaWithOptions(ResourceName, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetResourceQuotaWithOptions(ResourceName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *GetResourceQuotaResponse, _err error) { + ResourceName = openapiutil.GetEncodeParam(ResourceName) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetResourceQuota"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/resource/" + tea.StringValue(ResourceName)), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &GetResourceQuotaResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) StartImageScan(RepoNamespace *string, RepoName *string, Tag *string) (_result *StartImageScanResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &StartImageScanResponse{} + _body, _err := client.StartImageScanWithOptions(RepoNamespace, RepoName, Tag, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + 
return _result, _err +} + +func (client *Client) StartImageScanWithOptions(RepoNamespace *string, RepoName *string, Tag *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *StartImageScanResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + Tag = openapiutil.GetEncodeParam(Tag) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("StartImageScan"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/tags/" + tea.StringValue(Tag) + "/scan"), + Method: tea.String("PUT"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &StartImageScanResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) StartRepoBuildByRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *StartRepoBuildByRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &StartRepoBuildByRuleResponse{} + _body, _err := client.StartRepoBuildByRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) StartRepoBuildByRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *StartRepoBuildByRuleResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("StartRepoBuildByRule"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId) + "/build"), + Method: tea.String("PUT"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &StartRepoBuildByRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateNamespace(Namespace *string) (_result *UpdateNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &UpdateNamespaceResponse{} + _body, _err := client.UpdateNamespaceWithOptions(Namespace, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateNamespaceWithOptions(Namespace *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateNamespaceResponse, _err error) { + Namespace = openapiutil.GetEncodeParam(Namespace) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateNamespace"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/namespace/" + 
tea.StringValue(Namespace)), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepo(RepoNamespace *string, RepoName *string) (_result *UpdateRepoResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &UpdateRepoResponse{} + _body, _err := client.UpdateRepoWithOptions(RepoNamespace, RepoName, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoWithOptions(RepoNamespace *string, RepoName *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateRepo"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName)), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateRepoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoBuildRule(RepoNamespace *string, RepoName *string, BuildRuleId *string) (_result *UpdateRepoBuildRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.UpdateRepoBuildRuleWithOptions(RepoNamespace, RepoName, BuildRuleId, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoBuildRuleWithOptions(RepoNamespace *string, RepoName *string, BuildRuleId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoBuildRuleResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + BuildRuleId = openapiutil.GetEncodeParam(BuildRuleId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoBuildRule"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/rules/" + tea.StringValue(BuildRuleId)), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoWebhook(RepoNamespace *string, RepoName *string, WebhookId *string) (_result *UpdateRepoWebhookResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := 
make(map[string]*string) + _result = &UpdateRepoWebhookResponse{} + _body, _err := client.UpdateRepoWebhookWithOptions(RepoNamespace, RepoName, WebhookId, headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoWebhookWithOptions(RepoNamespace *string, RepoName *string, WebhookId *string, headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateRepoWebhookResponse, _err error) { + RepoNamespace = openapiutil.GetEncodeParam(RepoNamespace) + RepoName = openapiutil.GetEncodeParam(RepoName) + WebhookId = openapiutil.GetEncodeParam(WebhookId) + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoWebhook"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/repos/" + tea.StringValue(RepoNamespace) + "/" + tea.StringValue(RepoName) + "/webhooks/" + tea.StringValue(WebhookId)), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateRepoWebhookResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateUserInfo() (_result *UpdateUserInfoResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &UpdateUserInfoResponse{} + _body, _err := client.UpdateUserInfoWithOptions(headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateUserInfoWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *UpdateUserInfoResponse, _err error) { + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("UpdateUserInfo"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/users"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("none"), + } + _result = &UpdateUserInfoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} diff --git a/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE b/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/cr-20181201/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go b/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go new file mode 100644 index 0000000000..abe1988090 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/cr-20181201/client/client.go @@ -0,0 +1,16031 @@ +// This file is auto-generated, don't edit it. Thanks. +/** + * + */ +package client + +import ( + openapi "github.com/alibabacloud-go/darabonba-openapi/client" + endpointutil "github.com/alibabacloud-go/endpoint-util/service" + openapiutil "github.com/alibabacloud-go/openapi-util/service" + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" +) + +type CancelArtifactBuildTaskRequest struct { + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s CancelArtifactBuildTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskRequest) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskRequest) SetBuildTaskId(v string) *CancelArtifactBuildTaskRequest { + s.BuildTaskId = &v + return s +} + +func (s *CancelArtifactBuildTaskRequest) SetInstanceId(v string) *CancelArtifactBuildTaskRequest { + s.InstanceId = &v + return s +} + +type CancelArtifactBuildTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CancelArtifactBuildTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskResponseBody) SetCode(v string) *CancelArtifactBuildTaskResponseBody { + s.Code = &v + return s +} + +func (s *CancelArtifactBuildTaskResponseBody) SetIsSuccess(v bool) *CancelArtifactBuildTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CancelArtifactBuildTaskResponseBody) SetRequestId(v string) *CancelArtifactBuildTaskResponseBody { + s.RequestId = &v + return s +} + +type CancelArtifactBuildTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CancelArtifactBuildTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CancelArtifactBuildTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CancelArtifactBuildTaskResponse) GoString() string { + return s.String() +} + +func (s *CancelArtifactBuildTaskResponse) SetHeaders(v map[string]*string) *CancelArtifactBuildTaskResponse { + s.Headers = v + return s +} + +func (s *CancelArtifactBuildTaskResponse) SetBody(v *CancelArtifactBuildTaskResponseBody) *CancelArtifactBuildTaskResponse { + s.Body = v + return s +} + +type CancelRepoBuildRecordRequest struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CancelRepoBuildRecordRequest) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordRequest) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordRequest) SetBuildRecordId(v string) *CancelRepoBuildRecordRequest { + s.BuildRecordId = &v + return s +} + +func (s 
*CancelRepoBuildRecordRequest) SetInstanceId(v string) *CancelRepoBuildRecordRequest { + s.InstanceId = &v + return s +} + +func (s *CancelRepoBuildRecordRequest) SetRepoId(v string) *CancelRepoBuildRecordRequest { + s.RepoId = &v + return s +} + +type CancelRepoBuildRecordResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CancelRepoBuildRecordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordResponseBody) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordResponseBody) SetCode(v string) *CancelRepoBuildRecordResponseBody { + s.Code = &v + return s +} + +func (s *CancelRepoBuildRecordResponseBody) SetIsSuccess(v bool) *CancelRepoBuildRecordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CancelRepoBuildRecordResponseBody) SetRequestId(v string) *CancelRepoBuildRecordResponseBody { + s.RequestId = &v + return s +} + +type CancelRepoBuildRecordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CancelRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CancelRepoBuildRecordResponse) String() string { + return tea.Prettify(s) +} + +func (s CancelRepoBuildRecordResponse) GoString() string { + return s.String() +} + +func (s *CancelRepoBuildRecordResponse) SetHeaders(v map[string]*string) *CancelRepoBuildRecordResponse { + s.Headers = v + return s +} + +func (s *CancelRepoBuildRecordResponse) SetBody(v *CancelRepoBuildRecordResponseBody) *CancelRepoBuildRecordResponse { + s.Body = v + return s +} + +type CreateBuildRecordByRuleRequest struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateBuildRecordByRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleRequest) SetBuildRuleId(v string) *CreateBuildRecordByRuleRequest { + s.BuildRuleId = &v + return s +} + +func (s *CreateBuildRecordByRuleRequest) SetInstanceId(v string) *CreateBuildRecordByRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateBuildRecordByRuleRequest) SetRepoId(v string) *CreateBuildRecordByRuleRequest { + s.RepoId = &v + return s +} + +type CreateBuildRecordByRuleResponseBody struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateBuildRecordByRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleResponseBody) SetBuildRecordId(v string) *CreateBuildRecordByRuleResponseBody { + s.BuildRecordId = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetCode(v string) *CreateBuildRecordByRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetIsSuccess(v bool) 
*CreateBuildRecordByRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateBuildRecordByRuleResponseBody) SetRequestId(v string) *CreateBuildRecordByRuleResponseBody { + s.RequestId = &v + return s +} + +type CreateBuildRecordByRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateBuildRecordByRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateBuildRecordByRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateBuildRecordByRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateBuildRecordByRuleResponse) SetHeaders(v map[string]*string) *CreateBuildRecordByRuleResponse { + s.Headers = v + return s +} + +func (s *CreateBuildRecordByRuleResponse) SetBody(v *CreateBuildRecordByRuleResponseBody) *CreateBuildRecordByRuleResponse { + s.Body = v + return s +} + +type CreateChainRequest struct { + ChainConfig *string `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s CreateChainRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChainRequest) GoString() string { + return s.String() +} + +func (s *CreateChainRequest) SetChainConfig(v string) *CreateChainRequest { + s.ChainConfig = &v + return s +} + +func (s *CreateChainRequest) SetDescription(v string) *CreateChainRequest { + s.Description = &v + return s +} + +func (s *CreateChainRequest) SetInstanceId(v string) *CreateChainRequest { + s.InstanceId = &v + return s +} + +func (s *CreateChainRequest) SetName(v string) *CreateChainRequest { + s.Name = &v + return s +} + +func (s *CreateChainRequest) SetRepoName(v string) *CreateChainRequest { + s.RepoName = &v + return s +} + +func (s *CreateChainRequest) SetRepoNamespaceName(v string) *CreateChainRequest { + s.RepoNamespaceName = &v + return s +} + +type CreateChainResponseBody struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChainResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChainResponseBody) SetChainId(v string) *CreateChainResponseBody { + s.ChainId = &v + return s +} + +func (s *CreateChainResponseBody) SetCode(v string) *CreateChainResponseBody { + s.Code = &v + return s +} + +func (s *CreateChainResponseBody) SetIsSuccess(v bool) *CreateChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChainResponseBody) SetRequestId(v string) *CreateChainResponseBody { + s.RequestId = &v + return s +} + +type CreateChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChainResponse) String() string { + return tea.Prettify(s) +} + +func (s 
CreateChainResponse) GoString() string { + return s.String() +} + +func (s *CreateChainResponse) SetHeaders(v map[string]*string) *CreateChainResponse { + s.Headers = v + return s +} + +func (s *CreateChainResponse) SetBody(v *CreateChainResponseBody) *CreateChainResponse { + s.Body = v + return s +} + +type CreateChartNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s CreateChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceRequest) SetAutoCreateRepo(v bool) *CreateChartNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetDefaultRepoType(v string) *CreateChartNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetInstanceId(v string) *CreateChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetNamespaceName(v string) *CreateChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateChartNamespaceRequest) SetResourceGroupId(v string) *CreateChartNamespaceRequest { + s.ResourceGroupId = &v + return s +} + +type CreateChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceResponseBody) SetCode(v string) *CreateChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *CreateChartNamespaceResponseBody) SetIsSuccess(v bool) *CreateChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChartNamespaceResponseBody) SetRequestId(v string) *CreateChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type CreateChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *CreateChartNamespaceResponse) SetHeaders(v map[string]*string) *CreateChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *CreateChartNamespaceResponse) SetBody(v *CreateChartNamespaceResponseBody) *CreateChartNamespaceResponse { + s.Body = v + return s +} + +type CreateChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + 
Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s CreateChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryRequest) SetInstanceId(v string) *CreateChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetRepoName(v string) *CreateChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetRepoNamespaceName(v string) *CreateChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetRepoType(v string) *CreateChartRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *CreateChartRepositoryRequest) SetSummary(v string) *CreateChartRepositoryRequest { + s.Summary = &v + return s +} + +type CreateChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryResponseBody) SetCode(v string) *CreateChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetIsSuccess(v bool) *CreateChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetRepoId(v string) *CreateChartRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *CreateChartRepositoryResponseBody) SetRequestId(v string) *CreateChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type CreateChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *CreateChartRepositoryResponse) SetHeaders(v map[string]*string) *CreateChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *CreateChartRepositoryResponse) SetBody(v *CreateChartRepositoryResponseBody) *CreateChartRepositoryResponse { + s.Body = v + return s +} + +type CreateInstanceEndpointAclPolicyRequest struct { + Comment *string `json:"Comment,omitempty" xml:"Comment,omitempty"` + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s CreateInstanceEndpointAclPolicyRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyRequest) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetComment(v string) *CreateInstanceEndpointAclPolicyRequest { + s.Comment = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetEndpointType(v string) *CreateInstanceEndpointAclPolicyRequest { + 
s.EndpointType = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetEntry(v string) *CreateInstanceEndpointAclPolicyRequest { + s.Entry = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetInstanceId(v string) *CreateInstanceEndpointAclPolicyRequest { + s.InstanceId = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyRequest) SetModuleName(v string) *CreateInstanceEndpointAclPolicyRequest { + s.ModuleName = &v + return s +} + +type CreateInstanceEndpointAclPolicyResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateInstanceEndpointAclPolicyResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyResponseBody) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetCode(v string) *CreateInstanceEndpointAclPolicyResponseBody { + s.Code = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetIsSuccess(v bool) *CreateInstanceEndpointAclPolicyResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponseBody) SetRequestId(v string) *CreateInstanceEndpointAclPolicyResponseBody { + s.RequestId = &v + return s +} + +type CreateInstanceEndpointAclPolicyResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateInstanceEndpointAclPolicyResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateInstanceEndpointAclPolicyResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceEndpointAclPolicyResponse) GoString() string { + return s.String() +} + +func (s *CreateInstanceEndpointAclPolicyResponse) SetHeaders(v map[string]*string) *CreateInstanceEndpointAclPolicyResponse { + s.Headers = v + return s +} + +func (s *CreateInstanceEndpointAclPolicyResponse) SetBody(v *CreateInstanceEndpointAclPolicyResponseBody) *CreateInstanceEndpointAclPolicyResponse { + s.Body = v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcRequest struct { + EnableCreateDNSRecordInPvzt *bool `json:"EnableCreateDNSRecordInPvzt,omitempty" xml:"EnableCreateDNSRecordInPvzt,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` + VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcRequest) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetEnableCreateDNSRecordInPvzt(v bool) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.EnableCreateDNSRecordInPvzt = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetInstanceId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.InstanceId = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetModuleName(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.ModuleName = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetVpcId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.VpcId = &v + return s +} + 
+func (s *CreateInstanceVpcEndpointLinkedVpcRequest) SetVswitchId(v string) *CreateInstanceVpcEndpointLinkedVpcRequest { + s.VswitchId = &v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponseBody) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetCode(v string) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.Code = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetIsSuccess(v bool) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponseBody) SetRequestId(v string) *CreateInstanceVpcEndpointLinkedVpcResponseBody { + s.RequestId = &v + return s +} + +type CreateInstanceVpcEndpointLinkedVpcResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateInstanceVpcEndpointLinkedVpcResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateInstanceVpcEndpointLinkedVpcResponse) GoString() string { + return s.String() +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponse) SetHeaders(v map[string]*string) *CreateInstanceVpcEndpointLinkedVpcResponse { + s.Headers = v + return s +} + +func (s *CreateInstanceVpcEndpointLinkedVpcResponse) SetBody(v *CreateInstanceVpcEndpointLinkedVpcResponseBody) *CreateInstanceVpcEndpointLinkedVpcResponse { + s.Body = v + return s +} + +type CreateNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s CreateNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceRequest) GoString() string { + return s.String() +} + +func (s *CreateNamespaceRequest) SetAutoCreateRepo(v bool) *CreateNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *CreateNamespaceRequest) SetDefaultRepoType(v string) *CreateNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *CreateNamespaceRequest) SetInstanceId(v string) *CreateNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *CreateNamespaceRequest) SetNamespaceName(v string) *CreateNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateNamespaceRequest) SetResourceGroupId(v string) *CreateNamespaceRequest { + s.ResourceGroupId = &v + return s +} + +type CreateNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateNamespaceResponseBody) String() string { + return 
tea.Prettify(s) +} + +func (s CreateNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *CreateNamespaceResponseBody) SetCode(v string) *CreateNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *CreateNamespaceResponseBody) SetIsSuccess(v bool) *CreateNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateNamespaceResponseBody) SetRequestId(v string) *CreateNamespaceResponseBody { + s.RequestId = &v + return s +} + +type CreateNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateNamespaceResponse) GoString() string { + return s.String() +} + +func (s *CreateNamespaceResponse) SetHeaders(v map[string]*string) *CreateNamespaceResponse { + s.Headers = v + return s +} + +func (s *CreateNamespaceResponse) SetBody(v *CreateNamespaceResponseBody) *CreateNamespaceResponse { + s.Body = v + return s +} + +type CreateRepoBuildRuleRequest struct { + BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"` + DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"` + DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"` + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"` + PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"` + PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateRepoBuildRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleRequest) SetBuildArgs(v []*string) *CreateRepoBuildRuleRequest { + s.BuildArgs = v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetDockerfileLocation(v string) *CreateRepoBuildRuleRequest { + s.DockerfileLocation = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetDockerfileName(v string) *CreateRepoBuildRuleRequest { + s.DockerfileName = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetImageTag(v string) *CreateRepoBuildRuleRequest { + s.ImageTag = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetInstanceId(v string) *CreateRepoBuildRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPlatforms(v []*string) *CreateRepoBuildRuleRequest { + s.Platforms = v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPushName(v string) *CreateRepoBuildRuleRequest { + s.PushName = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetPushType(v string) *CreateRepoBuildRuleRequest { + s.PushType = &v + return s +} + +func (s *CreateRepoBuildRuleRequest) SetRepoId(v string) *CreateRepoBuildRuleRequest { + s.RepoId = &v + return s +} + +type CreateRepoBuildRuleResponseBody struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" 
xml:"RequestId,omitempty"` +} + +func (s CreateRepoBuildRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleResponseBody) SetBuildRuleId(v string) *CreateRepoBuildRuleResponseBody { + s.BuildRuleId = &v + return s +} + +func (s *CreateRepoBuildRuleResponseBody) SetCode(v string) *CreateRepoBuildRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoBuildRuleResponseBody) SetIsSuccess(v bool) *CreateRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoBuildRuleResponseBody) SetRequestId(v string) *CreateRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *CreateRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoBuildRuleResponse) SetBody(v *CreateRepoBuildRuleResponseBody) *CreateRepoBuildRuleResponse { + s.Body = v + return s +} + +type CreateRepoSourceCodeRepoRequest struct { + AutoBuild *bool `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"` + CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"` + CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"` + CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"` + DisableCacheBuild *bool `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + OverseaBuild *bool `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s CreateRepoSourceCodeRepoRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSourceCodeRepoRequest) SetAutoBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.AutoBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoName(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoName = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoNamespaceName(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoNamespaceName = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetCodeRepoType(v string) *CreateRepoSourceCodeRepoRequest { + s.CodeRepoType = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetDisableCacheBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.DisableCacheBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetInstanceId(v string) *CreateRepoSourceCodeRepoRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetOverseaBuild(v bool) *CreateRepoSourceCodeRepoRequest { + s.OverseaBuild = &v + return s +} + +func (s *CreateRepoSourceCodeRepoRequest) SetRepoId(v string) *CreateRepoSourceCodeRepoRequest { + s.RepoId = &v + return s +} + +type CreateRepoSourceCodeRepoResponseBody struct { + Code *string 
`json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoSourceCodeRepoResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetCode(v string) *CreateRepoSourceCodeRepoResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *CreateRepoSourceCodeRepoResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSourceCodeRepoResponseBody) SetRequestId(v string) *CreateRepoSourceCodeRepoResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoSourceCodeRepoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSourceCodeRepoResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSourceCodeRepoResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *CreateRepoSourceCodeRepoResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSourceCodeRepoResponse) SetBody(v *CreateRepoSourceCodeRepoResponseBody) *CreateRepoSourceCodeRepoResponse { + s.Body = v + return s +} + +type CreateRepoSyncRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + SyncRuleName *string `json:"SyncRuleName,omitempty" xml:"SyncRuleName,omitempty"` + SyncScope *string `json:"SyncScope,omitempty" xml:"SyncScope,omitempty"` + SyncTrigger *string `json:"SyncTrigger,omitempty" xml:"SyncTrigger,omitempty"` + TagFilter *string `json:"TagFilter,omitempty" xml:"TagFilter,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetNamespaceName *string `json:"TargetNamespaceName,omitempty" xml:"TargetNamespaceName,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` + TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"` + TargetUserId *string `json:"TargetUserId,omitempty" xml:"TargetUserId,omitempty"` +} + +func (s CreateRepoSyncRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleRequest) SetInstanceId(v string) *CreateRepoSyncRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetNamespaceName(v string) *CreateRepoSyncRuleRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetRepoName(v string) *CreateRepoSyncRuleRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncRuleName(v string) *CreateRepoSyncRuleRequest { + s.SyncRuleName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncScope(v string) *CreateRepoSyncRuleRequest { + s.SyncScope = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetSyncTrigger(v string) *CreateRepoSyncRuleRequest { + s.SyncTrigger = &v + return s +} + +func (s 
*CreateRepoSyncRuleRequest) SetTagFilter(v string) *CreateRepoSyncRuleRequest { + s.TagFilter = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetInstanceId(v string) *CreateRepoSyncRuleRequest { + s.TargetInstanceId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetNamespaceName(v string) *CreateRepoSyncRuleRequest { + s.TargetNamespaceName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetRegionId(v string) *CreateRepoSyncRuleRequest { + s.TargetRegionId = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetRepoName(v string) *CreateRepoSyncRuleRequest { + s.TargetRepoName = &v + return s +} + +func (s *CreateRepoSyncRuleRequest) SetTargetUserId(v string) *CreateRepoSyncRuleRequest { + s.TargetUserId = &v + return s +} + +type CreateRepoSyncRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` +} + +func (s CreateRepoSyncRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleResponseBody) SetCode(v string) *CreateRepoSyncRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetIsSuccess(v bool) *CreateRepoSyncRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetRequestId(v string) *CreateRepoSyncRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncRuleResponseBody) SetSyncRuleId(v string) *CreateRepoSyncRuleResponseBody { + s.SyncRuleId = &v + return s +} + +type CreateRepoSyncRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncRuleResponse) SetHeaders(v map[string]*string) *CreateRepoSyncRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncRuleResponse) SetBody(v *CreateRepoSyncRuleResponseBody) *CreateRepoSyncRuleResponse { + s.Body = v + return s +} + +type CreateRepoSyncTaskRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Override *bool `json:"Override,omitempty" xml:"Override,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetNamespace *string `json:"TargetNamespace,omitempty" xml:"TargetNamespace,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` + TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"` + TargetTag *string `json:"TargetTag,omitempty" xml:"TargetTag,omitempty"` + TargetUserId *string `json:"TargetUserId,omitempty" xml:"TargetUserId,omitempty"` +} + +func (s CreateRepoSyncTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskRequest) GoString() string { + return s.String() +} + +func (s 
*CreateRepoSyncTaskRequest) SetInstanceId(v string) *CreateRepoSyncTaskRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetOverride(v bool) *CreateRepoSyncTaskRequest { + s.Override = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetRepoId(v string) *CreateRepoSyncTaskRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTag(v string) *CreateRepoSyncTaskRequest { + s.Tag = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetInstanceId(v string) *CreateRepoSyncTaskRequest { + s.TargetInstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetNamespace(v string) *CreateRepoSyncTaskRequest { + s.TargetNamespace = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetRegionId(v string) *CreateRepoSyncTaskRequest { + s.TargetRegionId = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetRepoName(v string) *CreateRepoSyncTaskRequest { + s.TargetRepoName = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetTag(v string) *CreateRepoSyncTaskRequest { + s.TargetTag = &v + return s +} + +func (s *CreateRepoSyncTaskRequest) SetTargetUserId(v string) *CreateRepoSyncTaskRequest { + s.TargetUserId = &v + return s +} + +type CreateRepoSyncTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` +} + +func (s CreateRepoSyncTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskResponseBody) SetCode(v string) *CreateRepoSyncTaskResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetIsSuccess(v bool) *CreateRepoSyncTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetRequestId(v string) *CreateRepoSyncTaskResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncTaskResponseBody) SetSyncTaskId(v string) *CreateRepoSyncTaskResponseBody { + s.SyncTaskId = &v + return s +} + +type CreateRepoSyncTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskResponse) SetHeaders(v map[string]*string) *CreateRepoSyncTaskResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncTaskResponse) SetBody(v *CreateRepoSyncTaskResponseBody) *CreateRepoSyncTaskResponse { + s.Body = v + return s +} + +type CreateRepoSyncTaskByRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s CreateRepoSyncTaskByRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetInstanceId(v 
string) *CreateRepoSyncTaskByRuleRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetRepoId(v string) *CreateRepoSyncTaskByRuleRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetSyncRuleId(v string) *CreateRepoSyncTaskByRuleRequest { + s.SyncRuleId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleRequest) SetTag(v string) *CreateRepoSyncTaskByRuleRequest { + s.Tag = &v + return s +} + +type CreateRepoSyncTaskByRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` +} + +func (s CreateRepoSyncTaskByRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetCode(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetIsSuccess(v bool) *CreateRepoSyncTaskByRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetRequestId(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponseBody) SetSyncTaskId(v string) *CreateRepoSyncTaskByRuleResponseBody { + s.SyncTaskId = &v + return s +} + +type CreateRepoSyncTaskByRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoSyncTaskByRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoSyncTaskByRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoSyncTaskByRuleResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoSyncTaskByRuleResponse) SetHeaders(v map[string]*string) *CreateRepoSyncTaskByRuleResponse { + s.Headers = v + return s +} + +func (s *CreateRepoSyncTaskByRuleResponse) SetBody(v *CreateRepoSyncTaskByRuleResponseBody) *CreateRepoSyncTaskByRuleResponse { + s.Body = v + return s +} + +type CreateRepoTagRequest struct { + FromTag *string `json:"FromTag,omitempty" xml:"FromTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + ToTag *string `json:"ToTag,omitempty" xml:"ToTag,omitempty"` +} + +func (s CreateRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTagRequest) SetFromTag(v string) *CreateRepoTagRequest { + s.FromTag = &v + return s +} + +func (s *CreateRepoTagRequest) SetInstanceId(v string) *CreateRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTagRequest) SetNamespaceName(v string) *CreateRepoTagRequest { + s.NamespaceName = &v + return s +} + +func (s *CreateRepoTagRequest) SetRepoName(v string) *CreateRepoTagRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepoTagRequest) SetToTag(v string) *CreateRepoTagRequest { + s.ToTag = &v + return s +} + +type CreateRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess 
*bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTagResponseBody) SetCode(v string) *CreateRepoTagResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoTagResponseBody) SetIsSuccess(v bool) *CreateRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTagResponseBody) SetRequestId(v string) *CreateRepoTagResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTagResponse) SetHeaders(v map[string]*string) *CreateRepoTagResponse { + s.Headers = v + return s +} + +func (s *CreateRepoTagResponse) SetBody(v *CreateRepoTagResponseBody) *CreateRepoTagResponse { + s.Body = v + return s +} + +type CreateRepoTagScanTaskRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanService *string `json:"ScanService,omitempty" xml:"ScanService,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s CreateRepoTagScanTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskRequest) SetDigest(v string) *CreateRepoTagScanTaskRequest { + s.Digest = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetInstanceId(v string) *CreateRepoTagScanTaskRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetRepoId(v string) *CreateRepoTagScanTaskRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetScanService(v string) *CreateRepoTagScanTaskRequest { + s.ScanService = &v + return s +} + +func (s *CreateRepoTagScanTaskRequest) SetTag(v string) *CreateRepoTagScanTaskRequest { + s.Tag = &v + return s +} + +type CreateRepoTagScanTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepoTagScanTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskResponseBody) SetCode(v string) *CreateRepoTagScanTaskResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoTagScanTaskResponseBody) SetIsSuccess(v bool) *CreateRepoTagScanTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTagScanTaskResponseBody) SetRequestId(v string) *CreateRepoTagScanTaskResponseBody { + s.RequestId = &v + return s +} + +type CreateRepoTagScanTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTagScanTaskResponseBody 
`json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTagScanTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTagScanTaskResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTagScanTaskResponse) SetHeaders(v map[string]*string) *CreateRepoTagScanTaskResponse { + s.Headers = v + return s +} + +func (s *CreateRepoTagScanTaskResponse) SetBody(v *CreateRepoTagScanTaskResponseBody) *CreateRepoTagScanTaskResponse { + s.Body = v + return s +} + +type CreateRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"` + TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"` + TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"` + TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"` +} + +func (s CreateRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerRequest) SetInstanceId(v string) *CreateRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetRepoId(v string) *CreateRepoTriggerRequest { + s.RepoId = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerName(v string) *CreateRepoTriggerRequest { + s.TriggerName = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerTag(v string) *CreateRepoTriggerRequest { + s.TriggerTag = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerType(v string) *CreateRepoTriggerRequest { + s.TriggerType = &v + return s +} + +func (s *CreateRepoTriggerRequest) SetTriggerUrl(v string) *CreateRepoTriggerRequest { + s.TriggerUrl = &v + return s +} + +type CreateRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` +} + +func (s CreateRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerResponseBody) SetCode(v string) *CreateRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetIsSuccess(v bool) *CreateRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetRequestId(v string) *CreateRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +func (s *CreateRepoTriggerResponseBody) SetTriggerId(v string) *CreateRepoTriggerResponseBody { + s.TriggerId = &v + return s +} + +type CreateRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *CreateRepoTriggerResponse) SetHeaders(v map[string]*string) *CreateRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *CreateRepoTriggerResponse) SetBody(v 
*CreateRepoTriggerResponseBody) *CreateRepoTriggerResponse { + s.Body = v + return s +} + +type CreateRepositoryRequest struct { + Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"` +} + +func (s CreateRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryRequest) GoString() string { + return s.String() +} + +func (s *CreateRepositoryRequest) SetDetail(v string) *CreateRepositoryRequest { + s.Detail = &v + return s +} + +func (s *CreateRepositoryRequest) SetInstanceId(v string) *CreateRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoName(v string) *CreateRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoNamespaceName(v string) *CreateRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *CreateRepositoryRequest) SetRepoType(v string) *CreateRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *CreateRepositoryRequest) SetResourceGroupId(v string) *CreateRepositoryRequest { + s.ResourceGroupId = &v + return s +} + +func (s *CreateRepositoryRequest) SetSummary(v string) *CreateRepositoryRequest { + s.Summary = &v + return s +} + +func (s *CreateRepositoryRequest) SetTagImmutability(v bool) *CreateRepositoryRequest { + s.TagImmutability = &v + return s +} + +type CreateRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s CreateRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *CreateRepositoryResponseBody) SetCode(v string) *CreateRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetIsSuccess(v bool) *CreateRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetRepoId(v string) *CreateRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *CreateRepositoryResponseBody) SetRequestId(v string) *CreateRepositoryResponseBody { + s.RequestId = &v + return s +} + +type CreateRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *CreateRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s CreateRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s CreateRepositoryResponse) GoString() string { + return s.String() +} + +func (s *CreateRepositoryResponse) SetHeaders(v map[string]*string) *CreateRepositoryResponse { + s.Headers = v + return s +} + +func (s *CreateRepositoryResponse) SetBody(v *CreateRepositoryResponseBody) *CreateRepositoryResponse { + 
s.Body = v + return s +} + +type DeleteChainRequest struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s DeleteChainRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainRequest) GoString() string { + return s.String() +} + +func (s *DeleteChainRequest) SetChainId(v string) *DeleteChainRequest { + s.ChainId = &v + return s +} + +func (s *DeleteChainRequest) SetInstanceId(v string) *DeleteChainRequest { + s.InstanceId = &v + return s +} + +type DeleteChainResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChainResponseBody) SetCode(v string) *DeleteChainResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChainResponseBody) SetIsSuccess(v bool) *DeleteChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChainResponseBody) SetRequestId(v string) *DeleteChainResponseBody { + s.RequestId = &v + return s +} + +type DeleteChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChainResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChainResponse) GoString() string { + return s.String() +} + +func (s *DeleteChainResponse) SetHeaders(v map[string]*string) *DeleteChainResponse { + s.Headers = v + return s +} + +func (s *DeleteChainResponse) SetBody(v *DeleteChainResponseBody) *DeleteChainResponse { + s.Body = v + return s +} + +type DeleteChartNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s DeleteChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceRequest) SetInstanceId(v string) *DeleteChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteChartNamespaceRequest) SetNamespaceName(v string) *DeleteChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type DeleteChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceResponseBody) SetCode(v string) *DeleteChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartNamespaceResponseBody) SetIsSuccess(v bool) *DeleteChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartNamespaceResponseBody) SetRequestId(v string) *DeleteChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" 
xml:"headers,omitempty" require:"true"` + Body *DeleteChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartNamespaceResponse) SetHeaders(v map[string]*string) *DeleteChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *DeleteChartNamespaceResponse) SetBody(v *DeleteChartNamespaceResponseBody) *DeleteChartNamespaceResponse { + s.Body = v + return s +} + +type DeleteChartReleaseRequest struct { + Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Release *string `json:"Release,omitempty" xml:"Release,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s DeleteChartReleaseRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseRequest) SetChart(v string) *DeleteChartReleaseRequest { + s.Chart = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetInstanceId(v string) *DeleteChartReleaseRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRelease(v string) *DeleteChartReleaseRequest { + s.Release = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRepoName(v string) *DeleteChartReleaseRequest { + s.RepoName = &v + return s +} + +func (s *DeleteChartReleaseRequest) SetRepoNamespaceName(v string) *DeleteChartReleaseRequest { + s.RepoNamespaceName = &v + return s +} + +type DeleteChartReleaseResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartReleaseResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseResponseBody) SetCode(v string) *DeleteChartReleaseResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartReleaseResponseBody) SetIsSuccess(v bool) *DeleteChartReleaseResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartReleaseResponseBody) SetRequestId(v string) *DeleteChartReleaseResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartReleaseResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChartReleaseResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartReleaseResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartReleaseResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartReleaseResponse) SetHeaders(v map[string]*string) *DeleteChartReleaseResponse { + s.Headers = v + return s +} + +func (s *DeleteChartReleaseResponse) SetBody(v *DeleteChartReleaseResponseBody) *DeleteChartReleaseResponse { + s.Body = v + return s +} + +type DeleteChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string 
`json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s DeleteChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryRequest) SetInstanceId(v string) *DeleteChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteChartRepositoryRequest) SetRepoName(v string) *DeleteChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *DeleteChartRepositoryRequest) SetRepoNamespaceName(v string) *DeleteChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +type DeleteChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryResponseBody) SetCode(v string) *DeleteChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *DeleteChartRepositoryResponseBody) SetIsSuccess(v bool) *DeleteChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteChartRepositoryResponseBody) SetRequestId(v string) *DeleteChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type DeleteChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *DeleteChartRepositoryResponse) SetHeaders(v map[string]*string) *DeleteChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *DeleteChartRepositoryResponse) SetBody(v *DeleteChartRepositoryResponseBody) *DeleteChartRepositoryResponse { + s.Body = v + return s +} + +type DeleteEventCenterRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` +} + +func (s DeleteEventCenterRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteEventCenterRuleRequest) SetInstanceId(v string) *DeleteEventCenterRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteEventCenterRuleRequest) SetRuleId(v string) *DeleteEventCenterRuleRequest { + s.RuleId = &v + return s +} + +type DeleteEventCenterRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + // Id of the request + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteEventCenterRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteEventCenterRuleResponseBody) SetCode(v string) *DeleteEventCenterRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteEventCenterRuleResponseBody) SetRequestId(v string) *DeleteEventCenterRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteEventCenterRuleResponse struct { + Headers 
map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteEventCenterRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteEventCenterRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteEventCenterRuleResponse) GoString() string { + return s.String() +} + +func (s *DeleteEventCenterRuleResponse) SetHeaders(v map[string]*string) *DeleteEventCenterRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteEventCenterRuleResponse) SetBody(v *DeleteEventCenterRuleResponseBody) *DeleteEventCenterRuleResponse { + s.Body = v + return s +} + +type DeleteInstanceEndpointAclPolicyRequest struct { + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s DeleteInstanceEndpointAclPolicyRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyRequest) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetEndpointType(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.EndpointType = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetEntry(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.Entry = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetInstanceId(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyRequest) SetModuleName(v string) *DeleteInstanceEndpointAclPolicyRequest { + s.ModuleName = &v + return s +} + +type DeleteInstanceEndpointAclPolicyResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteInstanceEndpointAclPolicyResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetCode(v string) *DeleteInstanceEndpointAclPolicyResponseBody { + s.Code = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetIsSuccess(v bool) *DeleteInstanceEndpointAclPolicyResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponseBody) SetRequestId(v string) *DeleteInstanceEndpointAclPolicyResponseBody { + s.RequestId = &v + return s +} + +type DeleteInstanceEndpointAclPolicyResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteInstanceEndpointAclPolicyResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteInstanceEndpointAclPolicyResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceEndpointAclPolicyResponse) GoString() string { + return s.String() +} + +func (s *DeleteInstanceEndpointAclPolicyResponse) SetHeaders(v map[string]*string) *DeleteInstanceEndpointAclPolicyResponse { + s.Headers = v + return s +} + +func (s *DeleteInstanceEndpointAclPolicyResponse) SetBody(v *DeleteInstanceEndpointAclPolicyResponseBody) *DeleteInstanceEndpointAclPolicyResponse { + s.Body = v + return s +} + +type 
DeleteInstanceVpcEndpointLinkedVpcRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` + VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"` +} + +func (s DeleteInstanceVpcEndpointLinkedVpcRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcRequest) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetInstanceId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetModuleName(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.ModuleName = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetVpcId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.VpcId = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcRequest) SetVswitchId(v string) *DeleteInstanceVpcEndpointLinkedVpcRequest { + s.VswitchId = &v + return s +} + +type DeleteInstanceVpcEndpointLinkedVpcResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetCode(v string) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.Code = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetIsSuccess(v bool) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponseBody) SetRequestId(v string) *DeleteInstanceVpcEndpointLinkedVpcResponseBody { + s.RequestId = &v + return s +} + +type DeleteInstanceVpcEndpointLinkedVpcResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteInstanceVpcEndpointLinkedVpcResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteInstanceVpcEndpointLinkedVpcResponse) GoString() string { + return s.String() +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponse) SetHeaders(v map[string]*string) *DeleteInstanceVpcEndpointLinkedVpcResponse { + s.Headers = v + return s +} + +func (s *DeleteInstanceVpcEndpointLinkedVpcResponse) SetBody(v *DeleteInstanceVpcEndpointLinkedVpcResponseBody) *DeleteInstanceVpcEndpointLinkedVpcResponse { + s.Body = v + return s +} + +type DeleteNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s DeleteNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteNamespaceRequest) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceRequest) SetInstanceId(v string) *DeleteNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteNamespaceRequest) SetNamespaceName(v string) *DeleteNamespaceRequest { + s.NamespaceName = 
&v + return s +} + +type DeleteNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceResponseBody) SetCode(v string) *DeleteNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *DeleteNamespaceResponseBody) SetIsSuccess(v bool) *DeleteNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteNamespaceResponseBody) SetRequestId(v string) *DeleteNamespaceResponseBody { + s.RequestId = &v + return s +} + +type DeleteNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteNamespaceResponse) GoString() string { + return s.String() +} + +func (s *DeleteNamespaceResponse) SetHeaders(v map[string]*string) *DeleteNamespaceResponse { + s.Headers = v + return s +} + +func (s *DeleteNamespaceResponse) SetBody(v *DeleteNamespaceResponseBody) *DeleteNamespaceResponse { + s.Body = v + return s +} + +type DeleteRepoBuildRuleRequest struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s DeleteRepoBuildRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoBuildRuleRequest) SetBuildRuleId(v string) *DeleteRepoBuildRuleRequest { + s.BuildRuleId = &v + return s +} + +func (s *DeleteRepoBuildRuleRequest) SetInstanceId(v string) *DeleteRepoBuildRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoBuildRuleRequest) SetRepoId(v string) *DeleteRepoBuildRuleRequest { + s.RepoId = &v + return s +} + +type DeleteRepoBuildRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoBuildRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoBuildRuleResponseBody) SetCode(v string) *DeleteRepoBuildRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoBuildRuleResponseBody) SetIsSuccess(v bool) *DeleteRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoBuildRuleResponseBody) SetRequestId(v string) *DeleteRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s 
*DeleteRepoBuildRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoBuildRuleResponse) SetBody(v *DeleteRepoBuildRuleResponseBody) *DeleteRepoBuildRuleResponse { + s.Body = v + return s +} + +type DeleteRepoSyncRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` +} + +func (s DeleteRepoSyncRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleRequest) SetInstanceId(v string) *DeleteRepoSyncRuleRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoSyncRuleRequest) SetSyncRuleId(v string) *DeleteRepoSyncRuleRequest { + s.SyncRuleId = &v + return s +} + +type DeleteRepoSyncRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoSyncRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleResponseBody) SetCode(v string) *DeleteRepoSyncRuleResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoSyncRuleResponseBody) SetIsSuccess(v bool) *DeleteRepoSyncRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoSyncRuleResponseBody) SetRequestId(v string) *DeleteRepoSyncRuleResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoSyncRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoSyncRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoSyncRuleResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoSyncRuleResponse) SetHeaders(v map[string]*string) *DeleteRepoSyncRuleResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoSyncRuleResponse) SetBody(v *DeleteRepoSyncRuleResponseBody) *DeleteRepoSyncRuleResponse { + s.Body = v + return s +} + +type DeleteRepoTagRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s DeleteRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoTagRequest) SetInstanceId(v string) *DeleteRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoTagRequest) SetRepoId(v string) *DeleteRepoTagRequest { + s.RepoId = &v + return s +} + +func (s *DeleteRepoTagRequest) SetTag(v string) *DeleteRepoTagRequest { + s.Tag = &v + return s +} + +type DeleteRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s 
*DeleteRepoTagResponseBody) SetCode(v string) *DeleteRepoTagResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoTagResponseBody) SetIsSuccess(v bool) *DeleteRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoTagResponseBody) SetRequestId(v string) *DeleteRepoTagResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTagResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoTagResponse) SetHeaders(v map[string]*string) *DeleteRepoTagResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoTagResponse) SetBody(v *DeleteRepoTagResponseBody) *DeleteRepoTagResponse { + s.Body = v + return s +} + +type DeleteRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` +} + +func (s DeleteRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerRequest) SetInstanceId(v string) *DeleteRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepoTriggerRequest) SetRepoId(v string) *DeleteRepoTriggerRequest { + s.RepoId = &v + return s +} + +func (s *DeleteRepoTriggerRequest) SetTriggerId(v string) *DeleteRepoTriggerRequest { + s.TriggerId = &v + return s +} + +type DeleteRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerResponseBody) SetCode(v string) *DeleteRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepoTriggerResponseBody) SetIsSuccess(v bool) *DeleteRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepoTriggerResponseBody) SetRequestId(v string) *DeleteRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepoTriggerResponse) SetHeaders(v map[string]*string) *DeleteRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *DeleteRepoTriggerResponse) SetBody(v *DeleteRepoTriggerResponseBody) *DeleteRepoTriggerResponse { + s.Body = v + return s +} + +type DeleteRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s DeleteRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s 
DeleteRepositoryRequest) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryRequest) SetInstanceId(v string) *DeleteRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *DeleteRepositoryRequest) SetRepoId(v string) *DeleteRepositoryRequest { + s.RepoId = &v + return s +} + +type DeleteRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s DeleteRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryResponseBody) SetCode(v string) *DeleteRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *DeleteRepositoryResponseBody) SetIsSuccess(v bool) *DeleteRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *DeleteRepositoryResponseBody) SetRequestId(v string) *DeleteRepositoryResponseBody { + s.RequestId = &v + return s +} + +type DeleteRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *DeleteRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s DeleteRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s DeleteRepositoryResponse) GoString() string { + return s.String() +} + +func (s *DeleteRepositoryResponse) SetHeaders(v map[string]*string) *DeleteRepositoryResponse { + s.Headers = v + return s +} + +func (s *DeleteRepositoryResponse) SetBody(v *DeleteRepositoryResponseBody) *DeleteRepositoryResponse { + s.Body = v + return s +} + +type GetArtifactBuildTaskRequest struct { + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetArtifactBuildTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskRequest) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskRequest) SetBuildTaskId(v string) *GetArtifactBuildTaskRequest { + s.BuildTaskId = &v + return s +} + +func (s *GetArtifactBuildTaskRequest) SetInstanceId(v string) *GetArtifactBuildTaskRequest { + s.InstanceId = &v + return s +} + +type GetArtifactBuildTaskResponseBody struct { + ArtifactBuildType *string `json:"ArtifactBuildType,omitempty" xml:"ArtifactBuildType,omitempty"` + BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + EndTime *int32 `json:"EndTime,omitempty" xml:"EndTime,omitempty"` + Instructions []*string `json:"Instructions,omitempty" xml:"Instructions,omitempty" type:"Repeated"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SourceArtifact *GetArtifactBuildTaskResponseBodySourceArtifact `json:"SourceArtifact,omitempty" xml:"SourceArtifact,omitempty" type:"Struct"` + StartTime *int32 `json:"StartTime,omitempty" xml:"StartTime,omitempty"` + TargetArtifact *GetArtifactBuildTaskResponseBodyTargetArtifact `json:"TargetArtifact,omitempty" xml:"TargetArtifact,omitempty" type:"Struct"` + TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBody) String() string { + return tea.Prettify(s) +} + 
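+// Usage sketch (illustrative, hypothetical values; not part of the generated
+// code): every scalar field in these tea-style types is a pointer, so unset
+// fields are omitted from the marshaled JSON/XML payload, and each Set*
+// method returns its receiver, so calls chain. A caller might populate a
+// request as:
+//
+//	req := &GetArtifactBuildTaskRequest{}
+//	req.SetInstanceId("cri-example").SetBuildTaskId("task-example")
+//
+// or set pointer fields directly with the tea helpers, e.g.
+// tea.String("cri-example").
+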
+func (s GetArtifactBuildTaskResponseBody) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBody) SetArtifactBuildType(v string) *GetArtifactBuildTaskResponseBody { + s.ArtifactBuildType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetBuildTaskId(v string) *GetArtifactBuildTaskResponseBody { + s.BuildTaskId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetCode(v string) *GetArtifactBuildTaskResponseBody { + s.Code = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetEndTime(v int32) *GetArtifactBuildTaskResponseBody { + s.EndTime = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetInstructions(v []*string) *GetArtifactBuildTaskResponseBody { + s.Instructions = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetIsSuccess(v bool) *GetArtifactBuildTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetRequestId(v string) *GetArtifactBuildTaskResponseBody { + s.RequestId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetSourceArtifact(v *GetArtifactBuildTaskResponseBodySourceArtifact) *GetArtifactBuildTaskResponseBody { + s.SourceArtifact = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetStartTime(v int32) *GetArtifactBuildTaskResponseBody { + s.StartTime = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetTargetArtifact(v *GetArtifactBuildTaskResponseBodyTargetArtifact) *GetArtifactBuildTaskResponseBody { + s.TargetArtifact = v + return s +} + +func (s *GetArtifactBuildTaskResponseBody) SetTaskStatus(v string) *GetArtifactBuildTaskResponseBody { + s.TaskStatus = &v + return s +} + +type GetArtifactBuildTaskResponseBodySourceArtifact struct { + ArtifactType *string `json:"ArtifactType,omitempty" xml:"ArtifactType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBodySourceArtifact) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponseBodySourceArtifact) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetArtifactType(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.ArtifactType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetRepoId(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.RepoId = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodySourceArtifact) SetVersion(v string) *GetArtifactBuildTaskResponseBodySourceArtifact { + s.Version = &v + return s +} + +type GetArtifactBuildTaskResponseBodyTargetArtifact struct { + ArtifactType *string `json:"ArtifactType,omitempty" xml:"ArtifactType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s GetArtifactBuildTaskResponseBodyTargetArtifact) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponseBodyTargetArtifact) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponseBodyTargetArtifact) SetArtifactType(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.ArtifactType = &v + return s +} + +func (s *GetArtifactBuildTaskResponseBodyTargetArtifact) SetRepoId(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.RepoId = &v + return s +} + +func (s 
*GetArtifactBuildTaskResponseBodyTargetArtifact) SetVersion(v string) *GetArtifactBuildTaskResponseBodyTargetArtifact { + s.Version = &v + return s +} + +type GetArtifactBuildTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetArtifactBuildTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetArtifactBuildTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s GetArtifactBuildTaskResponse) GoString() string { + return s.String() +} + +func (s *GetArtifactBuildTaskResponse) SetHeaders(v map[string]*string) *GetArtifactBuildTaskResponse { + s.Headers = v + return s +} + +func (s *GetArtifactBuildTaskResponse) SetBody(v *GetArtifactBuildTaskResponseBody) *GetArtifactBuildTaskResponse { + s.Body = v + return s +} + +type GetAuthorizationTokenRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetAuthorizationTokenRequest) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenRequest) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenRequest) SetInstanceId(v string) *GetAuthorizationTokenRequest { + s.InstanceId = &v + return s +} + +type GetAuthorizationTokenResponseBody struct { + AuthorizationToken *string `json:"AuthorizationToken,omitempty" xml:"AuthorizationToken,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + ExpireTime *int64 `json:"ExpireTime,omitempty" xml:"ExpireTime,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TempUsername *string `json:"TempUsername,omitempty" xml:"TempUsername,omitempty"` +} + +func (s GetAuthorizationTokenResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenResponseBody) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenResponseBody) SetAuthorizationToken(v string) *GetAuthorizationTokenResponseBody { + s.AuthorizationToken = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetCode(v string) *GetAuthorizationTokenResponseBody { + s.Code = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetExpireTime(v int64) *GetAuthorizationTokenResponseBody { + s.ExpireTime = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetIsSuccess(v bool) *GetAuthorizationTokenResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetRequestId(v string) *GetAuthorizationTokenResponseBody { + s.RequestId = &v + return s +} + +func (s *GetAuthorizationTokenResponseBody) SetTempUsername(v string) *GetAuthorizationTokenResponseBody { + s.TempUsername = &v + return s +} + +type GetAuthorizationTokenResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetAuthorizationTokenResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetAuthorizationTokenResponse) String() string { + return tea.Prettify(s) +} + +func (s GetAuthorizationTokenResponse) GoString() string { + return s.String() +} + +func (s *GetAuthorizationTokenResponse) SetHeaders(v map[string]*string) *GetAuthorizationTokenResponse { + s.Headers = v + return s +} + +func (s *GetAuthorizationTokenResponse) SetBody(v *GetAuthorizationTokenResponseBody) *GetAuthorizationTokenResponse { + s.Body = v + return s +} + +type 
GetChainRequest struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetChainRequest) String() string { + return tea.Prettify(s) +} + +func (s GetChainRequest) GoString() string { + return s.String() +} + +func (s *GetChainRequest) SetChainId(v string) *GetChainRequest { + s.ChainId = &v + return s +} + +func (s *GetChainRequest) SetInstanceId(v string) *GetChainRequest { + s.InstanceId = &v + return s +} + +type GetChainResponseBody struct { + ChainConfig *GetChainResponseBodyChainConfig `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty" type:"Struct"` + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ScopeId *string `json:"ScopeId,omitempty" xml:"ScopeId,omitempty"` + ScopeType *string `json:"ScopeType,omitempty" xml:"ScopeType,omitempty"` +} + +func (s GetChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBody) GoString() string { + return s.String() +} + +func (s *GetChainResponseBody) SetChainConfig(v *GetChainResponseBodyChainConfig) *GetChainResponseBody { + s.ChainConfig = v + return s +} + +func (s *GetChainResponseBody) SetChainId(v string) *GetChainResponseBody { + s.ChainId = &v + return s +} + +func (s *GetChainResponseBody) SetCode(v string) *GetChainResponseBody { + s.Code = &v + return s +} + +func (s *GetChainResponseBody) SetCreateTime(v int64) *GetChainResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetChainResponseBody) SetDescription(v string) *GetChainResponseBody { + s.Description = &v + return s +} + +func (s *GetChainResponseBody) SetInstanceId(v string) *GetChainResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChainResponseBody) SetIsSuccess(v bool) *GetChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChainResponseBody) SetModifiedTime(v int64) *GetChainResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetChainResponseBody) SetName(v string) *GetChainResponseBody { + s.Name = &v + return s +} + +func (s *GetChainResponseBody) SetRequestId(v string) *GetChainResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChainResponseBody) SetScopeId(v string) *GetChainResponseBody { + s.ScopeId = &v + return s +} + +func (s *GetChainResponseBody) SetScopeType(v string) *GetChainResponseBody { + s.ScopeType = &v + return s +} + +type GetChainResponseBodyChainConfig struct { + ChainConfigId *string `json:"ChainConfigId,omitempty" xml:"ChainConfigId,omitempty"` + IsActive *bool `json:"IsActive,omitempty" xml:"IsActive,omitempty"` + Nodes []*GetChainResponseBodyChainConfigNodes `json:"Nodes,omitempty" xml:"Nodes,omitempty" type:"Repeated"` + Routers []*GetChainResponseBodyChainConfigRouters `json:"Routers,omitempty" xml:"Routers,omitempty" type:"Repeated"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s 
GetChainResponseBodyChainConfig) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfig) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfig) SetChainConfigId(v string) *GetChainResponseBodyChainConfig { + s.ChainConfigId = &v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetIsActive(v bool) *GetChainResponseBodyChainConfig { + s.IsActive = &v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetNodes(v []*GetChainResponseBodyChainConfigNodes) *GetChainResponseBodyChainConfig { + s.Nodes = v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetRouters(v []*GetChainResponseBodyChainConfigRouters) *GetChainResponseBodyChainConfig { + s.Routers = v + return s +} + +func (s *GetChainResponseBodyChainConfig) SetVersion(v string) *GetChainResponseBodyChainConfig { + s.Version = &v + return s +} + +type GetChainResponseBodyChainConfigNodes struct { + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + NodeConfig *GetChainResponseBodyChainConfigNodesNodeConfig `json:"NodeConfig,omitempty" xml:"NodeConfig,omitempty" type:"Struct"` + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodes) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodes) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodes) SetEnable(v bool) *GetChainResponseBodyChainConfigNodes { + s.Enable = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodes) SetNodeConfig(v *GetChainResponseBodyChainConfigNodesNodeConfig) *GetChainResponseBodyChainConfigNodes { + s.NodeConfig = v + return s +} + +func (s *GetChainResponseBodyChainConfigNodes) SetNodeName(v string) *GetChainResponseBodyChainConfigNodes { + s.NodeName = &v + return s +} + +type GetChainResponseBodyChainConfigNodesNodeConfig struct { + DenyPolicy *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy `json:"DenyPolicy,omitempty" xml:"DenyPolicy,omitempty" type:"Struct"` + Retry *int32 `json:"Retry,omitempty" xml:"Retry,omitempty"` + ScanEngine *string `json:"ScanEngine,omitempty" xml:"ScanEngine,omitempty"` + Timeout *int64 `json:"Timeout,omitempty" xml:"Timeout,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfig) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfig) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetDenyPolicy(v *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.DenyPolicy = v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetRetry(v int32) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.Retry = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetScanEngine(v string) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.ScanEngine = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfig) SetTimeout(v int64) *GetChainResponseBodyChainConfigNodesNodeConfig { + s.Timeout = &v + return s +} + +type GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy struct { + Action *string `json:"Action,omitempty" xml:"Action,omitempty"` + IssueCount *string `json:"IssueCount,omitempty" xml:"IssueCount,omitempty"` + IssueLevel *string `json:"IssueLevel,omitempty" xml:"IssueLevel,omitempty"` + Logic *string `json:"Logic,omitempty" 
xml:"Logic,omitempty"` +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetAction(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.Action = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetIssueCount(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.IssueCount = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetIssueLevel(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.IssueLevel = &v + return s +} + +func (s *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy) SetLogic(v string) *GetChainResponseBodyChainConfigNodesNodeConfigDenyPolicy { + s.Logic = &v + return s +} + +type GetChainResponseBodyChainConfigRouters struct { + From *GetChainResponseBodyChainConfigRoutersFrom `json:"From,omitempty" xml:"From,omitempty" type:"Struct"` + To *GetChainResponseBodyChainConfigRoutersTo `json:"To,omitempty" xml:"To,omitempty" type:"Struct"` +} + +func (s GetChainResponseBodyChainConfigRouters) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRouters) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRouters) SetFrom(v *GetChainResponseBodyChainConfigRoutersFrom) *GetChainResponseBodyChainConfigRouters { + s.From = v + return s +} + +func (s *GetChainResponseBodyChainConfigRouters) SetTo(v *GetChainResponseBodyChainConfigRoutersTo) *GetChainResponseBodyChainConfigRouters { + s.To = v + return s +} + +type GetChainResponseBodyChainConfigRoutersFrom struct { + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigRoutersFrom) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRoutersFrom) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRoutersFrom) SetNodeName(v string) *GetChainResponseBodyChainConfigRoutersFrom { + s.NodeName = &v + return s +} + +type GetChainResponseBodyChainConfigRoutersTo struct { + NodeName *string `json:"NodeName,omitempty" xml:"NodeName,omitempty"` +} + +func (s GetChainResponseBodyChainConfigRoutersTo) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponseBodyChainConfigRoutersTo) GoString() string { + return s.String() +} + +func (s *GetChainResponseBodyChainConfigRoutersTo) SetNodeName(v string) *GetChainResponseBodyChainConfigRoutersTo { + s.NodeName = &v + return s +} + +type GetChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChainResponse) String() string { + return tea.Prettify(s) +} + +func (s GetChainResponse) GoString() string { + return s.String() +} + +func (s *GetChainResponse) SetHeaders(v map[string]*string) *GetChainResponse { + s.Headers = v + return s +} + +func (s *GetChainResponse) SetBody(v *GetChainResponseBody) *GetChainResponse { + s.Body = v + return s +} + +type GetChartNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" 
xml:"NamespaceName,omitempty"` +} + +func (s GetChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceRequest) SetInstanceId(v string) *GetChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *GetChartNamespaceRequest) SetNamespaceName(v string) *GetChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type GetChartNamespaceResponseBody struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s GetChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceResponseBody) SetAutoCreateRepo(v bool) *GetChartNamespaceResponseBody { + s.AutoCreateRepo = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetCode(v string) *GetChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetDefaultRepoType(v string) *GetChartNamespaceResponseBody { + s.DefaultRepoType = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetInstanceId(v string) *GetChartNamespaceResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetIsSuccess(v bool) *GetChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceId(v string) *GetChartNamespaceResponseBody { + s.NamespaceId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceName(v string) *GetChartNamespaceResponseBody { + s.NamespaceName = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetNamespaceStatus(v string) *GetChartNamespaceResponseBody { + s.NamespaceStatus = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetRequestId(v string) *GetChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChartNamespaceResponseBody) SetResourceGroupId(v string) *GetChartNamespaceResponseBody { + s.ResourceGroupId = &v + return s +} + +type GetChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s GetChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *GetChartNamespaceResponse) SetHeaders(v map[string]*string) *GetChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *GetChartNamespaceResponse) SetBody(v *GetChartNamespaceResponseBody) *GetChartNamespaceResponse { + s.Body = v + return s +} + +type 
GetChartRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s GetChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryRequest) SetInstanceId(v string) *GetChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *GetChartRepositoryRequest) SetRepoName(v string) *GetChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *GetChartRepositoryRequest) SetRepoNamespaceName(v string) *GetChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +type GetChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s GetChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryResponseBody) SetCode(v string) *GetChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetCreateTime(v int64) *GetChartRepositoryResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetInstanceId(v string) *GetChartRepositoryResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetIsSuccess(v bool) *GetChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetModifiedTime(v int64) *GetChartRepositoryResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoId(v string) *GetChartRepositoryResponseBody { + s.RepoId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoName(v string) *GetChartRepositoryResponseBody { + s.RepoName = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoNamespaceName(v string) *GetChartRepositoryResponseBody { + s.RepoNamespaceName = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoStatus(v string) *GetChartRepositoryResponseBody { + s.RepoStatus = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRepoType(v string) *GetChartRepositoryResponseBody { + s.RepoType = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetRequestId(v string) *GetChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +func (s *GetChartRepositoryResponseBody) SetSummary(v string) *GetChartRepositoryResponseBody { + 
s.Summary = &v + return s +} + +type GetChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s GetChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *GetChartRepositoryResponse) SetHeaders(v map[string]*string) *GetChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *GetChartRepositoryResponse) SetBody(v *GetChartRepositoryResponseBody) *GetChartRepositoryResponse { + s.Body = v + return s +} + +type GetInstanceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetInstanceRequest) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceRequest) GoString() string { + return s.String() +} + +func (s *GetInstanceRequest) SetInstanceId(v string) *GetInstanceRequest { + s.InstanceId = &v + return s +} + +type GetInstanceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"` + InstanceSpecification *string `json:"InstanceSpecification,omitempty" xml:"InstanceSpecification,omitempty"` + InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s GetInstanceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceResponseBody) SetCode(v string) *GetInstanceResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceResponseBody) SetCreateTime(v int64) *GetInstanceResponseBody { + s.CreateTime = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceId(v string) *GetInstanceResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceName(v string) *GetInstanceResponseBody { + s.InstanceName = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceSpecification(v string) *GetInstanceResponseBody { + s.InstanceSpecification = &v + return s +} + +func (s *GetInstanceResponseBody) SetInstanceStatus(v string) *GetInstanceResponseBody { + s.InstanceStatus = &v + return s +} + +func (s *GetInstanceResponseBody) SetIsSuccess(v bool) *GetInstanceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceResponseBody) SetModifiedTime(v int64) *GetInstanceResponseBody { + s.ModifiedTime = &v + return s +} + +func (s *GetInstanceResponseBody) SetRequestId(v string) *GetInstanceResponseBody { + s.RequestId = &v + return s +} + +func (s *GetInstanceResponseBody) SetResourceGroupId(v string) *GetInstanceResponseBody { + s.ResourceGroupId = &v + return s +} + +type GetInstanceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceResponseBody `json:"body,omitempty" 
xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceResponse) SetHeaders(v map[string]*string) *GetInstanceResponse { + s.Headers = v + return s +} + +func (s *GetInstanceResponse) SetBody(v *GetInstanceResponseBody) *GetInstanceResponse { + s.Body = v + return s +} + +type GetInstanceCountResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Count *int32 `json:"Count,omitempty" xml:"Count,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetInstanceCountResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceCountResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceCountResponseBody) SetCode(v string) *GetInstanceCountResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceCountResponseBody) SetCount(v int32) *GetInstanceCountResponseBody { + s.Count = &v + return s +} + +func (s *GetInstanceCountResponseBody) SetIsSuccess(v bool) *GetInstanceCountResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceCountResponseBody) SetRequestId(v string) *GetInstanceCountResponseBody { + s.RequestId = &v + return s +} + +type GetInstanceCountResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceCountResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceCountResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceCountResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceCountResponse) SetHeaders(v map[string]*string) *GetInstanceCountResponse { + s.Headers = v + return s +} + +func (s *GetInstanceCountResponse) SetBody(v *GetInstanceCountResponseBody) *GetInstanceCountResponse { + s.Body = v + return s +} + +type GetInstanceEndpointRequest struct { + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s GetInstanceEndpointRequest) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceEndpointRequest) GoString() string { + return s.String() +} + +func (s *GetInstanceEndpointRequest) SetEndpointType(v string) *GetInstanceEndpointRequest { + s.EndpointType = &v + return s +} + +func (s *GetInstanceEndpointRequest) SetInstanceId(v string) *GetInstanceEndpointRequest { + s.InstanceId = &v + return s +} + +func (s *GetInstanceEndpointRequest) SetModuleName(v string) *GetInstanceEndpointRequest { + s.ModuleName = &v + return s +} + +type GetInstanceEndpointResponseBody struct { + AclEnable *bool `json:"AclEnable,omitempty" xml:"AclEnable,omitempty"` + AclEntries []*GetInstanceEndpointResponseBodyAclEntries `json:"AclEntries,omitempty" xml:"AclEntries,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Domains []*GetInstanceEndpointResponseBodyDomains `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"` + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" 
xml:"RequestId,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s GetInstanceEndpointResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceEndpointResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceEndpointResponseBody) SetAclEnable(v bool) *GetInstanceEndpointResponseBody { + s.AclEnable = &v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetAclEntries(v []*GetInstanceEndpointResponseBodyAclEntries) *GetInstanceEndpointResponseBody { + s.AclEntries = v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetCode(v string) *GetInstanceEndpointResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetDomains(v []*GetInstanceEndpointResponseBodyDomains) *GetInstanceEndpointResponseBody { + s.Domains = v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetEnable(v bool) *GetInstanceEndpointResponseBody { + s.Enable = &v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetIsSuccess(v bool) *GetInstanceEndpointResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetRequestId(v string) *GetInstanceEndpointResponseBody { + s.RequestId = &v + return s +} + +func (s *GetInstanceEndpointResponseBody) SetStatus(v string) *GetInstanceEndpointResponseBody { + s.Status = &v + return s +} + +type GetInstanceEndpointResponseBodyAclEntries struct { + Comment *string `json:"Comment,omitempty" xml:"Comment,omitempty"` + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` +} + +func (s GetInstanceEndpointResponseBodyAclEntries) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceEndpointResponseBodyAclEntries) GoString() string { + return s.String() +} + +func (s *GetInstanceEndpointResponseBodyAclEntries) SetComment(v string) *GetInstanceEndpointResponseBodyAclEntries { + s.Comment = &v + return s +} + +func (s *GetInstanceEndpointResponseBodyAclEntries) SetEntry(v string) *GetInstanceEndpointResponseBodyAclEntries { + s.Entry = &v + return s +} + +type GetInstanceEndpointResponseBodyDomains struct { + Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"` + Type *string `json:"Type,omitempty" xml:"Type,omitempty"` +} + +func (s GetInstanceEndpointResponseBodyDomains) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceEndpointResponseBodyDomains) GoString() string { + return s.String() +} + +func (s *GetInstanceEndpointResponseBodyDomains) SetDomain(v string) *GetInstanceEndpointResponseBodyDomains { + s.Domain = &v + return s +} + +func (s *GetInstanceEndpointResponseBodyDomains) SetType(v string) *GetInstanceEndpointResponseBodyDomains { + s.Type = &v + return s +} + +type GetInstanceEndpointResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceEndpointResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceEndpointResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceEndpointResponse) SetHeaders(v map[string]*string) *GetInstanceEndpointResponse { + s.Headers = v + return s +} + +func (s *GetInstanceEndpointResponse) SetBody(v *GetInstanceEndpointResponseBody) *GetInstanceEndpointResponse { + s.Body = v + return s +} + +type GetInstanceUsageRequest struct { + InstanceId *string `json:"InstanceId,omitempty" 
xml:"InstanceId,omitempty"` +} + +func (s GetInstanceUsageRequest) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceUsageRequest) GoString() string { + return s.String() +} + +func (s *GetInstanceUsageRequest) SetInstanceId(v string) *GetInstanceUsageRequest { + s.InstanceId = &v + return s +} + +type GetInstanceUsageResponseBody struct { + ChartNamespaceQuota *string `json:"ChartNamespaceQuota,omitempty" xml:"ChartNamespaceQuota,omitempty"` + ChartNamespaceUsage *string `json:"ChartNamespaceUsage,omitempty" xml:"ChartNamespaceUsage,omitempty"` + ChartRepoQuota *string `json:"ChartRepoQuota,omitempty" xml:"ChartRepoQuota,omitempty"` + ChartRepoUsage *string `json:"ChartRepoUsage,omitempty" xml:"ChartRepoUsage,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + NamespaceQuota *string `json:"NamespaceQuota,omitempty" xml:"NamespaceQuota,omitempty"` + NamespaceUsage *string `json:"NamespaceUsage,omitempty" xml:"NamespaceUsage,omitempty"` + RepoQuota *string `json:"RepoQuota,omitempty" xml:"RepoQuota,omitempty"` + RepoUsage *string `json:"RepoUsage,omitempty" xml:"RepoUsage,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetInstanceUsageResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceUsageResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceUsageResponseBody) SetChartNamespaceQuota(v string) *GetInstanceUsageResponseBody { + s.ChartNamespaceQuota = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetChartNamespaceUsage(v string) *GetInstanceUsageResponseBody { + s.ChartNamespaceUsage = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetChartRepoQuota(v string) *GetInstanceUsageResponseBody { + s.ChartRepoQuota = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetChartRepoUsage(v string) *GetInstanceUsageResponseBody { + s.ChartRepoUsage = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetCode(v string) *GetInstanceUsageResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetIsSuccess(v bool) *GetInstanceUsageResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetNamespaceQuota(v string) *GetInstanceUsageResponseBody { + s.NamespaceQuota = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetNamespaceUsage(v string) *GetInstanceUsageResponseBody { + s.NamespaceUsage = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetRepoQuota(v string) *GetInstanceUsageResponseBody { + s.RepoQuota = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetRepoUsage(v string) *GetInstanceUsageResponseBody { + s.RepoUsage = &v + return s +} + +func (s *GetInstanceUsageResponseBody) SetRequestId(v string) *GetInstanceUsageResponseBody { + s.RequestId = &v + return s +} + +type GetInstanceUsageResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceUsageResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceUsageResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceUsageResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceUsageResponse) SetHeaders(v map[string]*string) *GetInstanceUsageResponse { + s.Headers = v + return s +} + +func (s *GetInstanceUsageResponse) SetBody(v 
*GetInstanceUsageResponseBody) *GetInstanceUsageResponse { + s.Body = v + return s +} + +type GetInstanceVpcEndpointRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s GetInstanceVpcEndpointRequest) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceVpcEndpointRequest) GoString() string { + return s.String() +} + +func (s *GetInstanceVpcEndpointRequest) SetInstanceId(v string) *GetInstanceVpcEndpointRequest { + s.InstanceId = &v + return s +} + +func (s *GetInstanceVpcEndpointRequest) SetModuleName(v string) *GetInstanceVpcEndpointRequest { + s.ModuleName = &v + return s +} + +type GetInstanceVpcEndpointResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Domains []*string `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"` + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + LinkedVpcs []*GetInstanceVpcEndpointResponseBodyLinkedVpcs `json:"LinkedVpcs,omitempty" xml:"LinkedVpcs,omitempty" type:"Repeated"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetInstanceVpcEndpointResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceVpcEndpointResponseBody) GoString() string { + return s.String() +} + +func (s *GetInstanceVpcEndpointResponseBody) SetCode(v string) *GetInstanceVpcEndpointResponseBody { + s.Code = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBody) SetDomains(v []*string) *GetInstanceVpcEndpointResponseBody { + s.Domains = v + return s +} + +func (s *GetInstanceVpcEndpointResponseBody) SetEnable(v bool) *GetInstanceVpcEndpointResponseBody { + s.Enable = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBody) SetIsSuccess(v bool) *GetInstanceVpcEndpointResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBody) SetLinkedVpcs(v []*GetInstanceVpcEndpointResponseBodyLinkedVpcs) *GetInstanceVpcEndpointResponseBody { + s.LinkedVpcs = v + return s +} + +func (s *GetInstanceVpcEndpointResponseBody) SetRequestId(v string) *GetInstanceVpcEndpointResponseBody { + s.RequestId = &v + return s +} + +type GetInstanceVpcEndpointResponseBodyLinkedVpcs struct { + DefaultAccess *bool `json:"DefaultAccess,omitempty" xml:"DefaultAccess,omitempty"` + Ip *string `json:"Ip,omitempty" xml:"Ip,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` + VswitchId *string `json:"VswitchId,omitempty" xml:"VswitchId,omitempty"` +} + +func (s GetInstanceVpcEndpointResponseBodyLinkedVpcs) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceVpcEndpointResponseBodyLinkedVpcs) GoString() string { + return s.String() +} + +func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetDefaultAccess(v bool) *GetInstanceVpcEndpointResponseBodyLinkedVpcs { + s.DefaultAccess = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetIp(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs { + s.Ip = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetStatus(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs { + s.Status = &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetVpcId(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs { + s.VpcId 
= &v + return s +} + +func (s *GetInstanceVpcEndpointResponseBodyLinkedVpcs) SetVswitchId(v string) *GetInstanceVpcEndpointResponseBodyLinkedVpcs { + s.VswitchId = &v + return s +} + +type GetInstanceVpcEndpointResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetInstanceVpcEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetInstanceVpcEndpointResponse) String() string { + return tea.Prettify(s) +} + +func (s GetInstanceVpcEndpointResponse) GoString() string { + return s.String() +} + +func (s *GetInstanceVpcEndpointResponse) SetHeaders(v map[string]*string) *GetInstanceVpcEndpointResponse { + s.Headers = v + return s +} + +func (s *GetInstanceVpcEndpointResponse) SetBody(v *GetInstanceVpcEndpointResponseBody) *GetInstanceVpcEndpointResponse { + s.Body = v + return s +} + +type GetNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s GetNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s GetNamespaceRequest) GoString() string { + return s.String() +} + +func (s *GetNamespaceRequest) SetInstanceId(v string) *GetNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *GetNamespaceRequest) SetNamespaceId(v string) *GetNamespaceRequest { + s.NamespaceId = &v + return s +} + +func (s *GetNamespaceRequest) SetNamespaceName(v string) *GetNamespaceRequest { + s.NamespaceName = &v + return s +} + +type GetNamespaceResponseBody struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Tags []*GetNamespaceResponseBodyTags `json:"Tags,omitempty" xml:"Tags,omitempty" type:"Repeated"` +} + +func (s GetNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *GetNamespaceResponseBody) SetAutoCreateRepo(v bool) *GetNamespaceResponseBody { + s.AutoCreateRepo = &v + return s +} + +func (s *GetNamespaceResponseBody) SetCode(v string) *GetNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *GetNamespaceResponseBody) SetDefaultRepoType(v string) *GetNamespaceResponseBody { + s.DefaultRepoType = &v + return s +} + +func (s *GetNamespaceResponseBody) SetInstanceId(v string) *GetNamespaceResponseBody { + s.InstanceId = &v + return s +} + +func (s *GetNamespaceResponseBody) SetIsSuccess(v bool) *GetNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetNamespaceResponseBody) SetNamespaceId(v string) *GetNamespaceResponseBody { + s.NamespaceId = &v + 
return s +} + +func (s *GetNamespaceResponseBody) SetNamespaceName(v string) *GetNamespaceResponseBody { + s.NamespaceName = &v + return s +} + +func (s *GetNamespaceResponseBody) SetNamespaceStatus(v string) *GetNamespaceResponseBody { + s.NamespaceStatus = &v + return s +} + +func (s *GetNamespaceResponseBody) SetRequestId(v string) *GetNamespaceResponseBody { + s.RequestId = &v + return s +} + +func (s *GetNamespaceResponseBody) SetResourceGroupId(v string) *GetNamespaceResponseBody { + s.ResourceGroupId = &v + return s +} + +func (s *GetNamespaceResponseBody) SetTags(v []*GetNamespaceResponseBodyTags) *GetNamespaceResponseBody { + s.Tags = v + return s +} + +type GetNamespaceResponseBodyTags struct { + TagKey *string `json:"TagKey,omitempty" xml:"TagKey,omitempty"` + TagValue *string `json:"TagValue,omitempty" xml:"TagValue,omitempty"` +} + +func (s GetNamespaceResponseBodyTags) String() string { + return tea.Prettify(s) +} + +func (s GetNamespaceResponseBodyTags) GoString() string { + return s.String() +} + +func (s *GetNamespaceResponseBodyTags) SetTagKey(v string) *GetNamespaceResponseBodyTags { + s.TagKey = &v + return s +} + +func (s *GetNamespaceResponseBodyTags) SetTagValue(v string) *GetNamespaceResponseBodyTags { + s.TagValue = &v + return s +} + +type GetNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s GetNamespaceResponse) GoString() string { + return s.String() +} + +func (s *GetNamespaceResponse) SetHeaders(v map[string]*string) *GetNamespaceResponse { + s.Headers = v + return s +} + +func (s *GetNamespaceResponse) SetBody(v *GetNamespaceResponseBody) *GetNamespaceResponse { + s.Body = v + return s +} + +type GetRepoBuildRecordRequest struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` +} + +func (s GetRepoBuildRecordRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordRequest) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordRequest) SetBuildRecordId(v string) *GetRepoBuildRecordRequest { + s.BuildRecordId = &v + return s +} + +func (s *GetRepoBuildRecordRequest) SetInstanceId(v string) *GetRepoBuildRecordRequest { + s.InstanceId = &v + return s +} + +type GetRepoBuildRecordResponseBody struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + EndTime *int64 `json:"EndTime,omitempty" xml:"EndTime,omitempty"` + Image *GetRepoBuildRecordResponseBodyImage `json:"Image,omitempty" xml:"Image,omitempty" type:"Struct"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + StartTime *int64 `json:"StartTime,omitempty" xml:"StartTime,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s GetRepoBuildRecordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordResponseBody) SetBuildRecordId(v string) *GetRepoBuildRecordResponseBody { + s.BuildRecordId = &v + return s +} + +func (s 
*GetRepoBuildRecordResponseBody) SetCode(v string) *GetRepoBuildRecordResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetEndTime(v int64) *GetRepoBuildRecordResponseBody { + s.EndTime = &v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetImage(v *GetRepoBuildRecordResponseBodyImage) *GetRepoBuildRecordResponseBody { + s.Image = v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetIsSuccess(v bool) *GetRepoBuildRecordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetRequestId(v string) *GetRepoBuildRecordResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetStartTime(v int64) *GetRepoBuildRecordResponseBody { + s.StartTime = &v + return s +} + +func (s *GetRepoBuildRecordResponseBody) SetStatus(v string) *GetRepoBuildRecordResponseBody { + s.Status = &v + return s +} + +type GetRepoBuildRecordResponseBodyImage struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetRepoBuildRecordResponseBodyImage) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordResponseBodyImage) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordResponseBodyImage) SetImageTag(v string) *GetRepoBuildRecordResponseBodyImage { + s.ImageTag = &v + return s +} + +func (s *GetRepoBuildRecordResponseBodyImage) SetRepoName(v string) *GetRepoBuildRecordResponseBodyImage { + s.RepoName = &v + return s +} + +func (s *GetRepoBuildRecordResponseBodyImage) SetRepoNamespaceName(v string) *GetRepoBuildRecordResponseBodyImage { + s.RepoNamespaceName = &v + return s +} + +type GetRepoBuildRecordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoBuildRecordResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordResponse) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordResponse) SetHeaders(v map[string]*string) *GetRepoBuildRecordResponse { + s.Headers = v + return s +} + +func (s *GetRepoBuildRecordResponse) SetBody(v *GetRepoBuildRecordResponseBody) *GetRepoBuildRecordResponse { + s.Body = v + return s +} + +type GetRepoBuildRecordStatusRequest struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s GetRepoBuildRecordStatusRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordStatusRequest) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordStatusRequest) SetBuildRecordId(v string) *GetRepoBuildRecordStatusRequest { + s.BuildRecordId = &v + return s +} + +func (s *GetRepoBuildRecordStatusRequest) SetInstanceId(v string) *GetRepoBuildRecordStatusRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoBuildRecordStatusRequest) SetRepoId(v string) *GetRepoBuildRecordStatusRequest { + s.RepoId = &v + return s +} + +type GetRepoBuildRecordStatusResponseBody struct { + BuildStatus *string `json:"BuildStatus,omitempty" xml:"BuildStatus,omitempty"` + Code *string 
`json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetRepoBuildRecordStatusResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordStatusResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordStatusResponseBody) SetBuildStatus(v string) *GetRepoBuildRecordStatusResponseBody { + s.BuildStatus = &v + return s +} + +func (s *GetRepoBuildRecordStatusResponseBody) SetCode(v string) *GetRepoBuildRecordStatusResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoBuildRecordStatusResponseBody) SetIsSuccess(v bool) *GetRepoBuildRecordStatusResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoBuildRecordStatusResponseBody) SetRequestId(v string) *GetRepoBuildRecordStatusResponseBody { + s.RequestId = &v + return s +} + +type GetRepoBuildRecordStatusResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoBuildRecordStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoBuildRecordStatusResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoBuildRecordStatusResponse) GoString() string { + return s.String() +} + +func (s *GetRepoBuildRecordStatusResponse) SetHeaders(v map[string]*string) *GetRepoBuildRecordStatusResponse { + s.Headers = v + return s +} + +func (s *GetRepoBuildRecordStatusResponse) SetBody(v *GetRepoBuildRecordStatusResponseBody) *GetRepoBuildRecordStatusResponse { + s.Body = v + return s +} + +type GetRepoSourceCodeRepoRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s GetRepoSourceCodeRepoRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSourceCodeRepoRequest) GoString() string { + return s.String() +} + +func (s *GetRepoSourceCodeRepoRequest) SetInstanceId(v string) *GetRepoSourceCodeRepoRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoSourceCodeRepoRequest) SetRepoId(v string) *GetRepoSourceCodeRepoRequest { + s.RepoId = &v + return s +} + +type GetRepoSourceCodeRepoResponseBody struct { + AutoBuild *string `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CodeRepoDomain *string `json:"CodeRepoDomain,omitempty" xml:"CodeRepoDomain,omitempty"` + CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"` + CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"` + CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"` + DisableCacheBuild *string `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + OverseaBuild *string `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetRepoSourceCodeRepoResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSourceCodeRepoResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetAutoBuild(v string) *GetRepoSourceCodeRepoResponseBody 
{ + s.AutoBuild = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetCode(v string) *GetRepoSourceCodeRepoResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoDomain(v string) *GetRepoSourceCodeRepoResponseBody { + s.CodeRepoDomain = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoName(v string) *GetRepoSourceCodeRepoResponseBody { + s.CodeRepoName = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoNamespaceName(v string) *GetRepoSourceCodeRepoResponseBody { + s.CodeRepoNamespaceName = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetCodeRepoType(v string) *GetRepoSourceCodeRepoResponseBody { + s.CodeRepoType = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetDisableCacheBuild(v string) *GetRepoSourceCodeRepoResponseBody { + s.DisableCacheBuild = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *GetRepoSourceCodeRepoResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetOverseaBuild(v string) *GetRepoSourceCodeRepoResponseBody { + s.OverseaBuild = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetRepoId(v string) *GetRepoSourceCodeRepoResponseBody { + s.RepoId = &v + return s +} + +func (s *GetRepoSourceCodeRepoResponseBody) SetRequestId(v string) *GetRepoSourceCodeRepoResponseBody { + s.RequestId = &v + return s +} + +type GetRepoSourceCodeRepoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoSourceCodeRepoResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSourceCodeRepoResponse) GoString() string { + return s.String() +} + +func (s *GetRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *GetRepoSourceCodeRepoResponse { + s.Headers = v + return s +} + +func (s *GetRepoSourceCodeRepoResponse) SetBody(v *GetRepoSourceCodeRepoResponseBody) *GetRepoSourceCodeRepoResponse { + s.Body = v + return s +} + +type GetRepoSyncTaskRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` +} + +func (s GetRepoSyncTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskRequest) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskRequest) SetInstanceId(v string) *GetRepoSyncTaskRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoSyncTaskRequest) SetSyncTaskId(v string) *GetRepoSyncTaskRequest { + s.SyncTaskId = &v + return s +} + +type GetRepoSyncTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"` + ImageFrom *GetRepoSyncTaskResponseBodyImageFrom `json:"ImageFrom,omitempty" xml:"ImageFrom,omitempty" type:"Struct"` + ImageTo *GetRepoSyncTaskResponseBodyImageTo `json:"ImageTo,omitempty" xml:"ImageTo,omitempty" type:"Struct"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + LayerTasks []*GetRepoSyncTaskResponseBodyLayerTasks `json:"LayerTasks,omitempty" xml:"LayerTasks,omitempty" type:"Repeated"` + Progress *int64 `json:"Progress,omitempty" xml:"Progress,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + 
SyncBatchTaskId *string `json:"SyncBatchTaskId,omitempty" xml:"SyncBatchTaskId,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` + SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"` + SyncTransAccelerate *bool `json:"SyncTransAccelerate,omitempty" xml:"SyncTransAccelerate,omitempty"` + SyncedSize *int64 `json:"SyncedSize,omitempty" xml:"SyncedSize,omitempty"` + TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"` + TaskTrigger *string `json:"TaskTrigger,omitempty" xml:"TaskTrigger,omitempty"` +} + +func (s GetRepoSyncTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskResponseBody) SetCode(v string) *GetRepoSyncTaskResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetCrossUser(v bool) *GetRepoSyncTaskResponseBody { + s.CrossUser = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetImageFrom(v *GetRepoSyncTaskResponseBodyImageFrom) *GetRepoSyncTaskResponseBody { + s.ImageFrom = v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetImageTo(v *GetRepoSyncTaskResponseBodyImageTo) *GetRepoSyncTaskResponseBody { + s.ImageTo = v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetIsSuccess(v bool) *GetRepoSyncTaskResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetLayerTasks(v []*GetRepoSyncTaskResponseBodyLayerTasks) *GetRepoSyncTaskResponseBody { + s.LayerTasks = v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetProgress(v int64) *GetRepoSyncTaskResponseBody { + s.Progress = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetRequestId(v string) *GetRepoSyncTaskResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetSyncBatchTaskId(v string) *GetRepoSyncTaskResponseBody { + s.SyncBatchTaskId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetSyncRuleId(v string) *GetRepoSyncTaskResponseBody { + s.SyncRuleId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetSyncTaskId(v string) *GetRepoSyncTaskResponseBody { + s.SyncTaskId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetSyncTransAccelerate(v bool) *GetRepoSyncTaskResponseBody { + s.SyncTransAccelerate = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetSyncedSize(v int64) *GetRepoSyncTaskResponseBody { + s.SyncedSize = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetTaskStatus(v string) *GetRepoSyncTaskResponseBody { + s.TaskStatus = &v + return s +} + +func (s *GetRepoSyncTaskResponseBody) SetTaskTrigger(v string) *GetRepoSyncTaskResponseBody { + s.TaskTrigger = &v + return s +} + +type GetRepoSyncTaskResponseBodyImageFrom struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetRepoSyncTaskResponseBodyImageFrom) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskResponseBodyImageFrom) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskResponseBodyImageFrom) SetImageTag(v string) *GetRepoSyncTaskResponseBodyImageFrom { + s.ImageTag = 
&v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageFrom) SetInstanceId(v string) *GetRepoSyncTaskResponseBodyImageFrom { + s.InstanceId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRegionId(v string) *GetRepoSyncTaskResponseBodyImageFrom { + s.RegionId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRepoName(v string) *GetRepoSyncTaskResponseBodyImageFrom { + s.RepoName = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageFrom) SetRepoNamespaceName(v string) *GetRepoSyncTaskResponseBodyImageFrom { + s.RepoNamespaceName = &v + return s +} + +type GetRepoSyncTaskResponseBodyImageTo struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s GetRepoSyncTaskResponseBodyImageTo) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskResponseBodyImageTo) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskResponseBodyImageTo) SetImageTag(v string) *GetRepoSyncTaskResponseBodyImageTo { + s.ImageTag = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageTo) SetInstanceId(v string) *GetRepoSyncTaskResponseBodyImageTo { + s.InstanceId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageTo) SetRegionId(v string) *GetRepoSyncTaskResponseBodyImageTo { + s.RegionId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageTo) SetRepoName(v string) *GetRepoSyncTaskResponseBodyImageTo { + s.RepoName = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyImageTo) SetRepoNamespaceName(v string) *GetRepoSyncTaskResponseBodyImageTo { + s.RepoNamespaceName = &v + return s +} + +type GetRepoSyncTaskResponseBodyLayerTasks struct { + ArtifactDigest *string `json:"ArtifactDigest,omitempty" xml:"ArtifactDigest,omitempty"` + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"` + SyncLayerTaskId *string `json:"SyncLayerTaskId,omitempty" xml:"SyncLayerTaskId,omitempty"` + SyncedSize *int64 `json:"SyncedSize,omitempty" xml:"SyncedSize,omitempty"` + TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"` +} + +func (s GetRepoSyncTaskResponseBodyLayerTasks) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskResponseBodyLayerTasks) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetArtifactDigest(v string) *GetRepoSyncTaskResponseBodyLayerTasks { + s.ArtifactDigest = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetDigest(v string) *GetRepoSyncTaskResponseBodyLayerTasks { + s.Digest = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSize(v int64) *GetRepoSyncTaskResponseBodyLayerTasks { + s.Size = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSyncLayerTaskId(v string) *GetRepoSyncTaskResponseBodyLayerTasks { + s.SyncLayerTaskId = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetSyncedSize(v int64) *GetRepoSyncTaskResponseBodyLayerTasks { + s.SyncedSize = &v + return s +} + +func (s *GetRepoSyncTaskResponseBodyLayerTasks) SetTaskStatus(v string) *GetRepoSyncTaskResponseBodyLayerTasks { + s.TaskStatus 
= &v + return s +} + +type GetRepoSyncTaskResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoSyncTaskResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoSyncTaskResponse) GoString() string { + return s.String() +} + +func (s *GetRepoSyncTaskResponse) SetHeaders(v map[string]*string) *GetRepoSyncTaskResponse { + s.Headers = v + return s +} + +func (s *GetRepoSyncTaskResponse) SetBody(v *GetRepoSyncTaskResponseBody) *GetRepoSyncTaskResponse { + s.Body = v + return s +} + +type GetRepoTagRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagRequest) SetInstanceId(v string) *GetRepoTagRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagRequest) SetRepoId(v string) *GetRepoTagRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagRequest) SetTag(v string) *GetRepoTagRequest { + s.Tag = &v + return s +} + +type GetRepoTagResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + ImageCreate *int64 `json:"ImageCreate,omitempty" xml:"ImageCreate,omitempty"` + ImageId *string `json:"ImageId,omitempty" xml:"ImageId,omitempty"` + ImageSize *int64 `json:"ImageSize,omitempty" xml:"ImageSize,omitempty"` + ImageUpdate *int64 `json:"ImageUpdate,omitempty" xml:"ImageUpdate,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagResponseBody) SetCode(v string) *GetRepoTagResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagResponseBody) SetDigest(v string) *GetRepoTagResponseBody { + s.Digest = &v + return s +} + +func (s *GetRepoTagResponseBody) SetImageCreate(v int64) *GetRepoTagResponseBody { + s.ImageCreate = &v + return s +} + +func (s *GetRepoTagResponseBody) SetImageId(v string) *GetRepoTagResponseBody { + s.ImageId = &v + return s +} + +func (s *GetRepoTagResponseBody) SetImageSize(v int64) *GetRepoTagResponseBody { + s.ImageSize = &v + return s +} + +func (s *GetRepoTagResponseBody) SetImageUpdate(v int64) *GetRepoTagResponseBody { + s.ImageUpdate = &v + return s +} + +func (s *GetRepoTagResponseBody) SetIsSuccess(v bool) *GetRepoTagResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagResponseBody) SetRequestId(v string) *GetRepoTagResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoTagResponseBody) SetStatus(v string) *GetRepoTagResponseBody { + s.Status = &v + return s +} + +func (s *GetRepoTagResponseBody) SetTag(v string) *GetRepoTagResponseBody { + s.Tag = &v + return s +} + +type GetRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body 
*GetRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagResponse) SetHeaders(v map[string]*string) *GetRepoTagResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagResponse) SetBody(v *GetRepoTagResponseBody) *GetRepoTagResponse { + s.Body = v + return s +} + +type GetRepoTagLayersRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagLayersRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagLayersRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagLayersRequest) SetDigest(v string) *GetRepoTagLayersRequest { + s.Digest = &v + return s +} + +func (s *GetRepoTagLayersRequest) SetInstanceId(v string) *GetRepoTagLayersRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagLayersRequest) SetRepoId(v string) *GetRepoTagLayersRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagLayersRequest) SetTag(v string) *GetRepoTagLayersRequest { + s.Tag = &v + return s +} + +type GetRepoTagLayersResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Layers []*GetRepoTagLayersResponseBodyLayers `json:"Layers,omitempty" xml:"Layers,omitempty" type:"Repeated"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetRepoTagLayersResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagLayersResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagLayersResponseBody) SetCode(v string) *GetRepoTagLayersResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagLayersResponseBody) SetIsSuccess(v bool) *GetRepoTagLayersResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagLayersResponseBody) SetLayers(v []*GetRepoTagLayersResponseBodyLayers) *GetRepoTagLayersResponseBody { + s.Layers = v + return s +} + +func (s *GetRepoTagLayersResponseBody) SetRequestId(v string) *GetRepoTagLayersResponseBody { + s.RequestId = &v + return s +} + +type GetRepoTagLayersResponseBodyLayers struct { + BlobDigest *string `json:"BlobDigest,omitempty" xml:"BlobDigest,omitempty"` + BlobSize *int64 `json:"BlobSize,omitempty" xml:"BlobSize,omitempty"` + LayerCMD *string `json:"LayerCMD,omitempty" xml:"LayerCMD,omitempty"` + LayerIndex *int32 `json:"LayerIndex,omitempty" xml:"LayerIndex,omitempty"` + LayerInstruction *string `json:"LayerInstruction,omitempty" xml:"LayerInstruction,omitempty"` +} + +func (s GetRepoTagLayersResponseBodyLayers) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagLayersResponseBodyLayers) GoString() string { + return s.String() +} + +func (s *GetRepoTagLayersResponseBodyLayers) SetBlobDigest(v string) *GetRepoTagLayersResponseBodyLayers { + s.BlobDigest = &v + return s +} + +func (s *GetRepoTagLayersResponseBodyLayers) SetBlobSize(v int64) *GetRepoTagLayersResponseBodyLayers { + s.BlobSize = &v + return s +} + +func (s *GetRepoTagLayersResponseBodyLayers) SetLayerCMD(v string) *GetRepoTagLayersResponseBodyLayers { + s.LayerCMD = &v + return s +} + +func (s 
*GetRepoTagLayersResponseBodyLayers) SetLayerIndex(v int32) *GetRepoTagLayersResponseBodyLayers { + s.LayerIndex = &v + return s +} + +func (s *GetRepoTagLayersResponseBodyLayers) SetLayerInstruction(v string) *GetRepoTagLayersResponseBodyLayers { + s.LayerInstruction = &v + return s +} + +type GetRepoTagLayersResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagLayersResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagLayersResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagLayersResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagLayersResponse) SetHeaders(v map[string]*string) *GetRepoTagLayersResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagLayersResponse) SetBody(v *GetRepoTagLayersResponseBody) *GetRepoTagLayersResponse { + s.Body = v + return s +} + +type GetRepoTagManifestRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagManifestRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestRequest) SetInstanceId(v string) *GetRepoTagManifestRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetRepoId(v string) *GetRepoTagManifestRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetSchemaVersion(v int32) *GetRepoTagManifestRequest { + s.SchemaVersion = &v + return s +} + +func (s *GetRepoTagManifestRequest) SetTag(v string) *GetRepoTagManifestRequest { + s.Tag = &v + return s +} + +type GetRepoTagManifestResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Manifest *GetRepoTagManifestResponseBodyManifest `json:"Manifest,omitempty" xml:"Manifest,omitempty" type:"Struct"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s GetRepoTagManifestResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBody) SetCode(v string) *GetRepoTagManifestResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetIsSuccess(v bool) *GetRepoTagManifestResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetManifest(v *GetRepoTagManifestResponseBodyManifest) *GetRepoTagManifestResponseBody { + s.Manifest = v + return s +} + +func (s *GetRepoTagManifestResponseBody) SetRequestId(v string) *GetRepoTagManifestResponseBody { + s.RequestId = &v + return s +} + +type GetRepoTagManifestResponseBodyManifest struct { + Architecture *string `json:"Architecture,omitempty" xml:"Architecture,omitempty"` + Config *GetRepoTagManifestResponseBodyManifestConfig `json:"Config,omitempty" xml:"Config,omitempty" type:"Struct"` + FsLayers []*GetRepoTagManifestResponseBodyManifestFsLayers `json:"FsLayers,omitempty" xml:"FsLayers,omitempty" type:"Repeated"` + History []*GetRepoTagManifestResponseBodyManifestHistory `json:"History,omitempty" xml:"History,omitempty" 
type:"Repeated"` + Layers []*GetRepoTagManifestResponseBodyManifestLayers `json:"Layers,omitempty" xml:"Layers,omitempty" type:"Repeated"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + SchemaVersion *int32 `json:"SchemaVersion,omitempty" xml:"SchemaVersion,omitempty"` + Signatures []*GetRepoTagManifestResponseBodyManifestSignatures `json:"Signatures,omitempty" xml:"Signatures,omitempty" type:"Repeated"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifest) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetArchitecture(v string) *GetRepoTagManifestResponseBodyManifest { + s.Architecture = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetConfig(v *GetRepoTagManifestResponseBodyManifestConfig) *GetRepoTagManifestResponseBodyManifest { + s.Config = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetFsLayers(v []*GetRepoTagManifestResponseBodyManifestFsLayers) *GetRepoTagManifestResponseBodyManifest { + s.FsLayers = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetHistory(v []*GetRepoTagManifestResponseBodyManifestHistory) *GetRepoTagManifestResponseBodyManifest { + s.History = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetLayers(v []*GetRepoTagManifestResponseBodyManifestLayers) *GetRepoTagManifestResponseBodyManifest { + s.Layers = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifest { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetName(v string) *GetRepoTagManifestResponseBodyManifest { + s.Name = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetSchemaVersion(v int32) *GetRepoTagManifestResponseBodyManifest { + s.SchemaVersion = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetSignatures(v []*GetRepoTagManifestResponseBodyManifestSignatures) *GetRepoTagManifestResponseBodyManifest { + s.Signatures = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifest) SetTag(v string) *GetRepoTagManifestResponseBodyManifest { + s.Tag = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestConfig struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestConfig) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestConfig) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetDigest(v string) *GetRepoTagManifestResponseBodyManifestConfig { + s.Digest = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifestConfig { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestConfig) SetSize(v int64) *GetRepoTagManifestResponseBodyManifestConfig { + s.Size = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestFsLayers struct { + BlobSum *string `json:"BlobSum,omitempty" xml:"BlobSum,omitempty"` +} + +func (s 
GetRepoTagManifestResponseBodyManifestFsLayers) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestFsLayers) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestFsLayers) SetBlobSum(v string) *GetRepoTagManifestResponseBodyManifestFsLayers { + s.BlobSum = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestHistory struct { + V1Compatibility map[string]interface{} `json:"V1Compatibility,omitempty" xml:"V1Compatibility,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestHistory) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestHistory) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestHistory) SetV1Compatibility(v map[string]interface{}) *GetRepoTagManifestResponseBodyManifestHistory { + s.V1Compatibility = v + return s +} + +type GetRepoTagManifestResponseBodyManifestLayers struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + MediaType *string `json:"MediaType,omitempty" xml:"MediaType,omitempty"` + Size *int64 `json:"Size,omitempty" xml:"Size,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestLayers) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestLayers) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetDigest(v string) *GetRepoTagManifestResponseBodyManifestLayers { + s.Digest = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetMediaType(v string) *GetRepoTagManifestResponseBodyManifestLayers { + s.MediaType = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestLayers) SetSize(v int64) *GetRepoTagManifestResponseBodyManifestLayers { + s.Size = &v + return s +} + +type GetRepoTagManifestResponseBodyManifestSignatures struct { + Header map[string]interface{} `json:"Header,omitempty" xml:"Header,omitempty"` + Protected *string `json:"Protected,omitempty" xml:"Protected,omitempty"` + Signature *string `json:"Signature,omitempty" xml:"Signature,omitempty"` +} + +func (s GetRepoTagManifestResponseBodyManifestSignatures) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponseBodyManifestSignatures) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetHeader(v map[string]interface{}) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Header = v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetProtected(v string) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Protected = &v + return s +} + +func (s *GetRepoTagManifestResponseBodyManifestSignatures) SetSignature(v string) *GetRepoTagManifestResponseBodyManifestSignatures { + s.Signature = &v + return s +} + +type GetRepoTagManifestResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagManifestResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagManifestResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagManifestResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagManifestResponse) SetHeaders(v map[string]*string) *GetRepoTagManifestResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagManifestResponse) SetBody(v *GetRepoTagManifestResponseBody) *GetRepoTagManifestResponse { + 
s.Body = v + return s +} + +type GetRepoTagScanStatusRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s GetRepoTagScanStatusRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusRequest) SetDigest(v string) *GetRepoTagScanStatusRequest { + s.Digest = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetInstanceId(v string) *GetRepoTagScanStatusRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetRepoId(v string) *GetRepoTagScanStatusRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetScanTaskId(v string) *GetRepoTagScanStatusRequest { + s.ScanTaskId = &v + return s +} + +func (s *GetRepoTagScanStatusRequest) SetTag(v string) *GetRepoTagScanStatusRequest { + s.Tag = &v + return s +} + +type GetRepoTagScanStatusResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + ScanService *string `json:"ScanService,omitempty" xml:"ScanService,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s GetRepoTagScanStatusResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusResponseBody) SetCode(v string) *GetRepoTagScanStatusResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetIsSuccess(v bool) *GetRepoTagScanStatusResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetRequestId(v string) *GetRepoTagScanStatusResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetScanService(v string) *GetRepoTagScanStatusResponseBody { + s.ScanService = &v + return s +} + +func (s *GetRepoTagScanStatusResponseBody) SetStatus(v string) *GetRepoTagScanStatusResponseBody { + s.Status = &v + return s +} + +type GetRepoTagScanStatusResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagScanStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagScanStatusResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanStatusResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanStatusResponse) SetHeaders(v map[string]*string) *GetRepoTagScanStatusResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagScanStatusResponse) SetBody(v *GetRepoTagScanStatusResponseBody) *GetRepoTagScanStatusResponse { + s.Body = v + return s +} + +type GetRepoTagScanSummaryRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + Tag *string `json:"Tag,omitempty" 
xml:"Tag,omitempty"` +} + +func (s GetRepoTagScanSummaryRequest) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryRequest) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryRequest) SetDigest(v string) *GetRepoTagScanSummaryRequest { + s.Digest = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetInstanceId(v string) *GetRepoTagScanSummaryRequest { + s.InstanceId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetRepoId(v string) *GetRepoTagScanSummaryRequest { + s.RepoId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetScanTaskId(v string) *GetRepoTagScanSummaryRequest { + s.ScanTaskId = &v + return s +} + +func (s *GetRepoTagScanSummaryRequest) SetTag(v string) *GetRepoTagScanSummaryRequest { + s.Tag = &v + return s +} + +type GetRepoTagScanSummaryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + HighSeverity *int32 `json:"HighSeverity,omitempty" xml:"HighSeverity,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + LowSeverity *int32 `json:"LowSeverity,omitempty" xml:"LowSeverity,omitempty"` + MediumSeverity *int32 `json:"MediumSeverity,omitempty" xml:"MediumSeverity,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalSeverity *int32 `json:"TotalSeverity,omitempty" xml:"TotalSeverity,omitempty"` + UnknownSeverity *int32 `json:"UnknownSeverity,omitempty" xml:"UnknownSeverity,omitempty"` +} + +func (s GetRepoTagScanSummaryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryResponseBody) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryResponseBody) SetCode(v string) *GetRepoTagScanSummaryResponseBody { + s.Code = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetHighSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.HighSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetIsSuccess(v bool) *GetRepoTagScanSummaryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetLowSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.LowSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetMediumSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.MediumSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetRequestId(v string) *GetRepoTagScanSummaryResponseBody { + s.RequestId = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetTotalSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.TotalSeverity = &v + return s +} + +func (s *GetRepoTagScanSummaryResponseBody) SetUnknownSeverity(v int32) *GetRepoTagScanSummaryResponseBody { + s.UnknownSeverity = &v + return s +} + +type GetRepoTagScanSummaryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *GetRepoTagScanSummaryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s GetRepoTagScanSummaryResponse) String() string { + return tea.Prettify(s) +} + +func (s GetRepoTagScanSummaryResponse) GoString() string { + return s.String() +} + +func (s *GetRepoTagScanSummaryResponse) SetHeaders(v map[string]*string) *GetRepoTagScanSummaryResponse { + s.Headers = v + return s +} + +func (s *GetRepoTagScanSummaryResponse) SetBody(v *GetRepoTagScanSummaryResponseBody) *GetRepoTagScanSummaryResponse { + s.Body = v + return s +} + 
+type GetRepositoryRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s GetRepositoryRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepositoryRequest) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepositoryRequest) SetInstanceId(v string) *GetRepositoryRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepositoryRequest) SetRepoId(v string) *GetRepositoryRequest {
+	s.RepoId = &v
+	return s
+}
+
+func (s *GetRepositoryRequest) SetRepoName(v string) *GetRepositoryRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *GetRepositoryRequest) SetRepoNamespaceName(v string) *GetRepositoryRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type GetRepositoryResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	RepoBuildType *string `json:"RepoBuildType,omitempty" xml:"RepoBuildType,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"`
+	RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+	Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"`
+	TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"`
+}
+
+func (s GetRepositoryResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepositoryResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepositoryResponseBody) SetCode(v string) *GetRepositoryResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetCreateTime(v int64) *GetRepositoryResponseBody {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetDetail(v string) *GetRepositoryResponseBody {
+	s.Detail = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetInstanceId(v string) *GetRepositoryResponseBody {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetIsSuccess(v bool) *GetRepositoryResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetModifiedTime(v int64) *GetRepositoryResponseBody {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoBuildType(v string) *GetRepositoryResponseBody {
+	s.RepoBuildType = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoId(v string) *GetRepositoryResponseBody {
+	s.RepoId = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoName(v string) *GetRepositoryResponseBody {
+	s.RepoName = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoNamespaceName(v string) *GetRepositoryResponseBody {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoStatus(v string) *GetRepositoryResponseBody {
+	s.RepoStatus = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRepoType(v string) *GetRepositoryResponseBody {
+	s.RepoType = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetRequestId(v string) *GetRepositoryResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetResourceGroupId(v string) *GetRepositoryResponseBody {
+	s.ResourceGroupId = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetSummary(v string) *GetRepositoryResponseBody {
+	s.Summary = &v
+	return s
+}
+
+func (s *GetRepositoryResponseBody) SetTagImmutability(v bool) *GetRepositoryResponseBody {
+	s.TagImmutability = &v
+	return s
+}
+
+type GetRepositoryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *GetRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s GetRepositoryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s GetRepositoryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *GetRepositoryResponse) SetHeaders(v map[string]*string) *GetRepositoryResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *GetRepositoryResponse) SetBody(v *GetRepositoryResponseBody) *GetRepositoryResponse {
+	s.Body = v
+	return s
+}
+
+type ListArtifactBuildTaskLogRequest struct {
+	BuildTaskId *string `json:"BuildTaskId,omitempty" xml:"BuildTaskId,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Page *int32 `json:"Page,omitempty" xml:"Page,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s ListArtifactBuildTaskLogRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListArtifactBuildTaskLogRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListArtifactBuildTaskLogRequest) SetBuildTaskId(v string) *ListArtifactBuildTaskLogRequest {
+	s.BuildTaskId = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogRequest) SetInstanceId(v string) *ListArtifactBuildTaskLogRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogRequest) SetPage(v int32) *ListArtifactBuildTaskLogRequest {
+	s.Page = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogRequest) SetPageSize(v int32) *ListArtifactBuildTaskLogRequest {
+	s.PageSize = &v
+	return s
+}
+
+type ListArtifactBuildTaskLogResponseBody struct {
+	BuildTaskLogs []*ListArtifactBuildTaskLogResponseBodyBuildTaskLogs `json:"BuildTaskLogs,omitempty" xml:"BuildTaskLogs,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListArtifactBuildTaskLogResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListArtifactBuildTaskLogResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListArtifactBuildTaskLogResponseBody) SetBuildTaskLogs(v []*ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) *ListArtifactBuildTaskLogResponseBody {
+	s.BuildTaskLogs = v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponseBody) SetCode(v string) *ListArtifactBuildTaskLogResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponseBody) SetIsSuccess(v bool) *ListArtifactBuildTaskLogResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponseBody) SetRequestId(v string) *ListArtifactBuildTaskLogResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponseBody) SetTotalCount(v int32) *ListArtifactBuildTaskLogResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListArtifactBuildTaskLogResponseBodyBuildTaskLogs struct {
+	LineNumber *int32 `json:"LineNumber,omitempty" xml:"LineNumber,omitempty"`
+	Message *string `json:"Message,omitempty" xml:"Message,omitempty"`
+}
+
+func (s ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) GoString() string {
+	return s.String()
+}
+
+func (s *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) SetLineNumber(v int32) *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs {
+	s.LineNumber = &v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs) SetMessage(v string) *ListArtifactBuildTaskLogResponseBodyBuildTaskLogs {
+	s.Message = &v
+	return s
+}
+
+type ListArtifactBuildTaskLogResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListArtifactBuildTaskLogResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListArtifactBuildTaskLogResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListArtifactBuildTaskLogResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListArtifactBuildTaskLogResponse) SetHeaders(v map[string]*string) *ListArtifactBuildTaskLogResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListArtifactBuildTaskLogResponse) SetBody(v *ListArtifactBuildTaskLogResponseBody) *ListArtifactBuildTaskLogResponse {
+	s.Body = v
+	return s
+}
+
+type ListChainRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s ListChainRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChainRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChainRequest) SetInstanceId(v string) *ListChainRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChainRequest) SetPageNo(v int32) *ListChainRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChainRequest) SetPageSize(v int32) *ListChainRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChainRequest) SetRepoName(v string) *ListChainRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChainRequest) SetRepoNamespaceName(v string) *ListChainRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type ListChainResponseBody struct {
+	Chains []*ListChainResponseBodyChains `json:"Chains,omitempty" xml:"Chains,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
`json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponseBody) GoString() string { + return s.String() +} + +func (s *ListChainResponseBody) SetChains(v []*ListChainResponseBodyChains) *ListChainResponseBody { + s.Chains = v + return s +} + +func (s *ListChainResponseBody) SetCode(v string) *ListChainResponseBody { + s.Code = &v + return s +} + +func (s *ListChainResponseBody) SetIsSuccess(v bool) *ListChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChainResponseBody) SetPageNo(v int32) *ListChainResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChainResponseBody) SetPageSize(v int32) *ListChainResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChainResponseBody) SetRequestId(v string) *ListChainResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChainResponseBody) SetTotalCount(v int32) *ListChainResponseBody { + s.TotalCount = &v + return s +} + +type ListChainResponseBodyChains struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` + ScopeId *string `json:"ScopeId,omitempty" xml:"ScopeId,omitempty"` + ScopeType *string `json:"ScopeType,omitempty" xml:"ScopeType,omitempty"` +} + +func (s ListChainResponseBodyChains) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponseBodyChains) GoString() string { + return s.String() +} + +func (s *ListChainResponseBodyChains) SetChainId(v string) *ListChainResponseBodyChains { + s.ChainId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetCreateTime(v int64) *ListChainResponseBodyChains { + s.CreateTime = &v + return s +} + +func (s *ListChainResponseBodyChains) SetDescription(v string) *ListChainResponseBodyChains { + s.Description = &v + return s +} + +func (s *ListChainResponseBodyChains) SetInstanceId(v string) *ListChainResponseBodyChains { + s.InstanceId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetModifiedTime(v int64) *ListChainResponseBodyChains { + s.ModifiedTime = &v + return s +} + +func (s *ListChainResponseBodyChains) SetName(v string) *ListChainResponseBodyChains { + s.Name = &v + return s +} + +func (s *ListChainResponseBodyChains) SetScopeId(v string) *ListChainResponseBodyChains { + s.ScopeId = &v + return s +} + +func (s *ListChainResponseBodyChains) SetScopeType(v string) *ListChainResponseBodyChains { + s.ScopeType = &v + return s +} + +type ListChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChainResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChainResponse) GoString() string { + return s.String() +} + +func (s *ListChainResponse) SetHeaders(v map[string]*string) *ListChainResponse { + s.Headers = v + return s +} + +func (s *ListChainResponse) SetBody(v *ListChainResponseBody) *ListChainResponse { + s.Body = v + return s +} + +type 
+type ListChainInstanceRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s ListChainInstanceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChainInstanceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChainInstanceRequest) SetInstanceId(v string) *ListChainInstanceRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChainInstanceRequest) SetPageNo(v int32) *ListChainInstanceRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChainInstanceRequest) SetPageSize(v int32) *ListChainInstanceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChainInstanceRequest) SetRepoName(v string) *ListChainInstanceRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChainInstanceRequest) SetRepoNamespaceName(v string) *ListChainInstanceRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type ListChainInstanceResponseBody struct {
+	ChainInstances []*ListChainInstanceResponseBodyChainInstances `json:"ChainInstances,omitempty" xml:"ChainInstances,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChainInstanceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChainInstanceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChainInstanceResponseBody) SetChainInstances(v []*ListChainInstanceResponseBodyChainInstances) *ListChainInstanceResponseBody {
+	s.ChainInstances = v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetCode(v string) *ListChainInstanceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetInstanceId(v string) *ListChainInstanceResponseBody {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetIsSuccess(v bool) *ListChainInstanceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetPageNo(v int32) *ListChainInstanceResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetPageSize(v int32) *ListChainInstanceResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetRequestId(v string) *ListChainInstanceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChainInstanceResponseBody) SetTotalCount(v int32) *ListChainInstanceResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChainInstanceResponseBodyChainInstances struct {
+	Chain *ListChainInstanceResponseBodyChainInstancesChain `json:"Chain,omitempty" xml:"Chain,omitempty" type:"Struct"`
+	ChainInstanceId *string `json:"ChainInstanceId,omitempty" xml:"ChainInstanceId,omitempty"`
+	EndTime *int64 `json:"EndTime,omitempty" xml:"EndTime,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + Result *string `json:"Result,omitempty" xml:"Result,omitempty"` + StartTime *int64 `json:"StartTime,omitempty" xml:"StartTime,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s ListChainInstanceResponseBodyChainInstances) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponseBodyChainInstances) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetChain(v *ListChainInstanceResponseBodyChainInstancesChain) *ListChainInstanceResponseBodyChainInstances { + s.Chain = v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetChainInstanceId(v string) *ListChainInstanceResponseBodyChainInstances { + s.ChainInstanceId = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetEndTime(v int64) *ListChainInstanceResponseBodyChainInstances { + s.EndTime = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetRepoName(v string) *ListChainInstanceResponseBodyChainInstances { + s.RepoName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetRepoNamespaceName(v string) *ListChainInstanceResponseBodyChainInstances { + s.RepoNamespaceName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetResult(v string) *ListChainInstanceResponseBodyChainInstances { + s.Result = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetStartTime(v int64) *ListChainInstanceResponseBodyChainInstances { + s.StartTime = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstances) SetStatus(v string) *ListChainInstanceResponseBodyChainInstances { + s.Status = &v + return s +} + +type ListChainInstanceResponseBodyChainInstancesChain struct { + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + ChainName *string `json:"ChainName,omitempty" xml:"ChainName,omitempty"` + Version *int64 `json:"Version,omitempty" xml:"Version,omitempty"` +} + +func (s ListChainInstanceResponseBodyChainInstancesChain) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponseBodyChainInstancesChain) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetChainId(v string) *ListChainInstanceResponseBodyChainInstancesChain { + s.ChainId = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetChainName(v string) *ListChainInstanceResponseBodyChainInstancesChain { + s.ChainName = &v + return s +} + +func (s *ListChainInstanceResponseBodyChainInstancesChain) SetVersion(v int64) *ListChainInstanceResponseBodyChainInstancesChain { + s.Version = &v + return s +} + +type ListChainInstanceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListChainInstanceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListChainInstanceResponse) String() string { + return tea.Prettify(s) +} + +func (s ListChainInstanceResponse) GoString() string { + return s.String() +} + +func (s *ListChainInstanceResponse) SetHeaders(v map[string]*string) *ListChainInstanceResponse { + s.Headers = v + return s +} + +func (s *ListChainInstanceResponse) SetBody(v *ListChainInstanceResponseBody) *ListChainInstanceResponse { + s.Body = v + return s +} + +type ListChartRequest struct 
+type ListChartRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+}
+
+func (s ListChartRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRequest) SetInstanceId(v string) *ListChartRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartRequest) SetPageNo(v int32) *ListChartRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartRequest) SetPageSize(v int32) *ListChartRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartRequest) SetRepoName(v string) *ListChartRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartRequest) SetRepoNamespaceName(v string) *ListChartRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+type ListChartResponseBody struct {
+	Charts []*ListChartResponseBodyCharts `json:"Charts,omitempty" xml:"Charts,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChartResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartResponseBody) SetCharts(v []*ListChartResponseBodyCharts) *ListChartResponseBody {
+	s.Charts = v
+	return s
+}
+
+func (s *ListChartResponseBody) SetCode(v string) *ListChartResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChartResponseBody) SetIsSuccess(v bool) *ListChartResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChartResponseBody) SetPageNo(v int32) *ListChartResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartResponseBody) SetPageSize(v int32) *ListChartResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartResponseBody) SetRequestId(v string) *ListChartResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChartResponseBody) SetTotalCount(v int32) *ListChartResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChartResponseBodyCharts struct {
+	Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"`
+	CreateTime *string `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+}
+
+func (s ListChartResponseBodyCharts) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartResponseBodyCharts) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartResponseBodyCharts) SetChart(v string) *ListChartResponseBodyCharts {
+	s.Chart = &v
+	return s
+}
+
+func (s *ListChartResponseBodyCharts) SetCreateTime(v string) *ListChartResponseBodyCharts {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListChartResponseBodyCharts) SetInstanceId(v string) *ListChartResponseBodyCharts {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartResponseBodyCharts) SetModifiedTime(v int64) *ListChartResponseBodyCharts {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListChartResponseBodyCharts) SetRepoId(v string) *ListChartResponseBodyCharts {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListChartResponseBodyCharts) SetStatus(v string) *ListChartResponseBodyCharts {
+	s.Status = &v
+	return s
+}
+
+type ListChartResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartResponse) SetHeaders(v map[string]*string) *ListChartResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartResponse) SetBody(v *ListChartResponseBody) *ListChartResponse {
+	s.Body = v
+	return s
+}
+
+type ListChartNamespaceRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+}
+
+func (s ListChartNamespaceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartNamespaceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartNamespaceRequest) SetInstanceId(v string) *ListChartNamespaceRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartNamespaceRequest) SetNamespaceName(v string) *ListChartNamespaceRequest {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *ListChartNamespaceRequest) SetNamespaceStatus(v string) *ListChartNamespaceRequest {
+	s.NamespaceStatus = &v
+	return s
+}
+
+func (s *ListChartNamespaceRequest) SetPageNo(v int32) *ListChartNamespaceRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartNamespaceRequest) SetPageSize(v int32) *ListChartNamespaceRequest {
+	s.PageSize = &v
+	return s
+}
+
+type ListChartNamespaceResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	Namespaces []*ListChartNamespaceResponseBodyNamespaces `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChartNamespaceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartNamespaceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartNamespaceResponseBody) SetCode(v string) *ListChartNamespaceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetIsSuccess(v bool) *ListChartNamespaceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetNamespaces(v []*ListChartNamespaceResponseBodyNamespaces) *ListChartNamespaceResponseBody {
+	s.Namespaces = v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetPageNo(v int32) *ListChartNamespaceResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetPageSize(v int32) *ListChartNamespaceResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetRequestId(v string) *ListChartNamespaceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBody) SetTotalCount(v string) *ListChartNamespaceResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChartNamespaceResponseBodyNamespaces struct {
+	AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"`
+	DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"`
+	NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"`
+	NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+}
+
+func (s ListChartNamespaceResponseBodyNamespaces) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartNamespaceResponseBodyNamespaces) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetAutoCreateRepo(v bool) *ListChartNamespaceResponseBodyNamespaces {
+	s.AutoCreateRepo = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetDefaultRepoType(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.DefaultRepoType = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetInstanceId(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceId(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.NamespaceId = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceName(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.NamespaceName = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetNamespaceStatus(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.NamespaceStatus = &v
+	return s
+}
+
+func (s *ListChartNamespaceResponseBodyNamespaces) SetResourceGroupId(v string) *ListChartNamespaceResponseBodyNamespaces {
+	s.ResourceGroupId = &v
+	return s
+}
+
+type ListChartNamespaceResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartNamespaceResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartNamespaceResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartNamespaceResponse) SetHeaders(v map[string]*string) *ListChartNamespaceResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartNamespaceResponse) SetBody(v *ListChartNamespaceResponseBody) *ListChartNamespaceResponse {
+	s.Body = v
+	return s
+}
+
+type ListChartReleaseRequest struct {
+	Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListChartReleaseRequest) String() string { + return tea.Prettify(s) +} + +func (s ListChartReleaseRequest) GoString() string { + return s.String() +} + +func (s *ListChartReleaseRequest) SetChart(v string) *ListChartReleaseRequest { + s.Chart = &v + return s +} + +func (s *ListChartReleaseRequest) SetInstanceId(v string) *ListChartReleaseRequest { + s.InstanceId = &v + return s +} + +func (s *ListChartReleaseRequest) SetPageNo(v int32) *ListChartReleaseRequest { + s.PageNo = &v + return s +} + +func (s *ListChartReleaseRequest) SetPageSize(v int32) *ListChartReleaseRequest { + s.PageSize = &v + return s +} + +func (s *ListChartReleaseRequest) SetRepoName(v string) *ListChartReleaseRequest { + s.RepoName = &v + return s +} + +func (s *ListChartReleaseRequest) SetRepoNamespaceName(v string) *ListChartReleaseRequest { + s.RepoNamespaceName = &v + return s +} + +type ListChartReleaseResponseBody struct { + ChartReleases []*ListChartReleaseResponseBodyChartReleases `json:"ChartReleases,omitempty" xml:"ChartReleases,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListChartReleaseResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListChartReleaseResponseBody) GoString() string { + return s.String() +} + +func (s *ListChartReleaseResponseBody) SetChartReleases(v []*ListChartReleaseResponseBodyChartReleases) *ListChartReleaseResponseBody { + s.ChartReleases = v + return s +} + +func (s *ListChartReleaseResponseBody) SetCode(v string) *ListChartReleaseResponseBody { + s.Code = &v + return s +} + +func (s *ListChartReleaseResponseBody) SetIsSuccess(v bool) *ListChartReleaseResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListChartReleaseResponseBody) SetPageNo(v int32) *ListChartReleaseResponseBody { + s.PageNo = &v + return s +} + +func (s *ListChartReleaseResponseBody) SetPageSize(v int32) *ListChartReleaseResponseBody { + s.PageSize = &v + return s +} + +func (s *ListChartReleaseResponseBody) SetRequestId(v string) *ListChartReleaseResponseBody { + s.RequestId = &v + return s +} + +func (s *ListChartReleaseResponseBody) SetTotalCount(v string) *ListChartReleaseResponseBody { + s.TotalCount = &v + return s +} + +type ListChartReleaseResponseBodyChartReleases struct { + Chart *string `json:"Chart,omitempty" xml:"Chart,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + Release *string `json:"Release,omitempty" xml:"Release,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + Size *string `json:"Size,omitempty" xml:"Size,omitempty"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s ListChartReleaseResponseBodyChartReleases) String() string { + return tea.Prettify(s) +} + +func (s ListChartReleaseResponseBodyChartReleases) GoString() string { + return s.String() +} + +func (s *ListChartReleaseResponseBodyChartReleases) SetChart(v string) 
+	s.Chart = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetInstanceId(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetModifiedTime(v int64) *ListChartReleaseResponseBodyChartReleases {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetRelease(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Release = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetRepoId(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetSize(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Size = &v
+	return s
+}
+
+func (s *ListChartReleaseResponseBodyChartReleases) SetStatus(v string) *ListChartReleaseResponseBodyChartReleases {
+	s.Status = &v
+	return s
+}
+
+type ListChartReleaseResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartReleaseResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartReleaseResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartReleaseResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartReleaseResponse) SetHeaders(v map[string]*string) *ListChartReleaseResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartReleaseResponse) SetBody(v *ListChartReleaseResponseBody) *ListChartReleaseResponse {
+	s.Body = v
+	return s
+}
+
+type ListChartRepositoryRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"`
+}
+
+func (s ListChartRepositoryRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryRequest) SetInstanceId(v string) *ListChartRepositoryRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetPageNo(v int32) *ListChartRepositoryRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetPageSize(v int32) *ListChartRepositoryRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoName(v string) *ListChartRepositoryRequest {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoNamespaceName(v string) *ListChartRepositoryRequest {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *ListChartRepositoryRequest) SetRepoStatus(v string) *ListChartRepositoryRequest {
+	s.RepoStatus = &v
+	return s
+}
+
+type ListChartRepositoryResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Repositories []*ListChartRepositoryResponseBodyRepositories `json:"Repositories,omitempty" xml:"Repositories,omitempty" type:"Repeated"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListChartRepositoryResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponseBody) SetCode(v string) *ListChartRepositoryResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetIsSuccess(v bool) *ListChartRepositoryResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetPageNo(v int32) *ListChartRepositoryResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetPageSize(v int32) *ListChartRepositoryResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetRepositories(v []*ListChartRepositoryResponseBodyRepositories) *ListChartRepositoryResponseBody {
+	s.Repositories = v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetRequestId(v string) *ListChartRepositoryResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBody) SetTotalCount(v string) *ListChartRepositoryResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListChartRepositoryResponseBodyRepositories struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"`
+	RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"`
+	RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"`
+	Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"`
+}
+
+func (s ListChartRepositoryResponseBodyRepositories) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponseBodyRepositories) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetCreateTime(v int64) *ListChartRepositoryResponseBodyRepositories {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetInstanceId(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetModifiedTime(v int64) *ListChartRepositoryResponseBodyRepositories {
+	s.ModifiedTime = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoId(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoId = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoName(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoNamespaceName(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoNamespaceName = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoStatus(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoStatus = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetRepoType(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.RepoType = &v
+	return s
+}
+
+func (s *ListChartRepositoryResponseBodyRepositories) SetSummary(v string) *ListChartRepositoryResponseBodyRepositories {
+	s.Summary = &v
+	return s
+}
+
+type ListChartRepositoryResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListChartRepositoryResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListChartRepositoryResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListChartRepositoryResponse) SetHeaders(v map[string]*string) *ListChartRepositoryResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListChartRepositoryResponse) SetBody(v *ListChartRepositoryResponseBody) *ListChartRepositoryResponse {
+	s.Body = v
+	return s
+}
+
+type ListEventCenterRecordRequest struct {
+	EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+}
+
+func (s ListEventCenterRecordRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordRequest) SetEventType(v string) *ListEventCenterRecordRequest {
+	s.EventType = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetInstanceId(v string) *ListEventCenterRecordRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetPageNo(v int32) *ListEventCenterRecordRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetPageSize(v int32) *ListEventCenterRecordRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListEventCenterRecordRequest) SetRuleId(v string) *ListEventCenterRecordRequest {
+	s.RuleId = &v
+	return s
+}
+
+type ListEventCenterRecordResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	Records []*ListEventCenterRecordResponseBodyRecords `json:"Records,omitempty" xml:"Records,omitempty" type:"Repeated"`
+	// Id of the request
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListEventCenterRecordResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponseBody) SetCode(v string) *ListEventCenterRecordResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetIsSuccess(v bool) *ListEventCenterRecordResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetPageNo(v int32) *ListEventCenterRecordResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetPageSize(v int32) *ListEventCenterRecordResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetRecords(v []*ListEventCenterRecordResponseBodyRecords) *ListEventCenterRecordResponseBody {
+	s.Records = v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetRequestId(v string) *ListEventCenterRecordResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBody) SetTotalCount(v int32) *ListEventCenterRecordResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListEventCenterRecordResponseBodyRecords struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"`
+	EventNotifyId *string `json:"EventNotifyId,omitempty" xml:"EventNotifyId,omitempty"`
+	EventNotifyMethod *string `json:"EventNotifyMethod,omitempty" xml:"EventNotifyMethod,omitempty"`
+	EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Namespace *string `json:"Namespace,omitempty" xml:"Namespace,omitempty"`
+	RecordId *string `json:"RecordId,omitempty" xml:"RecordId,omitempty"`
+	RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"`
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+	RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+	UpdateTime *int64 `json:"UpdateTime,omitempty" xml:"UpdateTime,omitempty"`
+}
+
+func (s ListEventCenterRecordResponseBodyRecords) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponseBodyRecords) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetCreateTime(v int64) *ListEventCenterRecordResponseBodyRecords {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventChannel(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventChannel = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventNotifyId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventNotifyId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventNotifyMethod(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventNotifyMethod = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetEventType(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.EventType = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetInstanceId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetNamespace(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.Namespace = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRecordId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RecordId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRepoName(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RepoName = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRuleId(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RuleId = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetRuleName(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.RuleName = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetTag(v string) *ListEventCenterRecordResponseBodyRecords {
+	s.Tag = &v
+	return s
+}
+
+func (s *ListEventCenterRecordResponseBodyRecords) SetUpdateTime(v int64) *ListEventCenterRecordResponseBodyRecords {
+	s.UpdateTime = &v
+	return s
+}
+
+type ListEventCenterRecordResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListEventCenterRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListEventCenterRecordResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRecordResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRecordResponse) SetHeaders(v map[string]*string) *ListEventCenterRecordResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListEventCenterRecordResponse) SetBody(v *ListEventCenterRecordResponseBody) *ListEventCenterRecordResponse {
+	s.Body = v
+	return s
+}
+
+type ListEventCenterRuleNameRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+}
+
+func (s ListEventCenterRuleNameRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameRequest) SetInstanceId(v string) *ListEventCenterRuleNameRequest {
+	s.InstanceId = &v
+	return s
+}
+
+type ListEventCenterRuleNameResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	// Id of the request
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	RuleNames []*ListEventCenterRuleNameResponseBodyRuleNames `json:"RuleNames,omitempty" xml:"RuleNames,omitempty" type:"Repeated"`
+}
+
+func (s ListEventCenterRuleNameResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetCode(v string) *ListEventCenterRuleNameResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetIsSuccess(v bool) *ListEventCenterRuleNameResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetRequestId(v string) *ListEventCenterRuleNameResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBody) SetRuleNames(v []*ListEventCenterRuleNameResponseBodyRuleNames) *ListEventCenterRuleNameResponseBody {
+	s.RuleNames = v
+	return s
+}
+
+type ListEventCenterRuleNameResponseBodyRuleNames struct {
+	RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"`
+	RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"`
+}
+
+func (s ListEventCenterRuleNameResponseBodyRuleNames) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponseBodyRuleNames) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponseBodyRuleNames) SetRuleId(v string) *ListEventCenterRuleNameResponseBodyRuleNames {
+	s.RuleId = &v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponseBodyRuleNames) SetRuleName(v string) *ListEventCenterRuleNameResponseBodyRuleNames {
+	s.RuleName = &v
+	return s
+}
+
+type ListEventCenterRuleNameResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListEventCenterRuleNameResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListEventCenterRuleNameResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListEventCenterRuleNameResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListEventCenterRuleNameResponse) SetHeaders(v map[string]*string) *ListEventCenterRuleNameResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListEventCenterRuleNameResponse) SetBody(v *ListEventCenterRuleNameResponseBody) *ListEventCenterRuleNameResponse {
+	s.Body = v
+	return s
+}
+
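Repeated response elements such as RuleNames arrive as slices of pointers, so a defensive consumer guards each element as well as each field. A short sketch (body stands in for a response body from a real ListEventCenterRuleName call):

// printRuleNames lists rule IDs and names, skipping nil slice entries.
func printRuleNames(body *ListEventCenterRuleNameResponseBody) {
	for _, rn := range body.RuleNames {
		if rn == nil {
			continue
		}
		fmt.Printf("%s\t%s\n", tea.StringValue(rn.RuleId), tea.StringValue(rn.RuleName))
	}
}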
+type ListInstanceRequest struct {
+	InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"`
+	InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"`
+}
+
+func (s ListInstanceRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceRequest) SetInstanceName(v string) *ListInstanceRequest {
+	s.InstanceName = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetInstanceStatus(v string) *ListInstanceRequest {
+	s.InstanceStatus = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetPageNo(v int32) *ListInstanceRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetPageSize(v int32) *ListInstanceRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListInstanceRequest) SetResourceGroupId(v string) *ListInstanceRequest {
+	s.ResourceGroupId = &v
+	return s
+}
+
+type ListInstanceResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Instances []*ListInstanceResponseBodyInstances `json:"Instances,omitempty" xml:"Instances,omitempty" type:"Repeated"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListInstanceResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListInstanceResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListInstanceResponseBody) SetCode(v string) *ListInstanceResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetInstances(v []*ListInstanceResponseBodyInstances) *ListInstanceResponseBody {
+	s.Instances = v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetIsSuccess(v bool) *ListInstanceResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetPageNo(v int32) *ListInstanceResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetPageSize(v int32) *ListInstanceResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetRequestId(v string) *ListInstanceResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListInstanceResponseBody) SetTotalCount(v int32) *ListInstanceResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListInstanceResponseBodyInstances struct {
+	CreateTime *string `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	InstanceName *string `json:"InstanceName,omitempty" xml:"InstanceName,omitempty"`
+	InstanceSpecification *string `json:"InstanceSpecification,omitempty" xml:"InstanceSpecification,omitempty"`
+	InstanceStatus *string `json:"InstanceStatus,omitempty" xml:"InstanceStatus,omitempty"`
xml:"InstanceStatus,omitempty"` + ModifiedTime *string `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s ListInstanceResponseBodyInstances) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceResponseBodyInstances) GoString() string { + return s.String() +} + +func (s *ListInstanceResponseBodyInstances) SetCreateTime(v string) *ListInstanceResponseBodyInstances { + s.CreateTime = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetInstanceId(v string) *ListInstanceResponseBodyInstances { + s.InstanceId = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetInstanceName(v string) *ListInstanceResponseBodyInstances { + s.InstanceName = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetInstanceSpecification(v string) *ListInstanceResponseBodyInstances { + s.InstanceSpecification = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetInstanceStatus(v string) *ListInstanceResponseBodyInstances { + s.InstanceStatus = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetModifiedTime(v string) *ListInstanceResponseBodyInstances { + s.ModifiedTime = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetRegionId(v string) *ListInstanceResponseBodyInstances { + s.RegionId = &v + return s +} + +func (s *ListInstanceResponseBodyInstances) SetResourceGroupId(v string) *ListInstanceResponseBodyInstances { + s.ResourceGroupId = &v + return s +} + +type ListInstanceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListInstanceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListInstanceResponse) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceResponse) GoString() string { + return s.String() +} + +func (s *ListInstanceResponse) SetHeaders(v map[string]*string) *ListInstanceResponse { + s.Headers = v + return s +} + +func (s *ListInstanceResponse) SetBody(v *ListInstanceResponseBody) *ListInstanceResponse { + s.Body = v + return s +} + +type ListInstanceEndpointRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s ListInstanceEndpointRequest) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointRequest) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointRequest) SetInstanceId(v string) *ListInstanceEndpointRequest { + s.InstanceId = &v + return s +} + +func (s *ListInstanceEndpointRequest) SetModuleName(v string) *ListInstanceEndpointRequest { + s.ModuleName = &v + return s +} + +type ListInstanceEndpointResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + Endpoints []*ListInstanceEndpointResponseBodyEndpoints `json:"Endpoints,omitempty" xml:"Endpoints,omitempty" type:"Repeated"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s ListInstanceEndpointResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointResponseBody) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponseBody) SetCode(v string) 
*ListInstanceEndpointResponseBody { + s.Code = &v + return s +} + +func (s *ListInstanceEndpointResponseBody) SetEndpoints(v []*ListInstanceEndpointResponseBodyEndpoints) *ListInstanceEndpointResponseBody { + s.Endpoints = v + return s +} + +func (s *ListInstanceEndpointResponseBody) SetIsSuccess(v bool) *ListInstanceEndpointResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListInstanceEndpointResponseBody) SetRequestId(v string) *ListInstanceEndpointResponseBody { + s.RequestId = &v + return s +} + +type ListInstanceEndpointResponseBodyEndpoints struct { + AclEnable *bool `json:"AclEnable,omitempty" xml:"AclEnable,omitempty"` + AclEntries []*ListInstanceEndpointResponseBodyEndpointsAclEntries `json:"AclEntries,omitempty" xml:"AclEntries,omitempty" type:"Repeated"` + Domains []*ListInstanceEndpointResponseBodyEndpointsDomains `json:"Domains,omitempty" xml:"Domains,omitempty" type:"Repeated"` + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + LinkedVpcs []*ListInstanceEndpointResponseBodyEndpointsLinkedVpcs `json:"LinkedVpcs,omitempty" xml:"LinkedVpcs,omitempty" type:"Repeated"` + Status *string `json:"Status,omitempty" xml:"Status,omitempty"` +} + +func (s ListInstanceEndpointResponseBodyEndpoints) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointResponseBodyEndpoints) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetAclEnable(v bool) *ListInstanceEndpointResponseBodyEndpoints { + s.AclEnable = &v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetAclEntries(v []*ListInstanceEndpointResponseBodyEndpointsAclEntries) *ListInstanceEndpointResponseBodyEndpoints { + s.AclEntries = v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetDomains(v []*ListInstanceEndpointResponseBodyEndpointsDomains) *ListInstanceEndpointResponseBodyEndpoints { + s.Domains = v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetEnable(v bool) *ListInstanceEndpointResponseBodyEndpoints { + s.Enable = &v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetEndpointType(v string) *ListInstanceEndpointResponseBodyEndpoints { + s.EndpointType = &v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetLinkedVpcs(v []*ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) *ListInstanceEndpointResponseBodyEndpoints { + s.LinkedVpcs = v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpoints) SetStatus(v string) *ListInstanceEndpointResponseBodyEndpoints { + s.Status = &v + return s +} + +type ListInstanceEndpointResponseBodyEndpointsAclEntries struct { + Entry *string `json:"Entry,omitempty" xml:"Entry,omitempty"` +} + +func (s ListInstanceEndpointResponseBodyEndpointsAclEntries) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointResponseBodyEndpointsAclEntries) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponseBodyEndpointsAclEntries) SetEntry(v string) *ListInstanceEndpointResponseBodyEndpointsAclEntries { + s.Entry = &v + return s +} + +type ListInstanceEndpointResponseBodyEndpointsDomains struct { + Domain *string `json:"Domain,omitempty" xml:"Domain,omitempty"` + Type *string `json:"Type,omitempty" xml:"Type,omitempty"` +} + +func (s ListInstanceEndpointResponseBodyEndpointsDomains) String() string { + return tea.Prettify(s) +} + +func (s 
ListInstanceEndpointResponseBodyEndpointsDomains) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponseBodyEndpointsDomains) SetDomain(v string) *ListInstanceEndpointResponseBodyEndpointsDomains { + s.Domain = &v + return s +} + +func (s *ListInstanceEndpointResponseBodyEndpointsDomains) SetType(v string) *ListInstanceEndpointResponseBodyEndpointsDomains { + s.Type = &v + return s +} + +type ListInstanceEndpointResponseBodyEndpointsLinkedVpcs struct { + VpcId *string `json:"VpcId,omitempty" xml:"VpcId,omitempty"` +} + +func (s ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponseBodyEndpointsLinkedVpcs) SetVpcId(v string) *ListInstanceEndpointResponseBodyEndpointsLinkedVpcs { + s.VpcId = &v + return s +} + +type ListInstanceEndpointResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListInstanceEndpointResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListInstanceEndpointResponse) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceEndpointResponse) GoString() string { + return s.String() +} + +func (s *ListInstanceEndpointResponse) SetHeaders(v map[string]*string) *ListInstanceEndpointResponse { + s.Headers = v + return s +} + +func (s *ListInstanceEndpointResponse) SetBody(v *ListInstanceEndpointResponseBody) *ListInstanceEndpointResponse { + s.Body = v + return s +} + +type ListInstanceRegionRequest struct { + Lang *string `json:"Lang,omitempty" xml:"Lang,omitempty"` +} + +func (s ListInstanceRegionRequest) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceRegionRequest) GoString() string { + return s.String() +} + +func (s *ListInstanceRegionRequest) SetLang(v string) *ListInstanceRegionRequest { + s.Lang = &v + return s +} + +type ListInstanceRegionResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Regions []*ListInstanceRegionResponseBodyRegions `json:"Regions,omitempty" xml:"Regions,omitempty" type:"Repeated"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s ListInstanceRegionResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceRegionResponseBody) GoString() string { + return s.String() +} + +func (s *ListInstanceRegionResponseBody) SetCode(v string) *ListInstanceRegionResponseBody { + s.Code = &v + return s +} + +func (s *ListInstanceRegionResponseBody) SetIsSuccess(v bool) *ListInstanceRegionResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListInstanceRegionResponseBody) SetRegions(v []*ListInstanceRegionResponseBodyRegions) *ListInstanceRegionResponseBody { + s.Regions = v + return s +} + +func (s *ListInstanceRegionResponseBody) SetRequestId(v string) *ListInstanceRegionResponseBody { + s.RequestId = &v + return s +} + +type ListInstanceRegionResponseBodyRegions struct { + LocalName *string `json:"LocalName,omitempty" xml:"LocalName,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` +} + +func (s ListInstanceRegionResponseBodyRegions) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceRegionResponseBodyRegions) GoString() string { + return s.String() +} + +func (s 
*ListInstanceRegionResponseBodyRegions) SetLocalName(v string) *ListInstanceRegionResponseBodyRegions { + s.LocalName = &v + return s +} + +func (s *ListInstanceRegionResponseBodyRegions) SetRegionId(v string) *ListInstanceRegionResponseBodyRegions { + s.RegionId = &v + return s +} + +type ListInstanceRegionResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListInstanceRegionResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListInstanceRegionResponse) String() string { + return tea.Prettify(s) +} + +func (s ListInstanceRegionResponse) GoString() string { + return s.String() +} + +func (s *ListInstanceRegionResponse) SetHeaders(v map[string]*string) *ListInstanceRegionResponse { + s.Headers = v + return s +} + +func (s *ListInstanceRegionResponse) SetBody(v *ListInstanceRegionResponseBody) *ListInstanceRegionResponse { + s.Body = v + return s +} + +type ListNamespaceRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Tag []*ListNamespaceRequestTag `json:"Tag,omitempty" xml:"Tag,omitempty" type:"Repeated"` +} + +func (s ListNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceRequest) GoString() string { + return s.String() +} + +func (s *ListNamespaceRequest) SetInstanceId(v string) *ListNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *ListNamespaceRequest) SetNamespaceName(v string) *ListNamespaceRequest { + s.NamespaceName = &v + return s +} + +func (s *ListNamespaceRequest) SetNamespaceStatus(v string) *ListNamespaceRequest { + s.NamespaceStatus = &v + return s +} + +func (s *ListNamespaceRequest) SetPageNo(v int32) *ListNamespaceRequest { + s.PageNo = &v + return s +} + +func (s *ListNamespaceRequest) SetPageSize(v int32) *ListNamespaceRequest { + s.PageSize = &v + return s +} + +func (s *ListNamespaceRequest) SetResourceGroupId(v string) *ListNamespaceRequest { + s.ResourceGroupId = &v + return s +} + +func (s *ListNamespaceRequest) SetTag(v []*ListNamespaceRequestTag) *ListNamespaceRequest { + s.Tag = v + return s +} + +type ListNamespaceRequestTag struct { + Key *string `json:"Key,omitempty" xml:"Key,omitempty"` + Value *string `json:"Value,omitempty" xml:"Value,omitempty"` +} + +func (s ListNamespaceRequestTag) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceRequestTag) GoString() string { + return s.String() +} + +func (s *ListNamespaceRequestTag) SetKey(v string) *ListNamespaceRequestTag { + s.Key = &v + return s +} + +func (s *ListNamespaceRequestTag) SetValue(v string) *ListNamespaceRequestTag { + s.Value = &v + return s +} + +type ListNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + Namespaces []*ListNamespaceResponseBodyNamespaces `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + 
RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *ListNamespaceResponseBody) SetCode(v string) *ListNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *ListNamespaceResponseBody) SetIsSuccess(v bool) *ListNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListNamespaceResponseBody) SetNamespaces(v []*ListNamespaceResponseBodyNamespaces) *ListNamespaceResponseBody { + s.Namespaces = v + return s +} + +func (s *ListNamespaceResponseBody) SetPageNo(v int32) *ListNamespaceResponseBody { + s.PageNo = &v + return s +} + +func (s *ListNamespaceResponseBody) SetPageSize(v int32) *ListNamespaceResponseBody { + s.PageSize = &v + return s +} + +func (s *ListNamespaceResponseBody) SetRequestId(v string) *ListNamespaceResponseBody { + s.RequestId = &v + return s +} + +func (s *ListNamespaceResponseBody) SetTotalCount(v string) *ListNamespaceResponseBody { + s.TotalCount = &v + return s +} + +type ListNamespaceResponseBodyNamespaces struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceId *string `json:"NamespaceId,omitempty" xml:"NamespaceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + NamespaceStatus *string `json:"NamespaceStatus,omitempty" xml:"NamespaceStatus,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Tags []*ListNamespaceResponseBodyNamespacesTags `json:"Tags,omitempty" xml:"Tags,omitempty" type:"Repeated"` +} + +func (s ListNamespaceResponseBodyNamespaces) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceResponseBodyNamespaces) GoString() string { + return s.String() +} + +func (s *ListNamespaceResponseBodyNamespaces) SetAutoCreateRepo(v bool) *ListNamespaceResponseBodyNamespaces { + s.AutoCreateRepo = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetDefaultRepoType(v string) *ListNamespaceResponseBodyNamespaces { + s.DefaultRepoType = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetInstanceId(v string) *ListNamespaceResponseBodyNamespaces { + s.InstanceId = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceId(v string) *ListNamespaceResponseBodyNamespaces { + s.NamespaceId = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceName(v string) *ListNamespaceResponseBodyNamespaces { + s.NamespaceName = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetNamespaceStatus(v string) *ListNamespaceResponseBodyNamespaces { + s.NamespaceStatus = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetResourceGroupId(v string) *ListNamespaceResponseBodyNamespaces { + s.ResourceGroupId = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespaces) SetTags(v []*ListNamespaceResponseBodyNamespacesTags) *ListNamespaceResponseBodyNamespaces { + s.Tags = v + return s +} + +type ListNamespaceResponseBodyNamespacesTags struct { + TagKey *string `json:"TagKey,omitempty" xml:"TagKey,omitempty"` + TagValue *string 
`json:"TagValue,omitempty" xml:"TagValue,omitempty"` +} + +func (s ListNamespaceResponseBodyNamespacesTags) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceResponseBodyNamespacesTags) GoString() string { + return s.String() +} + +func (s *ListNamespaceResponseBodyNamespacesTags) SetTagKey(v string) *ListNamespaceResponseBodyNamespacesTags { + s.TagKey = &v + return s +} + +func (s *ListNamespaceResponseBodyNamespacesTags) SetTagValue(v string) *ListNamespaceResponseBodyNamespacesTags { + s.TagValue = &v + return s +} + +type ListNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s ListNamespaceResponse) GoString() string { + return s.String() +} + +func (s *ListNamespaceResponse) SetHeaders(v map[string]*string) *ListNamespaceResponse { + s.Headers = v + return s +} + +func (s *ListNamespaceResponse) SetBody(v *ListNamespaceResponseBody) *ListNamespaceResponse { + s.Body = v + return s +} + +type ListRepoBuildRecordRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s ListRepoBuildRecordRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRecordRequest) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRecordRequest) SetInstanceId(v string) *ListRepoBuildRecordRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoBuildRecordRequest) SetPageNo(v int32) *ListRepoBuildRecordRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoBuildRecordRequest) SetPageSize(v int32) *ListRepoBuildRecordRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoBuildRecordRequest) SetRepoId(v string) *ListRepoBuildRecordRequest { + s.RepoId = &v + return s +} + +type ListRepoBuildRecordResponseBody struct { + BuildRecords []*ListRepoBuildRecordResponseBodyBuildRecords `json:"BuildRecords,omitempty" xml:"BuildRecords,omitempty" type:"Repeated"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepoBuildRecordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRecordResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRecordResponseBody) SetBuildRecords(v []*ListRepoBuildRecordResponseBodyBuildRecords) *ListRepoBuildRecordResponseBody { + s.BuildRecords = v + return s +} + +func (s *ListRepoBuildRecordResponseBody) SetCode(v string) *ListRepoBuildRecordResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoBuildRecordResponseBody) SetIsSuccess(v bool) *ListRepoBuildRecordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoBuildRecordResponseBody) SetPageNo(v int32) *ListRepoBuildRecordResponseBody { + s.PageNo = &v + return s +} + +func (s 
*ListRepoBuildRecordResponseBody) SetPageSize(v int32) *ListRepoBuildRecordResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoBuildRecordResponseBody) SetRequestId(v string) *ListRepoBuildRecordResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoBuildRecordResponseBody) SetTotalCount(v string) *ListRepoBuildRecordResponseBody { + s.TotalCount = &v + return s +} + +type ListRepoBuildRecordResponseBodyBuildRecords struct { + BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"` + BuildStatus *string `json:"BuildStatus,omitempty" xml:"BuildStatus,omitempty"` + EndTime *string `json:"EndTime,omitempty" xml:"EndTime,omitempty"` + Image *ListRepoBuildRecordResponseBodyBuildRecordsImage `json:"Image,omitempty" xml:"Image,omitempty" type:"Struct"` + StartTime *string `json:"StartTime,omitempty" xml:"StartTime,omitempty"` +} + +func (s ListRepoBuildRecordResponseBodyBuildRecords) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRecordResponseBodyBuildRecords) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetBuildRecordId(v string) *ListRepoBuildRecordResponseBodyBuildRecords { + s.BuildRecordId = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetBuildStatus(v string) *ListRepoBuildRecordResponseBodyBuildRecords { + s.BuildStatus = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetEndTime(v string) *ListRepoBuildRecordResponseBodyBuildRecords { + s.EndTime = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetImage(v *ListRepoBuildRecordResponseBodyBuildRecordsImage) *ListRepoBuildRecordResponseBodyBuildRecords { + s.Image = v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecords) SetStartTime(v string) *ListRepoBuildRecordResponseBodyBuildRecords { + s.StartTime = &v + return s +} + +type ListRepoBuildRecordResponseBodyBuildRecordsImage struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListRepoBuildRecordResponseBodyBuildRecordsImage) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRecordResponseBodyBuildRecordsImage) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetImageTag(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage { + s.ImageTag = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoId(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage { + s.RepoId = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoName(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage { + s.RepoName = &v + return s +} + +func (s *ListRepoBuildRecordResponseBodyBuildRecordsImage) SetRepoNamespaceName(v string) *ListRepoBuildRecordResponseBodyBuildRecordsImage { + s.RepoNamespaceName = &v + return s +} + +type ListRepoBuildRecordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoBuildRecordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoBuildRecordResponse) String() string { + return tea.Prettify(s) +} + +func (s 
ListRepoBuildRecordResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordResponse) SetHeaders(v map[string]*string) *ListRepoBuildRecordResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoBuildRecordResponse) SetBody(v *ListRepoBuildRecordResponseBody) *ListRepoBuildRecordResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoBuildRecordLogRequest struct {
+	BuildRecordId *string `json:"BuildRecordId,omitempty" xml:"BuildRecordId,omitempty"`
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	Offset *int32 `json:"Offset,omitempty" xml:"Offset,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetBuildRecordId(v string) *ListRepoBuildRecordLogRequest {
+	s.BuildRecordId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetInstanceId(v string) *ListRepoBuildRecordLogRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetOffset(v int32) *ListRepoBuildRecordLogRequest {
+	s.Offset = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogRequest) SetRepoId(v string) *ListRepoBuildRecordLogRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponseBody struct {
+	BuildRecordLogs []*ListRepoBuildRecordLogResponseBodyBuildRecordLogs `json:"BuildRecordLogs,omitempty" xml:"BuildRecordLogs,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetBuildRecordLogs(v []*ListRepoBuildRecordLogResponseBodyBuildRecordLogs) *ListRepoBuildRecordLogResponseBody {
+	s.BuildRecordLogs = v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetCode(v string) *ListRepoBuildRecordLogResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetIsSuccess(v bool) *ListRepoBuildRecordLogResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetPageNo(v int32) *ListRepoBuildRecordLogResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetPageSize(v int32) *ListRepoBuildRecordLogResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetRequestId(v string) *ListRepoBuildRecordLogResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBody) SetTotalCount(v string) *ListRepoBuildRecordLogResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponseBodyBuildRecordLogs struct {
+	BuildStage *string `json:"BuildStage,omitempty" xml:"BuildStage,omitempty"`
+	LineNumber *int32 `json:"LineNumber,omitempty" xml:"LineNumber,omitempty"`
+	Message *string `json:"Message,omitempty" xml:"Message,omitempty"`
+}
+
+func (s ListRepoBuildRecordLogResponseBodyBuildRecordLogs) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponseBodyBuildRecordLogs) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetBuildStage(v string) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.BuildStage = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetLineNumber(v int32) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.LineNumber = &v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponseBodyBuildRecordLogs) SetMessage(v string) *ListRepoBuildRecordLogResponseBodyBuildRecordLogs {
+	s.Message = &v
+	return s
+}
+
+type ListRepoBuildRecordLogResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoBuildRecordLogResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoBuildRecordLogResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRecordLogResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRecordLogResponse) SetHeaders(v map[string]*string) *ListRepoBuildRecordLogResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoBuildRecordLogResponse) SetBody(v *ListRepoBuildRecordLogResponseBody) *ListRepoBuildRecordLogResponse {
+	s.Body = v
+	return s
+}
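A usage sketch (not part of the vendored file) for the build-record log models above: the response body is pointer-heavy, so values are conventionally unwrapped with the nil-safe helpers from the tea package (tea.StringValue and tea.Int32Value return zero values for nil pointers). The helper name printBuildLogs and the cr import path are assumptions.

package main

import (
	"fmt"

	cr "github.com/alibabacloud-go/cr-20181201/client" // assumed import path
	"github.com/alibabacloud-go/tea/tea"
)

// printBuildLogs walks a ListRepoBuildRecordLog response body and prints
// each log line; the tea helpers make the nil checks implicit.
func printBuildLogs(body *cr.ListRepoBuildRecordLogResponseBody) {
	for _, l := range body.BuildRecordLogs {
		fmt.Printf("[%s] %4d: %s\n",
			tea.StringValue(l.BuildStage),
			tea.Int32Value(l.LineNumber),
			tea.StringValue(l.Message))
	}
}

func main() {
	// An empty body has zero log lines, so this prints nothing.
	printBuildLogs(&cr.ListRepoBuildRecordLogResponseBody{})
}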
+type ListRepoBuildRuleRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoBuildRuleRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleRequest) SetInstanceId(v string) *ListRepoBuildRuleRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetPageNo(v int32) *ListRepoBuildRuleRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetPageSize(v int32) *ListRepoBuildRuleRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleRequest) SetRepoId(v string) *ListRepoBuildRuleRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoBuildRuleResponseBody struct {
+	BuildRules []*ListRepoBuildRuleResponseBodyBuildRules `json:"BuildRules,omitempty" xml:"BuildRules,omitempty" type:"Repeated"`
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoBuildRuleResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoBuildRuleResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetBuildRules(v []*ListRepoBuildRuleResponseBodyBuildRules) *ListRepoBuildRuleResponseBody {
+	s.BuildRules = v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetCode(v string) *ListRepoBuildRuleResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoBuildRuleResponseBody) SetIsSuccess(v bool) 
*ListRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoBuildRuleResponseBody) SetPageNo(v int32) *ListRepoBuildRuleResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepoBuildRuleResponseBody) SetPageSize(v int32) *ListRepoBuildRuleResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoBuildRuleResponseBody) SetRequestId(v string) *ListRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoBuildRuleResponseBody) SetTotalCount(v string) *ListRepoBuildRuleResponseBody { + s.TotalCount = &v + return s +} + +type ListRepoBuildRuleResponseBodyBuildRules struct { + BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"` + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"` + DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"` + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"` + PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"` + PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"` +} + +func (s ListRepoBuildRuleResponseBodyBuildRules) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRuleResponseBodyBuildRules) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetBuildArgs(v []*string) *ListRepoBuildRuleResponseBodyBuildRules { + s.BuildArgs = v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetBuildRuleId(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.BuildRuleId = &v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetDockerfileLocation(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.DockerfileLocation = &v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetDockerfileName(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.DockerfileName = &v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetImageTag(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.ImageTag = &v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPlatforms(v []*string) *ListRepoBuildRuleResponseBodyBuildRules { + s.Platforms = v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPushName(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.PushName = &v + return s +} + +func (s *ListRepoBuildRuleResponseBodyBuildRules) SetPushType(v string) *ListRepoBuildRuleResponseBodyBuildRules { + s.PushType = &v + return s +} + +type ListRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *ListRepoBuildRuleResponse) SetHeaders(v map[string]*string) *ListRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *ListRepoBuildRuleResponse) SetBody(v *ListRepoBuildRuleResponseBody) *ListRepoBuildRuleResponse { + s.Body = v + return s +} + +type ListRepoSyncRuleRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + 
NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` +} + +func (s ListRepoSyncRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncRuleRequest) GoString() string { + return s.String() +} + +func (s *ListRepoSyncRuleRequest) SetInstanceId(v string) *ListRepoSyncRuleRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetNamespaceName(v string) *ListRepoSyncRuleRequest { + s.NamespaceName = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetPageNo(v int32) *ListRepoSyncRuleRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetPageSize(v int32) *ListRepoSyncRuleRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetRepoName(v string) *ListRepoSyncRuleRequest { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetTargetInstanceId(v string) *ListRepoSyncRuleRequest { + s.TargetInstanceId = &v + return s +} + +func (s *ListRepoSyncRuleRequest) SetTargetRegionId(v string) *ListRepoSyncRuleRequest { + s.TargetRegionId = &v + return s +} + +type ListRepoSyncRuleResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncRules []*ListRepoSyncRuleResponseBodySyncRules `json:"SyncRules,omitempty" xml:"SyncRules,omitempty" type:"Repeated"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepoSyncRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncRuleResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoSyncRuleResponseBody) SetCode(v string) *ListRepoSyncRuleResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetIsSuccess(v bool) *ListRepoSyncRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetPageNo(v int32) *ListRepoSyncRuleResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetPageSize(v int32) *ListRepoSyncRuleResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetRequestId(v string) *ListRepoSyncRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetSyncRules(v []*ListRepoSyncRuleResponseBodySyncRules) *ListRepoSyncRuleResponseBody { + s.SyncRules = v + return s +} + +func (s *ListRepoSyncRuleResponseBody) SetTotalCount(v int32) *ListRepoSyncRuleResponseBody { + s.TotalCount = &v + return s +} + +type ListRepoSyncRuleResponseBodySyncRules struct { + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"` + LocalInstanceId *string `json:"LocalInstanceId,omitempty" xml:"LocalInstanceId,omitempty"` + LocalNamespaceName *string 
`json:"LocalNamespaceName,omitempty" xml:"LocalNamespaceName,omitempty"` + LocalRegionId *string `json:"LocalRegionId,omitempty" xml:"LocalRegionId,omitempty"` + LocalRepoName *string `json:"LocalRepoName,omitempty" xml:"LocalRepoName,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + SyncDirection *string `json:"SyncDirection,omitempty" xml:"SyncDirection,omitempty"` + SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"` + SyncRuleName *string `json:"SyncRuleName,omitempty" xml:"SyncRuleName,omitempty"` + SyncScope *string `json:"SyncScope,omitempty" xml:"SyncScope,omitempty"` + SyncTrigger *string `json:"SyncTrigger,omitempty" xml:"SyncTrigger,omitempty"` + TagFilter *string `json:"TagFilter,omitempty" xml:"TagFilter,omitempty"` + TargetInstanceId *string `json:"TargetInstanceId,omitempty" xml:"TargetInstanceId,omitempty"` + TargetNamespaceName *string `json:"TargetNamespaceName,omitempty" xml:"TargetNamespaceName,omitempty"` + TargetRegionId *string `json:"TargetRegionId,omitempty" xml:"TargetRegionId,omitempty"` + TargetRepoName *string `json:"TargetRepoName,omitempty" xml:"TargetRepoName,omitempty"` +} + +func (s ListRepoSyncRuleResponseBodySyncRules) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncRuleResponseBodySyncRules) GoString() string { + return s.String() +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetCreateTime(v int64) *ListRepoSyncRuleResponseBodySyncRules { + s.CreateTime = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetCrossUser(v bool) *ListRepoSyncRuleResponseBodySyncRules { + s.CrossUser = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalInstanceId(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.LocalInstanceId = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalNamespaceName(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.LocalNamespaceName = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalRegionId(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.LocalRegionId = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetLocalRepoName(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.LocalRepoName = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetModifiedTime(v int64) *ListRepoSyncRuleResponseBodySyncRules { + s.ModifiedTime = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncDirection(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.SyncDirection = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncRuleId(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.SyncRuleId = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncRuleName(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.SyncRuleName = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncScope(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.SyncScope = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetSyncTrigger(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.SyncTrigger = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetTagFilter(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.TagFilter = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetInstanceId(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.TargetInstanceId = &v + return s +} + +func (s 
*ListRepoSyncRuleResponseBodySyncRules) SetTargetNamespaceName(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.TargetNamespaceName = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetRegionId(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.TargetRegionId = &v + return s +} + +func (s *ListRepoSyncRuleResponseBodySyncRules) SetTargetRepoName(v string) *ListRepoSyncRuleResponseBodySyncRules { + s.TargetRepoName = &v + return s +} + +type ListRepoSyncRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoSyncRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoSyncRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncRuleResponse) GoString() string { + return s.String() +} + +func (s *ListRepoSyncRuleResponse) SetHeaders(v map[string]*string) *ListRepoSyncRuleResponse { + s.Headers = v + return s +} + +func (s *ListRepoSyncRuleResponse) SetBody(v *ListRepoSyncRuleResponseBody) *ListRepoSyncRuleResponse { + s.Body = v + return s +} + +type ListRepoSyncTaskRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + SyncRecordId *string `json:"SyncRecordId,omitempty" xml:"SyncRecordId,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s ListRepoSyncTaskRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskRequest) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskRequest) SetInstanceId(v string) *ListRepoSyncTaskRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetPageNo(v int32) *ListRepoSyncTaskRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetPageSize(v int32) *ListRepoSyncTaskRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetRepoName(v string) *ListRepoSyncTaskRequest { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetRepoNamespaceName(v string) *ListRepoSyncTaskRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetSyncRecordId(v string) *ListRepoSyncTaskRequest { + s.SyncRecordId = &v + return s +} + +func (s *ListRepoSyncTaskRequest) SetTag(v string) *ListRepoSyncTaskRequest { + s.Tag = &v + return s +} + +type ListRepoSyncTaskResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + SyncTasks []*ListRepoSyncTaskResponseBodySyncTasks `json:"SyncTasks,omitempty" xml:"SyncTasks,omitempty" type:"Repeated"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepoSyncTaskResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponseBody) SetCode(v string) 
*ListRepoSyncTaskResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetIsSuccess(v bool) *ListRepoSyncTaskResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetPageNo(v int32) *ListRepoSyncTaskResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetPageSize(v int32) *ListRepoSyncTaskResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetRequestId(v string) *ListRepoSyncTaskResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetSyncTasks(v []*ListRepoSyncTaskResponseBodySyncTasks) *ListRepoSyncTaskResponseBody {
+	s.SyncTasks = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBody) SetTotalCount(v string) *ListRepoSyncTaskResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoSyncTaskResponseBodySyncTasks struct {
+	CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"`
+	CrossUser *bool `json:"CrossUser,omitempty" xml:"CrossUser,omitempty"`
+	CustomLink *bool `json:"CustomLink,omitempty" xml:"CustomLink,omitempty"`
+	ImageFrom *ListRepoSyncTaskResponseBodySyncTasksImageFrom `json:"ImageFrom,omitempty" xml:"ImageFrom,omitempty" type:"Struct"`
+	ImageTo *ListRepoSyncTaskResponseBodySyncTasksImageTo `json:"ImageTo,omitempty" xml:"ImageTo,omitempty" type:"Struct"`
+	// "ModifedTime" (sic) reproduces the field name as spelled by the upstream
+	// API; correcting the spelling here would break JSON/XML unmarshalling.
+	ModifedTime *int64 `json:"ModifedTime,omitempty" xml:"ModifedTime,omitempty"`
+	SyncBatchTaskId *string `json:"SyncBatchTaskId,omitempty" xml:"SyncBatchTaskId,omitempty"`
+	SyncRuleId *string `json:"SyncRuleId,omitempty" xml:"SyncRuleId,omitempty"`
+	SyncTaskId *string `json:"SyncTaskId,omitempty" xml:"SyncTaskId,omitempty"`
+	SyncTransAccelerate *bool `json:"SyncTransAccelerate,omitempty" xml:"SyncTransAccelerate,omitempty"`
+	TaskStatus *string `json:"TaskStatus,omitempty" xml:"TaskStatus,omitempty"`
+	TaskTrigger *string `json:"TaskTrigger,omitempty" xml:"TaskTrigger,omitempty"`
+}
+
+func (s ListRepoSyncTaskResponseBodySyncTasks) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncTaskResponseBodySyncTasks) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCreateTime(v int64) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CreateTime = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCrossUser(v bool) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CrossUser = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetCustomLink(v bool) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.CustomLink = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetImageFrom(v *ListRepoSyncTaskResponseBodySyncTasksImageFrom) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ImageFrom = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetImageTo(v *ListRepoSyncTaskResponseBodySyncTasksImageTo) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ImageTo = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetModifedTime(v int64) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.ModifedTime = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncBatchTaskId(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncBatchTaskId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncRuleId(v string) *ListRepoSyncTaskResponseBodySyncTasks {
+	s.SyncRuleId = &v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncTaskId(v string) 
*ListRepoSyncTaskResponseBodySyncTasks { + s.SyncTaskId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasks) SetSyncTransAccelerate(v bool) *ListRepoSyncTaskResponseBodySyncTasks { + s.SyncTransAccelerate = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasks) SetTaskStatus(v string) *ListRepoSyncTaskResponseBodySyncTasks { + s.TaskStatus = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasks) SetTaskTrigger(v string) *ListRepoSyncTaskResponseBodySyncTasks { + s.TaskTrigger = &v + return s +} + +type ListRepoSyncTaskResponseBodySyncTasksImageFrom struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageFrom) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageFrom) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetImageTag(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.ImageTag = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetInstanceId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRegionId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RegionId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRepoName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageFrom) SetRepoNamespaceName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageFrom { + s.RepoNamespaceName = &v + return s +} + +type ListRepoSyncTaskResponseBodySyncTasksImageTo struct { + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RegionId *string `json:"RegionId,omitempty" xml:"RegionId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageTo) String() string { + return tea.Prettify(s) +} + +func (s ListRepoSyncTaskResponseBodySyncTasksImageTo) GoString() string { + return s.String() +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetImageTag(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.ImageTag = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetInstanceId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.InstanceId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRegionId(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RegionId = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRepoName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RepoName = &v + return s +} + +func (s *ListRepoSyncTaskResponseBodySyncTasksImageTo) SetRepoNamespaceName(v string) *ListRepoSyncTaskResponseBodySyncTasksImageTo { + s.RepoNamespaceName = &v + return s +} + +type 
ListRepoSyncTaskResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoSyncTaskResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoSyncTaskResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoSyncTaskResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoSyncTaskResponse) SetHeaders(v map[string]*string) *ListRepoSyncTaskResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoSyncTaskResponse) SetBody(v *ListRepoSyncTaskResponseBody) *ListRepoSyncTaskResponse {
+	s.Body = v
+	return s
+}
+
+type ListRepoTagRequest struct {
+	InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"`
+}
+
+func (s ListRepoTagRequest) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoTagRequest) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoTagRequest) SetInstanceId(v string) *ListRepoTagRequest {
+	s.InstanceId = &v
+	return s
+}
+
+func (s *ListRepoTagRequest) SetPageNo(v int32) *ListRepoTagRequest {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoTagRequest) SetPageSize(v int32) *ListRepoTagRequest {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoTagRequest) SetRepoId(v string) *ListRepoTagRequest {
+	s.RepoId = &v
+	return s
+}
+
+type ListRepoTagResponseBody struct {
+	Code *string `json:"Code,omitempty" xml:"Code,omitempty"`
+	Images []*ListRepoTagResponseBodyImages `json:"Images,omitempty" xml:"Images,omitempty" type:"Repeated"`
+	IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"`
+	PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"`
+	PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"`
+	RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"`
+	TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"`
+}
+
+func (s ListRepoTagResponseBody) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoTagResponseBody) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoTagResponseBody) SetCode(v string) *ListRepoTagResponseBody {
+	s.Code = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetImages(v []*ListRepoTagResponseBodyImages) *ListRepoTagResponseBody {
+	s.Images = v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetIsSuccess(v bool) *ListRepoTagResponseBody {
+	s.IsSuccess = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetPageNo(v int32) *ListRepoTagResponseBody {
+	s.PageNo = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetPageSize(v int32) *ListRepoTagResponseBody {
+	s.PageSize = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetRequestId(v string) *ListRepoTagResponseBody {
+	s.RequestId = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBody) SetTotalCount(v string) *ListRepoTagResponseBody {
+	s.TotalCount = &v
+	return s
+}
+
+type ListRepoTagResponseBodyImages struct {
+	Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"`
+	ImageCreate *string `json:"ImageCreate,omitempty" xml:"ImageCreate,omitempty"`
+	ImageId *string `json:"ImageId,omitempty" xml:"ImageId,omitempty"`
+	ImageSize *int64 `json:"ImageSize,omitempty" xml:"ImageSize,omitempty"`
+	ImageUpdate *string `json:"ImageUpdate,omitempty" xml:"ImageUpdate,omitempty"`
+	Status *string `json:"Status,omitempty" xml:"Status,omitempty"`
+	Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"`
+}
+
+func (s ListRepoTagResponseBodyImages) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoTagResponseBodyImages) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoTagResponseBodyImages) SetDigest(v string) *ListRepoTagResponseBodyImages {
+	s.Digest = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetImageCreate(v string) *ListRepoTagResponseBodyImages {
+	s.ImageCreate = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetImageId(v string) *ListRepoTagResponseBodyImages {
+	s.ImageId = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetImageSize(v int64) *ListRepoTagResponseBodyImages {
+	s.ImageSize = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetImageUpdate(v string) *ListRepoTagResponseBodyImages {
+	s.ImageUpdate = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetStatus(v string) *ListRepoTagResponseBodyImages {
+	s.Status = &v
+	return s
+}
+
+func (s *ListRepoTagResponseBodyImages) SetTag(v string) *ListRepoTagResponseBodyImages {
+	s.Tag = &v
+	return s
+}
+
+type ListRepoTagResponse struct {
+	Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"`
+	Body *ListRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"`
+}
+
+func (s ListRepoTagResponse) String() string {
+	return tea.Prettify(s)
+}
+
+func (s ListRepoTagResponse) GoString() string {
+	return s.String()
+}
+
+func (s *ListRepoTagResponse) SetHeaders(v map[string]*string) *ListRepoTagResponse {
+	s.Headers = v
+	return s
+}
+
+func (s *ListRepoTagResponse) SetBody(v *ListRepoTagResponseBody) *ListRepoTagResponse {
+	s.Body = v
+	return s
+}
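A paging sketch (not part of the vendored file): list calls in this SDK page with PageNo/PageSize, and on this particular body TotalCount is a *string (other bodies in the file use *int32), so the simplest loop stops on a short page instead of parsing the count. Client.ListRepoTag is an assumption about the SDK's generated client; the cr import alias is as in the earlier sketches.

package main

import (
	cr "github.com/alibabacloud-go/cr-20181201/client" // assumed import path
)

// allRepoTags accumulates every tag of one repository by walking the pages.
func allRepoTags(c *cr.Client, instanceId, repoId string) ([]*cr.ListRepoTagResponseBodyImages, error) {
	const pageSize int32 = 30
	var images []*cr.ListRepoTagResponseBodyImages
	for page := int32(1); ; page++ {
		req := &cr.ListRepoTagRequest{}
		req.SetInstanceId(instanceId).
			SetRepoId(repoId).
			SetPageNo(page).
			SetPageSize(pageSize)
		resp, err := c.ListRepoTag(req) // assumed generated client method
		if err != nil {
			return nil, err
		}
		images = append(images, resp.Body.Images...)
		if int32(len(resp.Body.Images)) < pageSize {
			return images, nil // short (or empty) page: no more results
		}
	}
}

func main() {}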
*string `json:"Status,omitempty" xml:"Status,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` +} + +func (s ListRepoTagResponseBodyImages) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagResponseBodyImages) GoString() string { + return s.String() +} + +func (s *ListRepoTagResponseBodyImages) SetDigest(v string) *ListRepoTagResponseBodyImages { + s.Digest = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageCreate(v string) *ListRepoTagResponseBodyImages { + s.ImageCreate = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageId(v string) *ListRepoTagResponseBodyImages { + s.ImageId = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageSize(v int64) *ListRepoTagResponseBodyImages { + s.ImageSize = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetImageUpdate(v string) *ListRepoTagResponseBodyImages { + s.ImageUpdate = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetStatus(v string) *ListRepoTagResponseBodyImages { + s.Status = &v + return s +} + +func (s *ListRepoTagResponseBodyImages) SetTag(v string) *ListRepoTagResponseBodyImages { + s.Tag = &v + return s +} + +type ListRepoTagResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTagResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTagResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTagResponse) SetHeaders(v map[string]*string) *ListRepoTagResponse { + s.Headers = v + return s +} + +func (s *ListRepoTagResponse) SetBody(v *ListRepoTagResponseBody) *ListRepoTagResponse { + s.Body = v + return s +} + +type ListRepoTagScanResultRequest struct { + Digest *string `json:"Digest,omitempty" xml:"Digest,omitempty"` + FilterValue *string `json:"FilterValue,omitempty" xml:"FilterValue,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + ScanTaskId *string `json:"ScanTaskId,omitempty" xml:"ScanTaskId,omitempty"` + ScanType *string `json:"ScanType,omitempty" xml:"ScanType,omitempty"` + Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"` + Tag *string `json:"Tag,omitempty" xml:"Tag,omitempty"` + VulQueryKey *string `json:"VulQueryKey,omitempty" xml:"VulQueryKey,omitempty"` +} + +func (s ListRepoTagScanResultRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultRequest) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultRequest) SetDigest(v string) *ListRepoTagScanResultRequest { + s.Digest = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetFilterValue(v string) *ListRepoTagScanResultRequest { + s.FilterValue = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetInstanceId(v string) *ListRepoTagScanResultRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetPageNo(v int32) *ListRepoTagScanResultRequest { + s.PageNo = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetPageSize(v int32) *ListRepoTagScanResultRequest { + s.PageSize = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetRepoId(v string) *ListRepoTagScanResultRequest { + 
s.RepoId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetScanTaskId(v string) *ListRepoTagScanResultRequest { + s.ScanTaskId = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetScanType(v string) *ListRepoTagScanResultRequest { + s.ScanType = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetSeverity(v string) *ListRepoTagScanResultRequest { + s.Severity = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetTag(v string) *ListRepoTagScanResultRequest { + s.Tag = &v + return s +} + +func (s *ListRepoTagScanResultRequest) SetVulQueryKey(v string) *ListRepoTagScanResultRequest { + s.VulQueryKey = &v + return s +} + +type ListRepoTagScanResultResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *int32 `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` + Vulnerabilities []*ListRepoTagScanResultResponseBodyVulnerabilities `json:"Vulnerabilities,omitempty" xml:"Vulnerabilities,omitempty" type:"Repeated"` +} + +func (s ListRepoTagScanResultResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponseBody) SetCode(v string) *ListRepoTagScanResultResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetIsSuccess(v bool) *ListRepoTagScanResultResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetPageNo(v int32) *ListRepoTagScanResultResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetPageSize(v int32) *ListRepoTagScanResultResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetRequestId(v string) *ListRepoTagScanResultResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetTotalCount(v int32) *ListRepoTagScanResultResponseBody { + s.TotalCount = &v + return s +} + +func (s *ListRepoTagScanResultResponseBody) SetVulnerabilities(v []*ListRepoTagScanResultResponseBodyVulnerabilities) *ListRepoTagScanResultResponseBody { + s.Vulnerabilities = v + return s +} + +type ListRepoTagScanResultResponseBodyVulnerabilities struct { + AddedBy *string `json:"AddedBy,omitempty" xml:"AddedBy,omitempty"` + AliasName *string `json:"AliasName,omitempty" xml:"AliasName,omitempty"` + CveLink *string `json:"CveLink,omitempty" xml:"CveLink,omitempty"` + CveLocation *string `json:"CveLocation,omitempty" xml:"CveLocation,omitempty"` + CveName *string `json:"CveName,omitempty" xml:"CveName,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + Feature *string `json:"Feature,omitempty" xml:"Feature,omitempty"` + FixCmd *string `json:"FixCmd,omitempty" xml:"FixCmd,omitempty"` + ScanType *string `json:"ScanType,omitempty" xml:"ScanType,omitempty"` + Severity *string `json:"Severity,omitempty" xml:"Severity,omitempty"` + Version *string `json:"Version,omitempty" xml:"Version,omitempty"` + VersionFixed *string `json:"VersionFixed,omitempty" xml:"VersionFixed,omitempty"` + VersionFormat *string `json:"VersionFormat,omitempty" xml:"VersionFormat,omitempty"` +} + +func (s 
ListRepoTagScanResultResponseBodyVulnerabilities) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponseBodyVulnerabilities) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetAddedBy(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.AddedBy = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetAliasName(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.AliasName = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveLink(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveLink = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveLocation(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveLocation = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetCveName(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.CveName = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetDescription(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Description = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetFeature(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Feature = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetFixCmd(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.FixCmd = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetScanType(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.ScanType = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetSeverity(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Severity = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersion(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.Version = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersionFixed(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.VersionFixed = &v + return s +} + +func (s *ListRepoTagScanResultResponseBodyVulnerabilities) SetVersionFormat(v string) *ListRepoTagScanResultResponseBodyVulnerabilities { + s.VersionFormat = &v + return s +} + +type ListRepoTagScanResultResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTagScanResultResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTagScanResultResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTagScanResultResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTagScanResultResponse) SetHeaders(v map[string]*string) *ListRepoTagScanResultResponse { + s.Headers = v + return s +} + +func (s *ListRepoTagScanResultResponse) SetBody(v *ListRepoTagScanResultResponseBody) *ListRepoTagScanResultResponse { + s.Body = v + return s +} + +type ListRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s ListRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerRequest) SetInstanceId(v string) *ListRepoTriggerRequest { + 
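// A minimal sketch of consuming the scan result above, assuming resp came from
// ListRepoTagScanResult: every generated field is a pointer, so the tea
// helpers are used to dereference safely (they return zero values for nil).
//
//	for _, vuln := range resp.Body.Vulnerabilities {
//		fmt.Printf("%s [%s] fixed in %s\n",
//			tea.StringValue(vuln.CveName),
//			tea.StringValue(vuln.Severity),
//			tea.StringValue(vuln.VersionFixed))
//	}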
s.InstanceId = &v + return s +} + +func (s *ListRepoTriggerRequest) SetRepoId(v string) *ListRepoTriggerRequest { + s.RepoId = &v + return s +} + +type ListRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + Triggers []*ListRepoTriggerResponseBodyTriggers `json:"Triggers,omitempty" xml:"Triggers,omitempty" type:"Repeated"` +} + +func (s ListRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponseBody) SetCode(v string) *ListRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetIsSuccess(v bool) *ListRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetRequestId(v string) *ListRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepoTriggerResponseBody) SetTriggers(v []*ListRepoTriggerResponseBodyTriggers) *ListRepoTriggerResponseBody { + s.Triggers = v + return s +} + +type ListRepoTriggerResponseBodyTriggers struct { + RepoEvent *string `json:"RepoEvent,omitempty" xml:"RepoEvent,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` + TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"` + TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"` + TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"` + TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"` +} + +func (s ListRepoTriggerResponseBodyTriggers) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerResponseBodyTriggers) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetRepoEvent(v string) *ListRepoTriggerResponseBodyTriggers { + s.RepoEvent = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerId(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerId = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerName(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerName = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerTag(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerTag = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerType(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerType = &v + return s +} + +func (s *ListRepoTriggerResponseBodyTriggers) SetTriggerUrl(v string) *ListRepoTriggerResponseBodyTriggers { + s.TriggerUrl = &v + return s +} + +type ListRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *ListRepoTriggerResponse) SetHeaders(v map[string]*string) *ListRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *ListRepoTriggerResponse) SetBody(v *ListRepoTriggerResponseBody) *ListRepoTriggerResponse { + s.Body = v + return s +} + +type ListRepositoryRequest struct { + InstanceId *string `json:"InstanceId,omitempty" 
xml:"InstanceId,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` +} + +func (s ListRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryRequest) GoString() string { + return s.String() +} + +func (s *ListRepositoryRequest) SetInstanceId(v string) *ListRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *ListRepositoryRequest) SetPageNo(v int32) *ListRepositoryRequest { + s.PageNo = &v + return s +} + +func (s *ListRepositoryRequest) SetPageSize(v int32) *ListRepositoryRequest { + s.PageSize = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoName(v string) *ListRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoNamespaceName(v string) *ListRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *ListRepositoryRequest) SetRepoStatus(v string) *ListRepositoryRequest { + s.RepoStatus = &v + return s +} + +func (s *ListRepositoryRequest) SetResourceGroupId(v string) *ListRepositoryRequest { + s.ResourceGroupId = &v + return s +} + +type ListRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + PageNo *int32 `json:"PageNo,omitempty" xml:"PageNo,omitempty"` + PageSize *int32 `json:"PageSize,omitempty" xml:"PageSize,omitempty"` + Repositories []*ListRepositoryResponseBodyRepositories `json:"Repositories,omitempty" xml:"Repositories,omitempty" type:"Repeated"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + TotalCount *string `json:"TotalCount,omitempty" xml:"TotalCount,omitempty"` +} + +func (s ListRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponseBody) SetCode(v string) *ListRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *ListRepositoryResponseBody) SetIsSuccess(v bool) *ListRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ListRepositoryResponseBody) SetPageNo(v int32) *ListRepositoryResponseBody { + s.PageNo = &v + return s +} + +func (s *ListRepositoryResponseBody) SetPageSize(v int32) *ListRepositoryResponseBody { + s.PageSize = &v + return s +} + +func (s *ListRepositoryResponseBody) SetRepositories(v []*ListRepositoryResponseBodyRepositories) *ListRepositoryResponseBody { + s.Repositories = v + return s +} + +func (s *ListRepositoryResponseBody) SetRequestId(v string) *ListRepositoryResponseBody { + s.RequestId = &v + return s +} + +func (s *ListRepositoryResponseBody) SetTotalCount(v string) *ListRepositoryResponseBody { + s.TotalCount = &v + return s +} + +type ListRepositoryResponseBodyRepositories struct { + CreateTime *int64 `json:"CreateTime,omitempty" xml:"CreateTime,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModifiedTime *int64 `json:"ModifiedTime,omitempty" xml:"ModifiedTime,omitempty"` + RepoBuildType *string `json:"RepoBuildType,omitempty" xml:"RepoBuildType,omitempty"` + 
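// Note for callers paging through ListRepository: unlike
// ListRepoTagScanResultResponseBody, whose TotalCount is an *int32, the
// TotalCount above is a *string and must be parsed before any arithmetic.
// A minimal sketch, assuming strconv and a *Client as elsewhere:
//
//	resp, err := client.ListRepository(new(ListRepositoryRequest).
//		SetInstanceId("cri-xxxxxxxxxxxxxxxx").
//		SetPageNo(1).
//		SetPageSize(50))
//	if err == nil {
//		total, _ := strconv.Atoi(tea.StringValue(resp.Body.TotalCount))
//		_ = total // compare against PageNo*PageSize to decide whether to fetch another page
//	}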
RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoStatus *string `json:"RepoStatus,omitempty" xml:"RepoStatus,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + ResourceGroupId *string `json:"ResourceGroupId,omitempty" xml:"ResourceGroupId,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"` +} + +func (s ListRepositoryResponseBodyRepositories) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponseBodyRepositories) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponseBodyRepositories) SetCreateTime(v int64) *ListRepositoryResponseBodyRepositories { + s.CreateTime = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetInstanceId(v string) *ListRepositoryResponseBodyRepositories { + s.InstanceId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetModifiedTime(v int64) *ListRepositoryResponseBodyRepositories { + s.ModifiedTime = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoBuildType(v string) *ListRepositoryResponseBodyRepositories { + s.RepoBuildType = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoId(v string) *ListRepositoryResponseBodyRepositories { + s.RepoId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoName(v string) *ListRepositoryResponseBodyRepositories { + s.RepoName = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoNamespaceName(v string) *ListRepositoryResponseBodyRepositories { + s.RepoNamespaceName = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoStatus(v string) *ListRepositoryResponseBodyRepositories { + s.RepoStatus = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetRepoType(v string) *ListRepositoryResponseBodyRepositories { + s.RepoType = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetResourceGroupId(v string) *ListRepositoryResponseBodyRepositories { + s.ResourceGroupId = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetSummary(v string) *ListRepositoryResponseBodyRepositories { + s.Summary = &v + return s +} + +func (s *ListRepositoryResponseBodyRepositories) SetTagImmutability(v bool) *ListRepositoryResponseBodyRepositories { + s.TagImmutability = &v + return s +} + +type ListRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ListRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ListRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s ListRepositoryResponse) GoString() string { + return s.String() +} + +func (s *ListRepositoryResponse) SetHeaders(v map[string]*string) *ListRepositoryResponse { + s.Headers = v + return s +} + +func (s *ListRepositoryResponse) SetBody(v *ListRepositoryResponseBody) *ListRepositoryResponse { + s.Body = v + return s +} + +type ResetLoginPasswordRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Password *string `json:"Password,omitempty" xml:"Password,omitempty"` +} + +func (s ResetLoginPasswordRequest) 
String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordRequest) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordRequest) SetInstanceId(v string) *ResetLoginPasswordRequest { + s.InstanceId = &v + return s +} + +func (s *ResetLoginPasswordRequest) SetPassword(v string) *ResetLoginPasswordRequest { + s.Password = &v + return s +} + +type ResetLoginPasswordResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s ResetLoginPasswordResponseBody) String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordResponseBody) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordResponseBody) SetCode(v string) *ResetLoginPasswordResponseBody { + s.Code = &v + return s +} + +func (s *ResetLoginPasswordResponseBody) SetIsSuccess(v bool) *ResetLoginPasswordResponseBody { + s.IsSuccess = &v + return s +} + +func (s *ResetLoginPasswordResponseBody) SetRequestId(v string) *ResetLoginPasswordResponseBody { + s.RequestId = &v + return s +} + +type ResetLoginPasswordResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *ResetLoginPasswordResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s ResetLoginPasswordResponse) String() string { + return tea.Prettify(s) +} + +func (s ResetLoginPasswordResponse) GoString() string { + return s.String() +} + +func (s *ResetLoginPasswordResponse) SetHeaders(v map[string]*string) *ResetLoginPasswordResponse { + s.Headers = v + return s +} + +func (s *ResetLoginPasswordResponse) SetBody(v *ResetLoginPasswordResponseBody) *ResetLoginPasswordResponse { + s.Body = v + return s +} + +type UpdateChainRequest struct { + ChainConfig *string `json:"ChainConfig,omitempty" xml:"ChainConfig,omitempty"` + ChainId *string `json:"ChainId,omitempty" xml:"ChainId,omitempty"` + Description *string `json:"Description,omitempty" xml:"Description,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Name *string `json:"Name,omitempty" xml:"Name,omitempty"` +} + +func (s UpdateChainRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainRequest) GoString() string { + return s.String() +} + +func (s *UpdateChainRequest) SetChainConfig(v string) *UpdateChainRequest { + s.ChainConfig = &v + return s +} + +func (s *UpdateChainRequest) SetChainId(v string) *UpdateChainRequest { + s.ChainId = &v + return s +} + +func (s *UpdateChainRequest) SetDescription(v string) *UpdateChainRequest { + s.Description = &v + return s +} + +func (s *UpdateChainRequest) SetInstanceId(v string) *UpdateChainRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChainRequest) SetName(v string) *UpdateChainRequest { + s.Name = &v + return s +} + +type UpdateChainResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChainResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChainResponseBody) SetCode(v string) *UpdateChainResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChainResponseBody) 
SetIsSuccess(v bool) *UpdateChainResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChainResponseBody) SetRequestId(v string) *UpdateChainResponseBody { + s.RequestId = &v + return s +} + +type UpdateChainResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChainResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChainResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChainResponse) GoString() string { + return s.String() +} + +func (s *UpdateChainResponse) SetHeaders(v map[string]*string) *UpdateChainResponse { + s.Headers = v + return s +} + +func (s *UpdateChainResponse) SetBody(v *UpdateChainResponseBody) *UpdateChainResponse { + s.Body = v + return s +} + +type UpdateChartNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s UpdateChartNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceRequest) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceRequest) SetAutoCreateRepo(v bool) *UpdateChartNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetDefaultRepoType(v string) *UpdateChartNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetInstanceId(v string) *UpdateChartNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChartNamespaceRequest) SetNamespaceName(v string) *UpdateChartNamespaceRequest { + s.NamespaceName = &v + return s +} + +type UpdateChartNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChartNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceResponseBody) SetCode(v string) *UpdateChartNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChartNamespaceResponseBody) SetIsSuccess(v bool) *UpdateChartNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChartNamespaceResponseBody) SetRequestId(v string) *UpdateChartNamespaceResponseBody { + s.RequestId = &v + return s +} + +type UpdateChartNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChartNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChartNamespaceResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartNamespaceResponse) GoString() string { + return s.String() +} + +func (s *UpdateChartNamespaceResponse) SetHeaders(v map[string]*string) *UpdateChartNamespaceResponse { + s.Headers = v + return s +} + +func (s *UpdateChartNamespaceResponse) SetBody(v *UpdateChartNamespaceResponseBody) *UpdateChartNamespaceResponse { + s.Body = v + return s +} + +type UpdateChartRepositoryRequest struct { + InstanceId *string 
`json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoName *string `json:"RepoName,omitempty" xml:"RepoName,omitempty"` + RepoNamespaceName *string `json:"RepoNamespaceName,omitempty" xml:"RepoNamespaceName,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` +} + +func (s UpdateChartRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryRequest) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryRequest) SetInstanceId(v string) *UpdateChartRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoName(v string) *UpdateChartRepositoryRequest { + s.RepoName = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoNamespaceName(v string) *UpdateChartRepositoryRequest { + s.RepoNamespaceName = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetRepoType(v string) *UpdateChartRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *UpdateChartRepositoryRequest) SetSummary(v string) *UpdateChartRepositoryRequest { + s.Summary = &v + return s +} + +type UpdateChartRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateChartRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryResponseBody) SetCode(v string) *UpdateChartRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *UpdateChartRepositoryResponseBody) SetIsSuccess(v bool) *UpdateChartRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateChartRepositoryResponseBody) SetRequestId(v string) *UpdateChartRepositoryResponseBody { + s.RequestId = &v + return s +} + +type UpdateChartRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateChartRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateChartRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateChartRepositoryResponse) GoString() string { + return s.String() +} + +func (s *UpdateChartRepositoryResponse) SetHeaders(v map[string]*string) *UpdateChartRepositoryResponse { + s.Headers = v + return s +} + +func (s *UpdateChartRepositoryResponse) SetBody(v *UpdateChartRepositoryResponseBody) *UpdateChartRepositoryResponse { + s.Body = v + return s +} + +type UpdateEventCenterRuleRequest struct { + EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"` + EventConfig *string `json:"EventConfig,omitempty" xml:"EventConfig,omitempty"` + EventScope *string `json:"EventScope,omitempty" xml:"EventScope,omitempty"` + EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Namespaces []*string `json:"Namespaces,omitempty" xml:"Namespaces,omitempty" type:"Repeated"` + RepoNames []*string `json:"RepoNames,omitempty" xml:"RepoNames,omitempty" type:"Repeated"` + RepoTagFilterPattern *string `json:"RepoTagFilterPattern,omitempty" xml:"RepoTagFilterPattern,omitempty"` + RuleId *string `json:"RuleId,omitempty" 
xml:"RuleId,omitempty"` + RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"` +} + +func (s UpdateEventCenterRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleRequest) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleRequest) SetEventChannel(v string) *UpdateEventCenterRuleRequest { + s.EventChannel = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventConfig(v string) *UpdateEventCenterRuleRequest { + s.EventConfig = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventScope(v string) *UpdateEventCenterRuleRequest { + s.EventScope = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetEventType(v string) *UpdateEventCenterRuleRequest { + s.EventType = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetInstanceId(v string) *UpdateEventCenterRuleRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetNamespaces(v []*string) *UpdateEventCenterRuleRequest { + s.Namespaces = v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRepoNames(v []*string) *UpdateEventCenterRuleRequest { + s.RepoNames = v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRepoTagFilterPattern(v string) *UpdateEventCenterRuleRequest { + s.RepoTagFilterPattern = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRuleId(v string) *UpdateEventCenterRuleRequest { + s.RuleId = &v + return s +} + +func (s *UpdateEventCenterRuleRequest) SetRuleName(v string) *UpdateEventCenterRuleRequest { + s.RuleName = &v + return s +} + +type UpdateEventCenterRuleShrinkRequest struct { + EventChannel *string `json:"EventChannel,omitempty" xml:"EventChannel,omitempty"` + EventConfig *string `json:"EventConfig,omitempty" xml:"EventConfig,omitempty"` + EventScope *string `json:"EventScope,omitempty" xml:"EventScope,omitempty"` + EventType *string `json:"EventType,omitempty" xml:"EventType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespacesShrink *string `json:"Namespaces,omitempty" xml:"Namespaces,omitempty"` + RepoNamesShrink *string `json:"RepoNames,omitempty" xml:"RepoNames,omitempty"` + RepoTagFilterPattern *string `json:"RepoTagFilterPattern,omitempty" xml:"RepoTagFilterPattern,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` + RuleName *string `json:"RuleName,omitempty" xml:"RuleName,omitempty"` +} + +func (s UpdateEventCenterRuleShrinkRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleShrinkRequest) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventChannel(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventChannel = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventConfig(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventConfig = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventScope(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventScope = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetEventType(v string) *UpdateEventCenterRuleShrinkRequest { + s.EventType = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetInstanceId(v string) *UpdateEventCenterRuleShrinkRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetNamespacesShrink(v string) *UpdateEventCenterRuleShrinkRequest { + s.NamespacesShrink = &v + return s +} + +func (s 
*UpdateEventCenterRuleShrinkRequest) SetRepoNamesShrink(v string) *UpdateEventCenterRuleShrinkRequest { + s.RepoNamesShrink = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRepoTagFilterPattern(v string) *UpdateEventCenterRuleShrinkRequest { + s.RepoTagFilterPattern = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRuleId(v string) *UpdateEventCenterRuleShrinkRequest { + s.RuleId = &v + return s +} + +func (s *UpdateEventCenterRuleShrinkRequest) SetRuleName(v string) *UpdateEventCenterRuleShrinkRequest { + s.RuleName = &v + return s +} + +type UpdateEventCenterRuleResponseBody struct { + Code *int32 `json:"Code,omitempty" xml:"Code,omitempty"` + // Id of the request + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` + RuleId *string `json:"RuleId,omitempty" xml:"RuleId,omitempty"` +} + +func (s UpdateEventCenterRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleResponseBody) SetCode(v int32) *UpdateEventCenterRuleResponseBody { + s.Code = &v + return s +} + +func (s *UpdateEventCenterRuleResponseBody) SetRequestId(v string) *UpdateEventCenterRuleResponseBody { + s.RequestId = &v + return s +} + +func (s *UpdateEventCenterRuleResponseBody) SetRuleId(v string) *UpdateEventCenterRuleResponseBody { + s.RuleId = &v + return s +} + +type UpdateEventCenterRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateEventCenterRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateEventCenterRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateEventCenterRuleResponse) GoString() string { + return s.String() +} + +func (s *UpdateEventCenterRuleResponse) SetHeaders(v map[string]*string) *UpdateEventCenterRuleResponse { + s.Headers = v + return s +} + +func (s *UpdateEventCenterRuleResponse) SetBody(v *UpdateEventCenterRuleResponseBody) *UpdateEventCenterRuleResponse { + s.Body = v + return s +} + +type UpdateInstanceEndpointStatusRequest struct { + Enable *bool `json:"Enable,omitempty" xml:"Enable,omitempty"` + EndpointType *string `json:"EndpointType,omitempty" xml:"EndpointType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + ModuleName *string `json:"ModuleName,omitempty" xml:"ModuleName,omitempty"` +} + +func (s UpdateInstanceEndpointStatusRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateInstanceEndpointStatusRequest) GoString() string { + return s.String() +} + +func (s *UpdateInstanceEndpointStatusRequest) SetEnable(v bool) *UpdateInstanceEndpointStatusRequest { + s.Enable = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetEndpointType(v string) *UpdateInstanceEndpointStatusRequest { + s.EndpointType = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetInstanceId(v string) *UpdateInstanceEndpointStatusRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateInstanceEndpointStatusRequest) SetModuleName(v string) *UpdateInstanceEndpointStatusRequest { + s.ModuleName = &v + return s +} + +type UpdateInstanceEndpointStatusResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s 
UpdateInstanceEndpointStatusResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateInstanceEndpointStatusResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetCode(v string) *UpdateInstanceEndpointStatusResponseBody { + s.Code = &v + return s +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetIsSuccess(v bool) *UpdateInstanceEndpointStatusResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateInstanceEndpointStatusResponseBody) SetRequestId(v string) *UpdateInstanceEndpointStatusResponseBody { + s.RequestId = &v + return s +} + +type UpdateInstanceEndpointStatusResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateInstanceEndpointStatusResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateInstanceEndpointStatusResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateInstanceEndpointStatusResponse) GoString() string { + return s.String() +} + +func (s *UpdateInstanceEndpointStatusResponse) SetHeaders(v map[string]*string) *UpdateInstanceEndpointStatusResponse { + s.Headers = v + return s +} + +func (s *UpdateInstanceEndpointStatusResponse) SetBody(v *UpdateInstanceEndpointStatusResponseBody) *UpdateInstanceEndpointStatusResponse { + s.Body = v + return s +} + +type UpdateNamespaceRequest struct { + AutoCreateRepo *bool `json:"AutoCreateRepo,omitempty" xml:"AutoCreateRepo,omitempty"` + DefaultRepoType *string `json:"DefaultRepoType,omitempty" xml:"DefaultRepoType,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + NamespaceName *string `json:"NamespaceName,omitempty" xml:"NamespaceName,omitempty"` +} + +func (s UpdateNamespaceRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateNamespaceRequest) GoString() string { + return s.String() +} + +func (s *UpdateNamespaceRequest) SetAutoCreateRepo(v bool) *UpdateNamespaceRequest { + s.AutoCreateRepo = &v + return s +} + +func (s *UpdateNamespaceRequest) SetDefaultRepoType(v string) *UpdateNamespaceRequest { + s.DefaultRepoType = &v + return s +} + +func (s *UpdateNamespaceRequest) SetInstanceId(v string) *UpdateNamespaceRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateNamespaceRequest) SetNamespaceName(v string) *UpdateNamespaceRequest { + s.NamespaceName = &v + return s +} + +type UpdateNamespaceResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateNamespaceResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateNamespaceResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateNamespaceResponseBody) SetCode(v string) *UpdateNamespaceResponseBody { + s.Code = &v + return s +} + +func (s *UpdateNamespaceResponseBody) SetIsSuccess(v bool) *UpdateNamespaceResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateNamespaceResponseBody) SetRequestId(v string) *UpdateNamespaceResponseBody { + s.RequestId = &v + return s +} + +type UpdateNamespaceResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateNamespaceResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateNamespaceResponse) String() string { + return 
tea.Prettify(s) +} + +func (s UpdateNamespaceResponse) GoString() string { + return s.String() +} + +func (s *UpdateNamespaceResponse) SetHeaders(v map[string]*string) *UpdateNamespaceResponse { + s.Headers = v + return s +} + +func (s *UpdateNamespaceResponse) SetBody(v *UpdateNamespaceResponseBody) *UpdateNamespaceResponse { + s.Body = v + return s +} + +type UpdateRepoBuildRuleRequest struct { + BuildArgs []*string `json:"BuildArgs,omitempty" xml:"BuildArgs,omitempty" type:"Repeated"` + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + DockerfileLocation *string `json:"DockerfileLocation,omitempty" xml:"DockerfileLocation,omitempty"` + DockerfileName *string `json:"DockerfileName,omitempty" xml:"DockerfileName,omitempty"` + ImageTag *string `json:"ImageTag,omitempty" xml:"ImageTag,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + Platforms []*string `json:"Platforms,omitempty" xml:"Platforms,omitempty" type:"Repeated"` + PushName *string `json:"PushName,omitempty" xml:"PushName,omitempty"` + PushType *string `json:"PushType,omitempty" xml:"PushType,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s UpdateRepoBuildRuleRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoBuildRuleRequest) GoString() string { + return s.String() +} + +func (s *UpdateRepoBuildRuleRequest) SetBuildArgs(v []*string) *UpdateRepoBuildRuleRequest { + s.BuildArgs = v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetBuildRuleId(v string) *UpdateRepoBuildRuleRequest { + s.BuildRuleId = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetDockerfileLocation(v string) *UpdateRepoBuildRuleRequest { + s.DockerfileLocation = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetDockerfileName(v string) *UpdateRepoBuildRuleRequest { + s.DockerfileName = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetImageTag(v string) *UpdateRepoBuildRuleRequest { + s.ImageTag = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetInstanceId(v string) *UpdateRepoBuildRuleRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetPlatforms(v []*string) *UpdateRepoBuildRuleRequest { + s.Platforms = v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetPushName(v string) *UpdateRepoBuildRuleRequest { + s.PushName = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetPushType(v string) *UpdateRepoBuildRuleRequest { + s.PushType = &v + return s +} + +func (s *UpdateRepoBuildRuleRequest) SetRepoId(v string) *UpdateRepoBuildRuleRequest { + s.RepoId = &v + return s +} + +type UpdateRepoBuildRuleResponseBody struct { + BuildRuleId *string `json:"BuildRuleId,omitempty" xml:"BuildRuleId,omitempty"` + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateRepoBuildRuleResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoBuildRuleResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateRepoBuildRuleResponseBody) SetBuildRuleId(v string) *UpdateRepoBuildRuleResponseBody { + s.BuildRuleId = &v + return s +} + +func (s *UpdateRepoBuildRuleResponseBody) SetCode(v string) *UpdateRepoBuildRuleResponseBody { + s.Code = &v + return s +} + +func (s *UpdateRepoBuildRuleResponseBody) SetIsSuccess(v bool) 
*UpdateRepoBuildRuleResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateRepoBuildRuleResponseBody) SetRequestId(v string) *UpdateRepoBuildRuleResponseBody { + s.RequestId = &v + return s +} + +type UpdateRepoBuildRuleResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateRepoBuildRuleResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateRepoBuildRuleResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoBuildRuleResponse) GoString() string { + return s.String() +} + +func (s *UpdateRepoBuildRuleResponse) SetHeaders(v map[string]*string) *UpdateRepoBuildRuleResponse { + s.Headers = v + return s +} + +func (s *UpdateRepoBuildRuleResponse) SetBody(v *UpdateRepoBuildRuleResponseBody) *UpdateRepoBuildRuleResponse { + s.Body = v + return s +} + +type UpdateRepoSourceCodeRepoRequest struct { + AutoBuild *string `json:"AutoBuild,omitempty" xml:"AutoBuild,omitempty"` + CodeRepoId *string `json:"CodeRepoId,omitempty" xml:"CodeRepoId,omitempty"` + CodeRepoName *string `json:"CodeRepoName,omitempty" xml:"CodeRepoName,omitempty"` + CodeRepoNamespaceName *string `json:"CodeRepoNamespaceName,omitempty" xml:"CodeRepoNamespaceName,omitempty"` + CodeRepoType *string `json:"CodeRepoType,omitempty" xml:"CodeRepoType,omitempty"` + DisableCacheBuild *string `json:"DisableCacheBuild,omitempty" xml:"DisableCacheBuild,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + OverseaBuild *string `json:"OverseaBuild,omitempty" xml:"OverseaBuild,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` +} + +func (s UpdateRepoSourceCodeRepoRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoSourceCodeRepoRequest) GoString() string { + return s.String() +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetAutoBuild(v string) *UpdateRepoSourceCodeRepoRequest { + s.AutoBuild = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoId(v string) *UpdateRepoSourceCodeRepoRequest { + s.CodeRepoId = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoName(v string) *UpdateRepoSourceCodeRepoRequest { + s.CodeRepoName = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoNamespaceName(v string) *UpdateRepoSourceCodeRepoRequest { + s.CodeRepoNamespaceName = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetCodeRepoType(v string) *UpdateRepoSourceCodeRepoRequest { + s.CodeRepoType = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetDisableCacheBuild(v string) *UpdateRepoSourceCodeRepoRequest { + s.DisableCacheBuild = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetInstanceId(v string) *UpdateRepoSourceCodeRepoRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetOverseaBuild(v string) *UpdateRepoSourceCodeRepoRequest { + s.OverseaBuild = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoRequest) SetRepoId(v string) *UpdateRepoSourceCodeRepoRequest { + s.RepoId = &v + return s +} + +type UpdateRepoSourceCodeRepoResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateRepoSourceCodeRepoResponseBody) String() string { + return tea.Prettify(s) +} + +func (s 
UpdateRepoSourceCodeRepoResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateRepoSourceCodeRepoResponseBody) SetCode(v string) *UpdateRepoSourceCodeRepoResponseBody { + s.Code = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoResponseBody) SetIsSuccess(v bool) *UpdateRepoSourceCodeRepoResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateRepoSourceCodeRepoResponseBody) SetRequestId(v string) *UpdateRepoSourceCodeRepoResponseBody { + s.RequestId = &v + return s +} + +type UpdateRepoSourceCodeRepoResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateRepoSourceCodeRepoResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateRepoSourceCodeRepoResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoSourceCodeRepoResponse) GoString() string { + return s.String() +} + +func (s *UpdateRepoSourceCodeRepoResponse) SetHeaders(v map[string]*string) *UpdateRepoSourceCodeRepoResponse { + s.Headers = v + return s +} + +func (s *UpdateRepoSourceCodeRepoResponse) SetBody(v *UpdateRepoSourceCodeRepoResponseBody) *UpdateRepoSourceCodeRepoResponse { + s.Body = v + return s +} + +type UpdateRepoTriggerRequest struct { + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + TriggerId *string `json:"TriggerId,omitempty" xml:"TriggerId,omitempty"` + TriggerName *string `json:"TriggerName,omitempty" xml:"TriggerName,omitempty"` + TriggerTag *string `json:"TriggerTag,omitempty" xml:"TriggerTag,omitempty"` + TriggerType *string `json:"TriggerType,omitempty" xml:"TriggerType,omitempty"` + TriggerUrl *string `json:"TriggerUrl,omitempty" xml:"TriggerUrl,omitempty"` +} + +func (s UpdateRepoTriggerRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoTriggerRequest) GoString() string { + return s.String() +} + +func (s *UpdateRepoTriggerRequest) SetInstanceId(v string) *UpdateRepoTriggerRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetRepoId(v string) *UpdateRepoTriggerRequest { + s.RepoId = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetTriggerId(v string) *UpdateRepoTriggerRequest { + s.TriggerId = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetTriggerName(v string) *UpdateRepoTriggerRequest { + s.TriggerName = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetTriggerTag(v string) *UpdateRepoTriggerRequest { + s.TriggerTag = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetTriggerType(v string) *UpdateRepoTriggerRequest { + s.TriggerType = &v + return s +} + +func (s *UpdateRepoTriggerRequest) SetTriggerUrl(v string) *UpdateRepoTriggerRequest { + s.TriggerUrl = &v + return s +} + +type UpdateRepoTriggerResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateRepoTriggerResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoTriggerResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateRepoTriggerResponseBody) SetCode(v string) *UpdateRepoTriggerResponseBody { + s.Code = &v + return s +} + +func (s *UpdateRepoTriggerResponseBody) SetIsSuccess(v bool) *UpdateRepoTriggerResponseBody { + s.IsSuccess = &v + return s +} + +func (s 
*UpdateRepoTriggerResponseBody) SetRequestId(v string) *UpdateRepoTriggerResponseBody { + s.RequestId = &v + return s +} + +type UpdateRepoTriggerResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateRepoTriggerResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateRepoTriggerResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepoTriggerResponse) GoString() string { + return s.String() +} + +func (s *UpdateRepoTriggerResponse) SetHeaders(v map[string]*string) *UpdateRepoTriggerResponse { + s.Headers = v + return s +} + +func (s *UpdateRepoTriggerResponse) SetBody(v *UpdateRepoTriggerResponseBody) *UpdateRepoTriggerResponse { + s.Body = v + return s +} + +type UpdateRepositoryRequest struct { + Detail *string `json:"Detail,omitempty" xml:"Detail,omitempty"` + InstanceId *string `json:"InstanceId,omitempty" xml:"InstanceId,omitempty"` + RepoId *string `json:"RepoId,omitempty" xml:"RepoId,omitempty"` + RepoType *string `json:"RepoType,omitempty" xml:"RepoType,omitempty"` + Summary *string `json:"Summary,omitempty" xml:"Summary,omitempty"` + TagImmutability *bool `json:"TagImmutability,omitempty" xml:"TagImmutability,omitempty"` +} + +func (s UpdateRepositoryRequest) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepositoryRequest) GoString() string { + return s.String() +} + +func (s *UpdateRepositoryRequest) SetDetail(v string) *UpdateRepositoryRequest { + s.Detail = &v + return s +} + +func (s *UpdateRepositoryRequest) SetInstanceId(v string) *UpdateRepositoryRequest { + s.InstanceId = &v + return s +} + +func (s *UpdateRepositoryRequest) SetRepoId(v string) *UpdateRepositoryRequest { + s.RepoId = &v + return s +} + +func (s *UpdateRepositoryRequest) SetRepoType(v string) *UpdateRepositoryRequest { + s.RepoType = &v + return s +} + +func (s *UpdateRepositoryRequest) SetSummary(v string) *UpdateRepositoryRequest { + s.Summary = &v + return s +} + +func (s *UpdateRepositoryRequest) SetTagImmutability(v bool) *UpdateRepositoryRequest { + s.TagImmutability = &v + return s +} + +type UpdateRepositoryResponseBody struct { + Code *string `json:"Code,omitempty" xml:"Code,omitempty"` + IsSuccess *bool `json:"IsSuccess,omitempty" xml:"IsSuccess,omitempty"` + RequestId *string `json:"RequestId,omitempty" xml:"RequestId,omitempty"` +} + +func (s UpdateRepositoryResponseBody) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepositoryResponseBody) GoString() string { + return s.String() +} + +func (s *UpdateRepositoryResponseBody) SetCode(v string) *UpdateRepositoryResponseBody { + s.Code = &v + return s +} + +func (s *UpdateRepositoryResponseBody) SetIsSuccess(v bool) *UpdateRepositoryResponseBody { + s.IsSuccess = &v + return s +} + +func (s *UpdateRepositoryResponseBody) SetRequestId(v string) *UpdateRepositoryResponseBody { + s.RequestId = &v + return s +} + +type UpdateRepositoryResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *UpdateRepositoryResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s UpdateRepositoryResponse) String() string { + return tea.Prettify(s) +} + +func (s UpdateRepositoryResponse) GoString() string { + return s.String() +} + +func (s *UpdateRepositoryResponse) SetHeaders(v map[string]*string) *UpdateRepositoryResponse { + s.Headers = v + return s +} + +func (s *UpdateRepositoryResponse) SetBody(v 
*UpdateRepositoryResponseBody) *UpdateRepositoryResponse { + s.Body = v + return s +} + +type Client struct { + openapi.Client +} + +func NewClient(config *openapi.Config) (*Client, error) { + client := new(Client) + err := client.Init(config) + return client, err +} + +func (client *Client) Init(config *openapi.Config) (_err error) { + _err = client.Client.Init(config) + if _err != nil { + return _err + } + client.EndpointRule = tea.String("regional") + _err = client.CheckConfig(config) + if _err != nil { + return _err + } + client.Endpoint, _err = client.GetEndpoint(tea.String("cr"), client.RegionId, client.EndpointRule, client.Network, client.Suffix, client.EndpointMap, client.Endpoint) + if _err != nil { + return _err + } + + return nil +} + +func (client *Client) GetEndpoint(productId *string, regionId *string, endpointRule *string, network *string, suffix *string, endpointMap map[string]*string, endpoint *string) (_result *string, _err error) { + if !tea.BoolValue(util.Empty(endpoint)) { + _result = endpoint + return _result, _err + } + + if !tea.BoolValue(util.IsUnset(endpointMap)) && !tea.BoolValue(util.Empty(endpointMap[tea.StringValue(regionId)])) { + _result = endpointMap[tea.StringValue(regionId)] + return _result, _err + } + + _body, _err := endpointutil.GetEndpointRules(productId, regionId, endpointRule, network, suffix) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) CancelArtifactBuildTaskWithOptions(request *CancelArtifactBuildTaskRequest, runtime *util.RuntimeOptions) (_result *CancelArtifactBuildTaskResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildTaskId)) { + query["BuildTaskId"] = request.BuildTaskId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("CancelArtifactBuildTask"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &CancelArtifactBuildTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) CancelArtifactBuildTask(request *CancelArtifactBuildTaskRequest) (_result *CancelArtifactBuildTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &CancelArtifactBuildTaskResponse{} + _body, _err := client.CancelArtifactBuildTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) CancelRepoBuildRecordWithOptions(request *CancelRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *CancelRepoBuildRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) { + query["BuildRecordId"] = request.BuildRecordId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if 
!tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CancelRepoBuildRecord"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CancelRepoBuildRecordResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CancelRepoBuildRecord(request *CancelRepoBuildRecordRequest) (_result *CancelRepoBuildRecordResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CancelRepoBuildRecordResponse{}
+	_body, _err := client.CancelRepoBuildRecordWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateBuildRecordByRuleWithOptions(request *CreateBuildRecordByRuleRequest, runtime *util.RuntimeOptions) (_result *CreateBuildRecordByRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) {
+		query["BuildRuleId"] = request.BuildRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateBuildRecordByRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateBuildRecordByRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateBuildRecordByRule(request *CreateBuildRecordByRuleRequest) (_result *CreateBuildRecordByRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateBuildRecordByRuleResponse{}
+	_body, _err := client.CreateBuildRecordByRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChainWithOptions(request *CreateChainRequest, runtime *util.RuntimeOptions) (_result *CreateChainResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.ChainConfig)) {
+		query["ChainConfig"] = request.ChainConfig
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Description)) {
+		query["Description"] = request.Description
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Name)) {
+		query["Name"] = request.Name
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChain"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChainResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChain(request *CreateChainRequest) (_result *CreateChainResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChainResponse{}
+	_body, _err := client.CreateChainWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChartNamespaceWithOptions(request *CreateChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *CreateChartNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) {
+		query["AutoCreateRepo"] = request.AutoCreateRepo
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) {
+		query["DefaultRepoType"] = request.DefaultRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChartNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChartNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChartNamespace(request *CreateChartNamespaceRequest) (_result *CreateChartNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChartNamespaceResponse{}
+	_body, _err := client.CreateChartNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateChartRepositoryWithOptions(request *CreateChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *CreateChartRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoType)) {
+		query["RepoType"] = request.RepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Summary)) {
+		query["Summary"] = request.Summary
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateChartRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateChartRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateChartRepository(request *CreateChartRepositoryRequest) (_result *CreateChartRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateChartRepositoryResponse{}
+	_body, _err := client.CreateChartRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceEndpointAclPolicyWithOptions(request *CreateInstanceEndpointAclPolicyRequest, runtime *util.RuntimeOptions) (_result *CreateInstanceEndpointAclPolicyResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Comment)) {
+		query["Comment"] = request.Comment
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.EndpointType)) {
+		query["EndpointType"] = request.EndpointType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Entry)) {
+		query["Entry"] = request.Entry
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateInstanceEndpointAclPolicy"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceEndpointAclPolicy(request *CreateInstanceEndpointAclPolicyRequest) (_result *CreateInstanceEndpointAclPolicyResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CreateInstanceEndpointAclPolicyWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceVpcEndpointLinkedVpcWithOptions(request *CreateInstanceVpcEndpointLinkedVpcRequest, runtime *util.RuntimeOptions) (_result *CreateInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.EnableCreateDNSRecordInPvzt)) {
+		query["EnableCreateDNSRecordInPvzt"] = request.EnableCreateDNSRecordInPvzt
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VpcId)) {
+		query["VpcId"] = request.VpcId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VswitchId)) {
+		query["VswitchId"] = request.VswitchId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateInstanceVpcEndpointLinkedVpc"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateInstanceVpcEndpointLinkedVpc(request *CreateInstanceVpcEndpointLinkedVpcRequest) (_result *CreateInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CreateInstanceVpcEndpointLinkedVpcWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateNamespaceWithOptions(request *CreateNamespaceRequest, runtime *util.RuntimeOptions) (_result *CreateNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) {
+		query["AutoCreateRepo"] = request.AutoCreateRepo
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) {
+		query["DefaultRepoType"] = request.DefaultRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateNamespace(request *CreateNamespaceRequest) (_result *CreateNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateNamespaceResponse{}
+	_body, _err := client.CreateNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRuleWithOptions(request *CreateRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoBuildRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildArgs)) {
+		query["BuildArgs"] = request.BuildArgs
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DockerfileLocation)) {
+		query["DockerfileLocation"] = request.DockerfileLocation
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DockerfileName)) {
+		query["DockerfileName"] = request.DockerfileName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ImageTag)) {
+		query["ImageTag"] = request.ImageTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Platforms)) {
+		query["Platforms"] = request.Platforms
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PushName)) {
+		query["PushName"] = request.PushName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.PushType)) {
+		query["PushType"] = request.PushType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoBuildRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoBuildRule(request *CreateRepoBuildRuleRequest) (_result *CreateRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoBuildRuleResponse{}
+	_body, _err := client.CreateRepoBuildRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSourceCodeRepoWithOptions(request *CreateRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSourceCodeRepoResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.AutoBuild)) {
+		query["AutoBuild"] = request.AutoBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoName)) {
+		query["CodeRepoName"] = request.CodeRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoNamespaceName)) {
+		query["CodeRepoNamespaceName"] = request.CodeRepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.CodeRepoType)) {
+		query["CodeRepoType"] = request.CodeRepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.DisableCacheBuild)) {
+		query["DisableCacheBuild"] = request.DisableCacheBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.OverseaBuild)) {
+		query["OverseaBuild"] = request.OverseaBuild
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSourceCodeRepo"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSourceCodeRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSourceCodeRepo(request *CreateRepoSourceCodeRepoRequest) (_result *CreateRepoSourceCodeRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSourceCodeRepoResponse{}
+	_body, _err := client.CreateRepoSourceCodeRepoWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncRuleWithOptions(request *CreateRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleName)) {
+		query["SyncRuleName"] = request.SyncRuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncScope)) {
+		query["SyncScope"] = request.SyncScope
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncTrigger)) {
+		query["SyncTrigger"] = request.SyncTrigger
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TagFilter)) {
+		query["TagFilter"] = request.TagFilter
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) {
+		query["TargetInstanceId"] = request.TargetInstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetNamespaceName)) {
+		query["TargetNamespaceName"] = request.TargetNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) {
+		query["TargetRegionId"] = request.TargetRegionId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRepoName)) {
+		query["TargetRepoName"] = request.TargetRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetUserId)) {
+		query["TargetUserId"] = request.TargetUserId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncRule(request *CreateRepoSyncRuleRequest) (_result *CreateRepoSyncRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncRuleResponse{}
+	_body, _err := client.CreateRepoSyncRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskWithOptions(request *CreateRepoSyncTaskRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Override)) {
+		query["Override"] = request.Override
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) {
+		query["TargetInstanceId"] = request.TargetInstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetNamespace)) {
+		query["TargetNamespace"] = request.TargetNamespace
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) {
+		query["TargetRegionId"] = request.TargetRegionId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetRepoName)) {
+		query["TargetRepoName"] = request.TargetRepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetTag)) {
+		query["TargetTag"] = request.TargetTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TargetUserId)) {
+		query["TargetUserId"] = request.TargetUserId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTask(request *CreateRepoSyncTaskRequest) (_result *CreateRepoSyncTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncTaskResponse{}
+	_body, _err := client.CreateRepoSyncTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskByRuleWithOptions(request *CreateRepoSyncTaskByRuleRequest, runtime *util.RuntimeOptions) (_result *CreateRepoSyncTaskByRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleId)) {
+		query["SyncRuleId"] = request.SyncRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoSyncTaskByRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoSyncTaskByRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoSyncTaskByRule(request *CreateRepoSyncTaskByRuleRequest) (_result *CreateRepoSyncTaskByRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoSyncTaskByRuleResponse{}
+	_body, _err := client.CreateRepoSyncTaskByRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagWithOptions(request *CreateRepoTagRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTagResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.FromTag)) {
+		query["FromTag"] = request.FromTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ToTag)) {
+		query["ToTag"] = request.ToTag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTag"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTagResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTag(request *CreateRepoTagRequest) (_result *CreateRepoTagResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTagResponse{}
+	_body, _err := client.CreateRepoTagWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagScanTaskWithOptions(request *CreateRepoTagScanTaskRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTagScanTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Digest)) {
+		query["Digest"] = request.Digest
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ScanService)) {
+		query["ScanService"] = request.ScanService
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTagScanTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTagScanTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTagScanTask(request *CreateRepoTagScanTaskRequest) (_result *CreateRepoTagScanTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTagScanTaskResponse{}
+	_body, _err := client.CreateRepoTagScanTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTriggerWithOptions(request *CreateRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *CreateRepoTriggerResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerName)) {
+		query["TriggerName"] = request.TriggerName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerTag)) {
+		query["TriggerTag"] = request.TriggerTag
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerType)) {
+		query["TriggerType"] = request.TriggerType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerUrl)) {
+		query["TriggerUrl"] = request.TriggerUrl
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepoTrigger"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepoTriggerResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepoTrigger(request *CreateRepoTriggerRequest) (_result *CreateRepoTriggerResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepoTriggerResponse{}
+	_body, _err := client.CreateRepoTriggerWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) CreateRepositoryWithOptions(request *CreateRepositoryRequest, runtime *util.RuntimeOptions) (_result *CreateRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Detail)) {
+		query["Detail"] = request.Detail
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoType)) {
+		query["RepoType"] = request.RepoType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) {
+		query["ResourceGroupId"] = request.ResourceGroupId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Summary)) {
+		query["Summary"] = request.Summary
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TagImmutability)) {
+		query["TagImmutability"] = request.TagImmutability
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("CreateRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &CreateRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) CreateRepository(request *CreateRepositoryRequest) (_result *CreateRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &CreateRepositoryResponse{}
+	_body, _err := client.CreateRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChainWithOptions(request *DeleteChainRequest, runtime *util.RuntimeOptions) (_result *DeleteChainResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.ChainId)) {
+		query["ChainId"] = request.ChainId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChain"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChainResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChain(request *DeleteChainRequest) (_result *DeleteChainResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChainResponse{}
+	_body, _err := client.DeleteChainWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartNamespaceWithOptions(request *DeleteChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *DeleteChartNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartNamespace(request *DeleteChartNamespaceRequest) (_result *DeleteChartNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartNamespaceResponse{}
+	_body, _err := client.DeleteChartNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartReleaseWithOptions(request *DeleteChartReleaseRequest, runtime *util.RuntimeOptions) (_result *DeleteChartReleaseResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.Chart)) {
+		query["Chart"] = request.Chart
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Release)) {
+		query["Release"] = request.Release
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartRelease"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartReleaseResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRelease(request *DeleteChartReleaseRequest) (_result *DeleteChartReleaseResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartReleaseResponse{}
+	_body, _err := client.DeleteChartReleaseWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRepositoryWithOptions(request *DeleteChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *DeleteChartRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteChartRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteChartRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteChartRepository(request *DeleteChartRepositoryRequest) (_result *DeleteChartRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteChartRepositoryResponse{}
+	_body, _err := client.DeleteChartRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteEventCenterRuleWithOptions(request *DeleteEventCenterRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteEventCenterRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RuleId)) {
+		query["RuleId"] = request.RuleId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteEventCenterRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteEventCenterRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteEventCenterRule(request *DeleteEventCenterRuleRequest) (_result *DeleteEventCenterRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteEventCenterRuleResponse{}
+	_body, _err := client.DeleteEventCenterRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceEndpointAclPolicyWithOptions(request *DeleteInstanceEndpointAclPolicyRequest, runtime *util.RuntimeOptions) (_result *DeleteInstanceEndpointAclPolicyResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.EndpointType)) {
+		query["EndpointType"] = request.EndpointType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Entry)) {
+		query["Entry"] = request.Entry
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteInstanceEndpointAclPolicy"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceEndpointAclPolicy(request *DeleteInstanceEndpointAclPolicyRequest) (_result *DeleteInstanceEndpointAclPolicyResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteInstanceEndpointAclPolicyResponse{}
+	_body, _err := client.DeleteInstanceEndpointAclPolicyWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceVpcEndpointLinkedVpcWithOptions(request *DeleteInstanceVpcEndpointLinkedVpcRequest, runtime *util.RuntimeOptions) (_result *DeleteInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VpcId)) {
+		query["VpcId"] = request.VpcId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.VswitchId)) {
+		query["VswitchId"] = request.VswitchId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteInstanceVpcEndpointLinkedVpc"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteInstanceVpcEndpointLinkedVpc(request *DeleteInstanceVpcEndpointLinkedVpcRequest) (_result *DeleteInstanceVpcEndpointLinkedVpcResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteInstanceVpcEndpointLinkedVpcResponse{}
+	_body, _err := client.DeleteInstanceVpcEndpointLinkedVpcWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespaceWithOptions(request *DeleteNamespaceRequest, runtime *util.RuntimeOptions) (_result *DeleteNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteNamespace(request *DeleteNamespaceRequest) (_result *DeleteNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteNamespaceResponse{}
+	_body, _err := client.DeleteNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRuleWithOptions(request *DeleteRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) {
+		query["BuildRuleId"] = request.BuildRuleId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoBuildRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoBuildRule(request *DeleteRepoBuildRuleRequest) (_result *DeleteRepoBuildRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoBuildRuleResponse{}
+	_body, _err := client.DeleteRepoBuildRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoSyncRuleWithOptions(request *DeleteRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoSyncRuleResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncRuleId)) {
+		query["SyncRuleId"] = request.SyncRuleId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoSyncRule"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoSyncRuleResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoSyncRule(request *DeleteRepoSyncRuleRequest) (_result *DeleteRepoSyncRuleResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoSyncRuleResponse{}
+	_body, _err := client.DeleteRepoSyncRuleWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTagWithOptions(request *DeleteRepoTagRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoTagResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.Tag)) {
+		query["Tag"] = request.Tag
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoTag"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoTagResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTag(request *DeleteRepoTagRequest) (_result *DeleteRepoTagResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoTagResponse{}
+	_body, _err := client.DeleteRepoTagWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTriggerWithOptions(request *DeleteRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *DeleteRepoTriggerResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.TriggerId)) {
+		query["TriggerId"] = request.TriggerId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepoTrigger"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepoTriggerResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepoTrigger(request *DeleteRepoTriggerRequest) (_result *DeleteRepoTriggerResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepoTriggerResponse{}
+	_body, _err := client.DeleteRepoTriggerWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) DeleteRepositoryWithOptions(request *DeleteRepositoryRequest, runtime *util.RuntimeOptions) (_result *DeleteRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("DeleteRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &DeleteRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) DeleteRepository(request *DeleteRepositoryRequest) (_result *DeleteRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &DeleteRepositoryResponse{}
+	_body, _err := client.DeleteRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetArtifactBuildTaskWithOptions(request *GetArtifactBuildTaskRequest, runtime *util.RuntimeOptions) (_result *GetArtifactBuildTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := openapiutil.Query(util.ToMap(request))
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetArtifactBuildTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("GET"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetArtifactBuildTaskResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetArtifactBuildTask(request *GetArtifactBuildTaskRequest) (_result *GetArtifactBuildTaskResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetArtifactBuildTaskResponse{}
+	_body, _err := client.GetArtifactBuildTaskWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetAuthorizationTokenWithOptions(request *GetAuthorizationTokenRequest, runtime *util.RuntimeOptions) (_result *GetAuthorizationTokenResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetAuthorizationToken"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetAuthorizationTokenResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetAuthorizationToken(request *GetAuthorizationTokenRequest) (_result *GetAuthorizationTokenResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetAuthorizationTokenResponse{}
+	_body, _err := client.GetAuthorizationTokenWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetChainWithOptions(request *GetChainRequest, runtime *util.RuntimeOptions) (_result *GetChainResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.ChainId)) {
+		query["ChainId"] = request.ChainId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetChain"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetChainResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetChain(request *GetChainRequest) (_result *GetChainResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetChainResponse{}
+	_body, _err := client.GetChainWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetChartNamespaceWithOptions(request *GetChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *GetChartNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetChartNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetChartNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetChartNamespace(request *GetChartNamespaceRequest) (_result *GetChartNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetChartNamespaceResponse{}
+	_body, _err := client.GetChartNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetChartRepositoryWithOptions(request *GetChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *GetChartRepositoryResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoName)) {
+		query["RepoName"] = request.RepoName
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) {
+		query["RepoNamespaceName"] = request.RepoNamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetChartRepository"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetChartRepositoryResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetChartRepository(request *GetChartRepositoryRequest) (_result *GetChartRepositoryResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetChartRepositoryResponse{}
+	_body, _err := client.GetChartRepositoryWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetInstanceWithOptions(request *GetInstanceRequest, runtime *util.RuntimeOptions) (_result *GetInstanceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetInstance"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetInstanceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetInstance(request *GetInstanceRequest) (_result *GetInstanceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetInstanceResponse{}
+	_body, _err := client.GetInstanceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetInstanceCountWithOptions(runtime *util.RuntimeOptions) (_result *GetInstanceCountResponse, _err error) {
+	req := &openapi.OpenApiRequest{}
+	params := &openapi.Params{
+		Action: tea.String("GetInstanceCount"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetInstanceCountResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetInstanceCount() (_result *GetInstanceCountResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetInstanceCountResponse{}
+	_body, _err := client.GetInstanceCountWithOptions(runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetInstanceEndpointWithOptions(request *GetInstanceEndpointRequest, runtime *util.RuntimeOptions) (_result *GetInstanceEndpointResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.EndpointType)) {
+		query["EndpointType"] = request.EndpointType
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetInstanceEndpoint"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetInstanceEndpointResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetInstanceEndpoint(request *GetInstanceEndpointRequest) (_result *GetInstanceEndpointResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetInstanceEndpointResponse{}
+	_body, _err := client.GetInstanceEndpointWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetInstanceUsageWithOptions(request *GetInstanceUsageRequest, runtime *util.RuntimeOptions) (_result *GetInstanceUsageResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetInstanceUsage"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetInstanceUsageResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetInstanceUsage(request *GetInstanceUsageRequest) (_result *GetInstanceUsageResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetInstanceUsageResponse{}
+	_body, _err := client.GetInstanceUsageWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetInstanceVpcEndpointWithOptions(request *GetInstanceVpcEndpointRequest, runtime *util.RuntimeOptions) (_result *GetInstanceVpcEndpointResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.ModuleName)) {
+		query["ModuleName"] = request.ModuleName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetInstanceVpcEndpoint"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetInstanceVpcEndpointResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetInstanceVpcEndpoint(request *GetInstanceVpcEndpointRequest) (_result *GetInstanceVpcEndpointResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetInstanceVpcEndpointResponse{}
+	_body, _err := client.GetInstanceVpcEndpointWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetNamespaceWithOptions(request *GetNamespaceRequest, runtime *util.RuntimeOptions) (_result *GetNamespaceResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceId)) {
+		query["NamespaceId"] = request.NamespaceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.NamespaceName)) {
+		query["NamespaceName"] = request.NamespaceName
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetNamespace"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetNamespaceResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetNamespace(request *GetNamespaceRequest) (_result *GetNamespaceResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetNamespaceResponse{}
+	_body, _err := client.GetNamespaceWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRecordWithOptions(request *GetRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *GetRepoBuildRecordResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) {
+		query["BuildRecordId"] = request.BuildRecordId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoBuildRecord"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetRepoBuildRecordResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRecord(request *GetRepoBuildRecordRequest) (_result *GetRepoBuildRecordResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetRepoBuildRecordResponse{}
+	_body, _err := client.GetRepoBuildRecordWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRecordStatusWithOptions(request *GetRepoBuildRecordStatusRequest, runtime *util.RuntimeOptions) (_result *GetRepoBuildRecordStatusResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) {
+		query["BuildRecordId"] = request.BuildRecordId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoBuildRecordStatus"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetRepoBuildRecordStatusResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoBuildRecordStatus(request *GetRepoBuildRecordStatusRequest) (_result *GetRepoBuildRecordStatusResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetRepoBuildRecordStatusResponse{}
+	_body, _err := client.GetRepoBuildRecordStatusWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoSourceCodeRepoWithOptions(request *GetRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *GetRepoSourceCodeRepoResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.RepoId)) {
+		query["RepoId"] = request.RepoId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoSourceCodeRepo"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
+		BodyType: tea.String("json"),
+	}
+	_result = &GetRepoSourceCodeRepoResponse{}
+	_body, _err := client.CallApi(params, req, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Convert(_body, &_result)
+	return _result, _err
+}
+
+func (client *Client) GetRepoSourceCodeRepo(request *GetRepoSourceCodeRepoRequest) (_result *GetRepoSourceCodeRepoResponse, _err error) {
+	runtime := &util.RuntimeOptions{}
+	_result = &GetRepoSourceCodeRepoResponse{}
+	_body, _err := client.GetRepoSourceCodeRepoWithOptions(request, runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_result = _body
+	return _result, _err
+}
+
+func (client *Client) GetRepoSyncTaskWithOptions(request *GetRepoSyncTaskRequest, runtime *util.RuntimeOptions) (_result *GetRepoSyncTaskResponse, _err error) {
+	_err = util.ValidateModel(request)
+	if _err != nil {
+		return _result, _err
+	}
+	query := map[string]interface{}{}
+	if !tea.BoolValue(util.IsUnset(request.InstanceId)) {
+		query["InstanceId"] = request.InstanceId
+	}
+
+	if !tea.BoolValue(util.IsUnset(request.SyncTaskId)) {
+		query["SyncTaskId"] = request.SyncTaskId
+	}
+
+	req := &openapi.OpenApiRequest{
+		Query: openapiutil.Query(query),
+	}
+	params := &openapi.Params{
+		Action: tea.String("GetRepoSyncTask"),
+		Version: tea.String("2018-12-01"),
+		Protocol: tea.String("HTTPS"),
+		Pathname: tea.String("/"),
+		Method: tea.String("POST"),
+		AuthType: tea.String("AK"),
+		Style: tea.String("RPC"),
+		ReqBodyType: tea.String("formData"),
ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoSyncTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoSyncTask(request *GetRepoSyncTaskRequest) (_result *GetRepoSyncTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoSyncTaskResponse{} + _body, _err := client.GetRepoSyncTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagWithOptions(request *GetRepoTagRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTag"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTag(request *GetRepoTagRequest) (_result *GetRepoTagResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagResponse{} + _body, _err := client.GetRepoTagWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagLayersWithOptions(request *GetRepoTagLayersRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagLayersResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagLayers"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagLayersResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagLayers(request *GetRepoTagLayersRequest) (_result *GetRepoTagLayersResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagLayersResponse{} + _body, _err := client.GetRepoTagLayersWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagManifestWithOptions(request 
*GetRepoTagManifestRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagManifestResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.SchemaVersion)) { + query["SchemaVersion"] = request.SchemaVersion + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagManifest"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagManifestResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagManifest(request *GetRepoTagManifestRequest) (_result *GetRepoTagManifestResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagManifestResponse{} + _body, _err := client.GetRepoTagManifestWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanStatusWithOptions(request *GetRepoTagScanStatusRequest, runtime *util.RuntimeOptions) (_result *GetRepoTagScanStatusResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagScanStatus"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanStatus(request *GetRepoTagScanStatusRequest) (_result *GetRepoTagScanStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagScanStatusResponse{} + _body, _err := client.GetRepoTagScanStatusWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepoTagScanSummaryWithOptions(request *GetRepoTagScanSummaryRequest, runtime *util.RuntimeOptions) (_result 
*GetRepoTagScanSummaryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepoTagScanSummary"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepoTagScanSummary(request *GetRepoTagScanSummaryRequest) (_result *GetRepoTagScanSummaryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepoTagScanSummaryResponse{} + _body, _err := client.GetRepoTagScanSummaryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) GetRepositoryWithOptions(request *GetRepositoryRequest, runtime *util.RuntimeOptions) (_result *GetRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("GetRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &GetRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) GetRepository(request *GetRepositoryRequest) (_result *GetRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &GetRepositoryResponse{} + _body, _err := client.GetRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListArtifactBuildTaskLogWithOptions(request *ListArtifactBuildTaskLogRequest, runtime *util.RuntimeOptions) (_result *ListArtifactBuildTaskLogResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + 
return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListArtifactBuildTaskLog"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListArtifactBuildTaskLogResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListArtifactBuildTaskLog(request *ListArtifactBuildTaskLogRequest) (_result *ListArtifactBuildTaskLogResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListArtifactBuildTaskLogResponse{} + _body, _err := client.ListArtifactBuildTaskLogWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChainWithOptions(request *ListChainRequest, runtime *util.RuntimeOptions) (_result *ListChainResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChain"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChainResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChain(request *ListChainRequest) (_result *ListChainResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChainResponse{} + _body, _err := client.ListChainWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChainInstanceWithOptions(request *ListChainInstanceRequest, runtime *util.RuntimeOptions) (_result *ListChainInstanceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if 
!tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChainInstance"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChainInstanceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChainInstance(request *ListChainInstanceRequest) (_result *ListChainInstanceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChainInstanceResponse{} + _body, _err := client.ListChainInstanceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartWithOptions(request *ListChartRequest, runtime *util.RuntimeOptions) (_result *ListChartResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChart"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChart(request *ListChartRequest) (_result *ListChartResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartResponse{} + _body, _err := client.ListChartWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartNamespaceWithOptions(request *ListChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *ListChartNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceStatus)) { + query["NamespaceStatus"] = request.NamespaceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if 
!tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartNamespace(request *ListChartNamespaceRequest) (_result *ListChartNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartNamespaceResponse{} + _body, _err := client.ListChartNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartReleaseWithOptions(request *ListChartReleaseRequest, runtime *util.RuntimeOptions) (_result *ListChartReleaseResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Chart)) { + query["Chart"] = request.Chart + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartRelease"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartReleaseResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartRelease(request *ListChartReleaseRequest) (_result *ListChartReleaseResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartReleaseResponse{} + _body, _err := client.ListChartReleaseWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListChartRepositoryWithOptions(request *ListChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *ListChartRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if 
!tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoStatus)) { + query["RepoStatus"] = request.RepoStatus + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListChartRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListChartRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListChartRepository(request *ListChartRepositoryRequest) (_result *ListChartRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListChartRepositoryResponse{} + _body, _err := client.ListChartRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListEventCenterRecordWithOptions(request *ListEventCenterRecordRequest, runtime *util.RuntimeOptions) (_result *ListEventCenterRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListEventCenterRecord"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListEventCenterRecordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListEventCenterRecord(request *ListEventCenterRecordRequest) (_result *ListEventCenterRecordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListEventCenterRecordResponse{} + _body, _err := client.ListEventCenterRecordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListEventCenterRuleNameWithOptions(request *ListEventCenterRuleNameRequest, runtime *util.RuntimeOptions) (_result *ListEventCenterRuleNameResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := openapiutil.Query(util.ToMap(request)) + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListEventCenterRuleName"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListEventCenterRuleNameResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, 
&_result) + return _result, _err +} + +func (client *Client) ListEventCenterRuleName(request *ListEventCenterRuleNameRequest) (_result *ListEventCenterRuleNameResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListEventCenterRuleNameResponse{} + _body, _err := client.ListEventCenterRuleNameWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceWithOptions(request *ListInstanceRequest, runtime *util.RuntimeOptions) (_result *ListInstanceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceName)) { + query["InstanceName"] = request.InstanceName + } + + if !tea.BoolValue(util.IsUnset(request.InstanceStatus)) { + query["InstanceStatus"] = request.InstanceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstance"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstance(request *ListInstanceRequest) (_result *ListInstanceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceResponse{} + _body, _err := client.ListInstanceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceEndpointWithOptions(request *ListInstanceEndpointRequest, runtime *util.RuntimeOptions) (_result *ListInstanceEndpointResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstanceEndpoint"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceEndpointResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstanceEndpoint(request *ListInstanceEndpointRequest) (_result *ListInstanceEndpointResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceEndpointResponse{} + _body, _err := 
client.ListInstanceEndpointWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListInstanceRegionWithOptions(request *ListInstanceRegionRequest, runtime *util.RuntimeOptions) (_result *ListInstanceRegionResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Lang)) { + query["Lang"] = request.Lang + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListInstanceRegion"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListInstanceRegionResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListInstanceRegion(request *ListInstanceRegionRequest) (_result *ListInstanceRegionResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListInstanceRegionResponse{} + _body, _err := client.ListInstanceRegionWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListNamespaceWithOptions(request *ListNamespaceRequest, runtime *util.RuntimeOptions) (_result *ListNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceStatus)) { + query["NamespaceStatus"] = request.NamespaceStatus + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListNamespace(request *ListNamespaceRequest) (_result *ListNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListNamespaceResponse{} + _body, _err := client.ListNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRecordWithOptions(request 
*ListRepoBuildRecordRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRecordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRecord"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRecordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRecord(request *ListRepoBuildRecordRequest) (_result *ListRepoBuildRecordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRecordResponse{} + _body, _err := client.ListRepoBuildRecordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRecordLogWithOptions(request *ListRepoBuildRecordLogRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRecordLogResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildRecordId)) { + query["BuildRecordId"] = request.BuildRecordId + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Offset)) { + query["Offset"] = request.Offset + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRecordLog"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRecordLogResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRecordLog(request *ListRepoBuildRecordLogRequest) (_result *ListRepoBuildRecordLogResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRecordLogResponse{} + _body, _err := client.ListRepoBuildRecordLogWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoBuildRuleWithOptions(request *ListRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *ListRepoBuildRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err 
!= nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoBuildRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoBuildRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoBuildRule(request *ListRepoBuildRuleRequest) (_result *ListRepoBuildRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoBuildRuleResponse{} + _body, _err := client.ListRepoBuildRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoSyncRuleWithOptions(request *ListRepoSyncRuleRequest, runtime *util.RuntimeOptions) (_result *ListRepoSyncRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.TargetInstanceId)) { + query["TargetInstanceId"] = request.TargetInstanceId + } + + if !tea.BoolValue(util.IsUnset(request.TargetRegionId)) { + query["TargetRegionId"] = request.TargetRegionId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoSyncRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoSyncRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoSyncRule(request *ListRepoSyncRuleRequest) (_result *ListRepoSyncRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoSyncRuleResponse{} + _body, _err := client.ListRepoSyncRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoSyncTaskWithOptions(request *ListRepoSyncTaskRequest, runtime 
*util.RuntimeOptions) (_result *ListRepoSyncTaskResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.SyncRecordId)) { + query["SyncRecordId"] = request.SyncRecordId + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoSyncTask"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoSyncTaskResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoSyncTask(request *ListRepoSyncTaskRequest) (_result *ListRepoSyncTaskResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoSyncTaskResponse{} + _body, _err := client.ListRepoSyncTaskWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTagWithOptions(request *ListRepoTagRequest, runtime *util.RuntimeOptions) (_result *ListRepoTagResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTag"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTagResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTag(request *ListRepoTagRequest) (_result *ListRepoTagResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoTagResponse{} + _body, _err := client.ListRepoTagWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTagScanResultWithOptions(request 
*ListRepoTagScanResultRequest, runtime *util.RuntimeOptions) (_result *ListRepoTagScanResultResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Digest)) { + query["Digest"] = request.Digest + } + + if !tea.BoolValue(util.IsUnset(request.FilterValue)) { + query["FilterValue"] = request.FilterValue + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.ScanTaskId)) { + query["ScanTaskId"] = request.ScanTaskId + } + + if !tea.BoolValue(util.IsUnset(request.ScanType)) { + query["ScanType"] = request.ScanType + } + + if !tea.BoolValue(util.IsUnset(request.Severity)) { + query["Severity"] = request.Severity + } + + if !tea.BoolValue(util.IsUnset(request.Tag)) { + query["Tag"] = request.Tag + } + + if !tea.BoolValue(util.IsUnset(request.VulQueryKey)) { + query["VulQueryKey"] = request.VulQueryKey + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTagScanResult"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTagScanResultResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTagScanResult(request *ListRepoTagScanResultRequest) (_result *ListRepoTagScanResultResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepoTagScanResultResponse{} + _body, _err := client.ListRepoTagScanResultWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepoTriggerWithOptions(request *ListRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *ListRepoTriggerResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepoTrigger"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepoTriggerResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepoTrigger(request *ListRepoTriggerRequest) (_result *ListRepoTriggerResponse, _err error) { + runtime := 
&util.RuntimeOptions{} + _result = &ListRepoTriggerResponse{} + _body, _err := client.ListRepoTriggerWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ListRepositoryWithOptions(request *ListRepositoryRequest, runtime *util.RuntimeOptions) (_result *ListRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.PageNo)) { + query["PageNo"] = request.PageNo + } + + if !tea.BoolValue(util.IsUnset(request.PageSize)) { + query["PageSize"] = request.PageSize + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoStatus)) { + query["RepoStatus"] = request.RepoStatus + } + + if !tea.BoolValue(util.IsUnset(request.ResourceGroupId)) { + query["ResourceGroupId"] = request.ResourceGroupId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ListRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ListRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ListRepository(request *ListRepositoryRequest) (_result *ListRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ListRepositoryResponse{} + _body, _err := client.ListRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) ResetLoginPasswordWithOptions(request *ResetLoginPasswordRequest, runtime *util.RuntimeOptions) (_result *ResetLoginPasswordResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Password)) { + query["Password"] = request.Password + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("ResetLoginPassword"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &ResetLoginPasswordResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) ResetLoginPassword(request *ResetLoginPasswordRequest) (_result *ResetLoginPasswordResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &ResetLoginPasswordResponse{} + _body, _err := 
client.ResetLoginPasswordWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChainWithOptions(request *UpdateChainRequest, runtime *util.RuntimeOptions) (_result *UpdateChainResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.ChainConfig)) { + query["ChainConfig"] = request.ChainConfig + } + + if !tea.BoolValue(util.IsUnset(request.ChainId)) { + query["ChainId"] = request.ChainId + } + + if !tea.BoolValue(util.IsUnset(request.Description)) { + query["Description"] = request.Description + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Name)) { + query["Name"] = request.Name + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChain"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChainResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChain(request *UpdateChainRequest) (_result *UpdateChainResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChainResponse{} + _body, _err := client.UpdateChainWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChartNamespaceWithOptions(request *UpdateChartNamespaceRequest, runtime *util.RuntimeOptions) (_result *UpdateChartNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) { + query["AutoCreateRepo"] = request.AutoCreateRepo + } + + if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) { + query["DefaultRepoType"] = request.DefaultRepoType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChartNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChartNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChartNamespace(request *UpdateChartNamespaceRequest) (_result *UpdateChartNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChartNamespaceResponse{} + _body, _err := client.UpdateChartNamespaceWithOptions(request, runtime) + if _err != nil { + return 
_result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateChartRepositoryWithOptions(request *UpdateChartRepositoryRequest, runtime *util.RuntimeOptions) (_result *UpdateChartRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoName)) { + query["RepoName"] = request.RepoName + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamespaceName)) { + query["RepoNamespaceName"] = request.RepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.RepoType)) { + query["RepoType"] = request.RepoType + } + + if !tea.BoolValue(util.IsUnset(request.Summary)) { + query["Summary"] = request.Summary + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateChartRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateChartRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateChartRepository(request *UpdateChartRepositoryRequest) (_result *UpdateChartRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateChartRepositoryResponse{} + _body, _err := client.UpdateChartRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateEventCenterRuleWithOptions(tmpReq *UpdateEventCenterRuleRequest, runtime *util.RuntimeOptions) (_result *UpdateEventCenterRuleResponse, _err error) { + _err = util.ValidateModel(tmpReq) + if _err != nil { + return _result, _err + } + request := &UpdateEventCenterRuleShrinkRequest{} + openapiutil.Convert(tmpReq, request) + if !tea.BoolValue(util.IsUnset(tmpReq.Namespaces)) { + request.NamespacesShrink = openapiutil.ArrayToStringWithSpecifiedStyle(tmpReq.Namespaces, tea.String("Namespaces"), tea.String("json")) + } + + if !tea.BoolValue(util.IsUnset(tmpReq.RepoNames)) { + request.RepoNamesShrink = openapiutil.ArrayToStringWithSpecifiedStyle(tmpReq.RepoNames, tea.String("RepoNames"), tea.String("json")) + } + + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.EventChannel)) { + query["EventChannel"] = request.EventChannel + } + + if !tea.BoolValue(util.IsUnset(request.EventConfig)) { + query["EventConfig"] = request.EventConfig + } + + if !tea.BoolValue(util.IsUnset(request.EventScope)) { + query["EventScope"] = request.EventScope + } + + if !tea.BoolValue(util.IsUnset(request.EventType)) { + query["EventType"] = request.EventType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespacesShrink)) { + query["Namespaces"] = request.NamespacesShrink + } + + if !tea.BoolValue(util.IsUnset(request.RepoNamesShrink)) { + query["RepoNames"] = request.RepoNamesShrink + } + + if !tea.BoolValue(util.IsUnset(request.RepoTagFilterPattern)) { + query["RepoTagFilterPattern"] = 
request.RepoTagFilterPattern + } + + if !tea.BoolValue(util.IsUnset(request.RuleId)) { + query["RuleId"] = request.RuleId + } + + if !tea.BoolValue(util.IsUnset(request.RuleName)) { + query["RuleName"] = request.RuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateEventCenterRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateEventCenterRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateEventCenterRule(request *UpdateEventCenterRuleRequest) (_result *UpdateEventCenterRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateEventCenterRuleResponse{} + _body, _err := client.UpdateEventCenterRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateInstanceEndpointStatusWithOptions(request *UpdateInstanceEndpointStatusRequest, runtime *util.RuntimeOptions) (_result *UpdateInstanceEndpointStatusResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Enable)) { + query["Enable"] = request.Enable + } + + if !tea.BoolValue(util.IsUnset(request.EndpointType)) { + query["EndpointType"] = request.EndpointType + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.ModuleName)) { + query["ModuleName"] = request.ModuleName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateInstanceEndpointStatus"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateInstanceEndpointStatusResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateInstanceEndpointStatus(request *UpdateInstanceEndpointStatusRequest) (_result *UpdateInstanceEndpointStatusResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateInstanceEndpointStatusResponse{} + _body, _err := client.UpdateInstanceEndpointStatusWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateNamespaceWithOptions(request *UpdateNamespaceRequest, runtime *util.RuntimeOptions) (_result *UpdateNamespaceResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoCreateRepo)) { + query["AutoCreateRepo"] = request.AutoCreateRepo + } + + if !tea.BoolValue(util.IsUnset(request.DefaultRepoType)) { + query["DefaultRepoType"] = request.DefaultRepoType + } + + if 
!tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.NamespaceName)) { + query["NamespaceName"] = request.NamespaceName + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateNamespace"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateNamespaceResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateNamespace(request *UpdateNamespaceRequest) (_result *UpdateNamespaceResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateNamespaceResponse{} + _body, _err := client.UpdateNamespaceWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoBuildRuleWithOptions(request *UpdateRepoBuildRuleRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoBuildRuleResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.BuildArgs)) { + query["BuildArgs"] = request.BuildArgs + } + + if !tea.BoolValue(util.IsUnset(request.BuildRuleId)) { + query["BuildRuleId"] = request.BuildRuleId + } + + if !tea.BoolValue(util.IsUnset(request.DockerfileLocation)) { + query["DockerfileLocation"] = request.DockerfileLocation + } + + if !tea.BoolValue(util.IsUnset(request.DockerfileName)) { + query["DockerfileName"] = request.DockerfileName + } + + if !tea.BoolValue(util.IsUnset(request.ImageTag)) { + query["ImageTag"] = request.ImageTag + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.Platforms)) { + query["Platforms"] = request.Platforms + } + + if !tea.BoolValue(util.IsUnset(request.PushName)) { + query["PushName"] = request.PushName + } + + if !tea.BoolValue(util.IsUnset(request.PushType)) { + query["PushType"] = request.PushType + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoBuildRule"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoBuildRule(request *UpdateRepoBuildRuleRequest) (_result *UpdateRepoBuildRuleResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoBuildRuleResponse{} + _body, _err := client.UpdateRepoBuildRuleWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) 
UpdateRepoSourceCodeRepoWithOptions(request *UpdateRepoSourceCodeRepoRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoSourceCodeRepoResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.AutoBuild)) { + query["AutoBuild"] = request.AutoBuild + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoId)) { + query["CodeRepoId"] = request.CodeRepoId + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoName)) { + query["CodeRepoName"] = request.CodeRepoName + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoNamespaceName)) { + query["CodeRepoNamespaceName"] = request.CodeRepoNamespaceName + } + + if !tea.BoolValue(util.IsUnset(request.CodeRepoType)) { + query["CodeRepoType"] = request.CodeRepoType + } + + if !tea.BoolValue(util.IsUnset(request.DisableCacheBuild)) { + query["DisableCacheBuild"] = request.DisableCacheBuild + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.OverseaBuild)) { + query["OverseaBuild"] = request.OverseaBuild + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoSourceCodeRepo"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoSourceCodeRepoResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoSourceCodeRepo(request *UpdateRepoSourceCodeRepoRequest) (_result *UpdateRepoSourceCodeRepoResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoSourceCodeRepoResponse{} + _body, _err := client.UpdateRepoSourceCodeRepoWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepoTriggerWithOptions(request *UpdateRepoTriggerRequest, runtime *util.RuntimeOptions) (_result *UpdateRepoTriggerResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.TriggerId)) { + query["TriggerId"] = request.TriggerId + } + + if !tea.BoolValue(util.IsUnset(request.TriggerName)) { + query["TriggerName"] = request.TriggerName + } + + if !tea.BoolValue(util.IsUnset(request.TriggerTag)) { + query["TriggerTag"] = request.TriggerTag + } + + if !tea.BoolValue(util.IsUnset(request.TriggerType)) { + query["TriggerType"] = request.TriggerType + } + + if !tea.BoolValue(util.IsUnset(request.TriggerUrl)) { + query["TriggerUrl"] = request.TriggerUrl + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepoTrigger"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + 
Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepoTriggerResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepoTrigger(request *UpdateRepoTriggerRequest) (_result *UpdateRepoTriggerResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepoTriggerResponse{} + _body, _err := client.UpdateRepoTriggerWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (client *Client) UpdateRepositoryWithOptions(request *UpdateRepositoryRequest, runtime *util.RuntimeOptions) (_result *UpdateRepositoryResponse, _err error) { + _err = util.ValidateModel(request) + if _err != nil { + return _result, _err + } + query := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(request.Detail)) { + query["Detail"] = request.Detail + } + + if !tea.BoolValue(util.IsUnset(request.InstanceId)) { + query["InstanceId"] = request.InstanceId + } + + if !tea.BoolValue(util.IsUnset(request.RepoId)) { + query["RepoId"] = request.RepoId + } + + if !tea.BoolValue(util.IsUnset(request.RepoType)) { + query["RepoType"] = request.RepoType + } + + if !tea.BoolValue(util.IsUnset(request.Summary)) { + query["Summary"] = request.Summary + } + + if !tea.BoolValue(util.IsUnset(request.TagImmutability)) { + query["TagImmutability"] = request.TagImmutability + } + + req := &openapi.OpenApiRequest{ + Query: openapiutil.Query(query), + } + params := &openapi.Params{ + Action: tea.String("UpdateRepository"), + Version: tea.String("2018-12-01"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/"), + Method: tea.String("POST"), + AuthType: tea.String("AK"), + Style: tea.String("RPC"), + ReqBodyType: tea.String("formData"), + BodyType: tea.String("json"), + } + _result = &UpdateRepositoryResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +func (client *Client) UpdateRepository(request *UpdateRepositoryRequest) (_result *UpdateRepositoryResponse, _err error) { + runtime := &util.RuntimeOptions{} + _result = &UpdateRepositoryResponse{} + _body, _err := client.UpdateRepositoryWithOptions(request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} diff --git a/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE b/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/darabonba-openapi/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
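Note for reviewers: the vendored files in this hunk are the generated Container Registry (cr-20181201) API client above and the darabonba-openapi core it delegates to below. Each generated ...WithOptions method validates the request model, copies every set field into an RPC query map, builds a Params/OpenApiRequest pair, and hands it to CallApi, which signs the request and applies the retry/backoff settings from util.RuntimeOptions. The sketch below shows the intended call pattern; it is illustrative only, not part of the patch. The cr-20181201 import path, its NewClient constructor, the endpoint value, and the placeholder IDs are assumptions inferred from the package's usual layout and are not shown in this diff; only the openapi.Config setters, the request fields, and the tea helpers it uses appear in the vendored code itself.

package main

import (
	"fmt"

	// Assumed import path for the generated client; only the method bodies appear in this diff.
	cr "github.com/alibabacloud-go/cr-20181201/client"
	openapi "github.com/alibabacloud-go/darabonba-openapi/client"
	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	// Core config consumed by the generated client; these setters are defined
	// in the vendored darabonba-openapi client.go below.
	config := new(openapi.Config).
		SetAccessKeyId("<accessKeyId>").
		SetAccessKeySecret("<accessKeySecret>").
		SetEndpoint("cr.cn-hangzhou.aliyuncs.com") // assumed regional endpoint

	// Assumption: the generated cr-20181201 package exposes a NewClient that
	// wraps openapi.Client; its constructor is not part of this hunk.
	client, err := cr.NewClient(config)
	if err != nil {
		panic(err)
	}

	// UpdateRepository copies each non-nil pointer field into the RPC query
	// map, exactly as the generated UpdateRepositoryWithOptions above does.
	resp, err := client.UpdateRepository(&cr.UpdateRepositoryRequest{
		InstanceId: tea.String("cri-xxxxxxxx"), // hypothetical instance ID
		RepoId:     tea.String("crr-xxxxxxxx"), // hypothetical repository ID
		Summary:    tea.String("updated via generated SDK"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(tea.Prettify(resp))
}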
diff --git a/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go b/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go new file mode 100644 index 0000000000..6da3e1fdd1 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/darabonba-openapi/client/client.go @@ -0,0 +1,1623 @@ +// This file is auto-generated, don't edit it. Thanks. +/** + * This is for OpenApi SDK + */ +package client + +import ( + "io" + + spi "github.com/alibabacloud-go/alibabacloud-gateway-spi/client" + openapiutil "github.com/alibabacloud-go/openapi-util/service" + util "github.com/alibabacloud-go/tea-utils/service" + xml "github.com/alibabacloud-go/tea-xml/service" + "github.com/alibabacloud-go/tea/tea" + credential "github.com/aliyun/credentials-go/credentials" +) + +/** + * Model for initing client + */ +type Config struct { + // accesskey id + AccessKeyId *string `json:"accessKeyId,omitempty" xml:"accessKeyId,omitempty"` + // accesskey secret + AccessKeySecret *string `json:"accessKeySecret,omitempty" xml:"accessKeySecret,omitempty"` + // security token + SecurityToken *string `json:"securityToken,omitempty" xml:"securityToken,omitempty"` + // http protocol + Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty"` + // http method + Method *string `json:"method,omitempty" xml:"method,omitempty"` + // region id + RegionId *string `json:"regionId,omitempty" xml:"regionId,omitempty"` + // read timeout + ReadTimeout *int `json:"readTimeout,omitempty" xml:"readTimeout,omitempty"` + // connect timeout + ConnectTimeout *int `json:"connectTimeout,omitempty" xml:"connectTimeout,omitempty"` + // http proxy + HttpProxy *string `json:"httpProxy,omitempty" xml:"httpProxy,omitempty"` + // https proxy + HttpsProxy *string `json:"httpsProxy,omitempty" xml:"httpsProxy,omitempty"` + // credential + Credential credential.Credential `json:"credential,omitempty" xml:"credential,omitempty"` + // endpoint + Endpoint *string `json:"endpoint,omitempty" xml:"endpoint,omitempty"` + // proxy white list + NoProxy *string `json:"noProxy,omitempty" xml:"noProxy,omitempty"` + // max idle conns + MaxIdleConns *int `json:"maxIdleConns,omitempty" xml:"maxIdleConns,omitempty"` + // network for endpoint + Network *string `json:"network,omitempty" xml:"network,omitempty"` + // user agent + UserAgent *string `json:"userAgent,omitempty" xml:"userAgent,omitempty"` + // suffix for endpoint + Suffix *string `json:"suffix,omitempty" xml:"suffix,omitempty"` + // socks5 proxy + Socks5Proxy *string `json:"socks5Proxy,omitempty" xml:"socks5Proxy,omitempty"` + // socks5 network + Socks5NetWork *string `json:"socks5NetWork,omitempty" xml:"socks5NetWork,omitempty"` + // endpoint type + EndpointType *string `json:"endpointType,omitempty" xml:"endpointType,omitempty"` + // OpenPlatform endpoint + OpenPlatformEndpoint *string `json:"openPlatformEndpoint,omitempty" xml:"openPlatformEndpoint,omitempty"` + // Deprecated + // credential type + Type *string `json:"type,omitempty" xml:"type,omitempty"` + // Signature Version + SignatureVersion *string `json:"signatureVersion,omitempty" xml:"signatureVersion,omitempty"` + // Signature Algorithm + SignatureAlgorithm *string `json:"signatureAlgorithm,omitempty" xml:"signatureAlgorithm,omitempty"` +} + +func (s Config) String() string { + return tea.Prettify(s) +} + +func (s Config) GoString() string { + return s.String() +} + +func (s *Config) SetAccessKeyId(v string) *Config { + s.AccessKeyId = &v + return s +} + +func (s *Config) SetAccessKeySecret(v string) *Config { + 
s.AccessKeySecret = &v + return s +} + +func (s *Config) SetSecurityToken(v string) *Config { + s.SecurityToken = &v + return s +} + +func (s *Config) SetProtocol(v string) *Config { + s.Protocol = &v + return s +} + +func (s *Config) SetMethod(v string) *Config { + s.Method = &v + return s +} + +func (s *Config) SetRegionId(v string) *Config { + s.RegionId = &v + return s +} + +func (s *Config) SetReadTimeout(v int) *Config { + s.ReadTimeout = &v + return s +} + +func (s *Config) SetConnectTimeout(v int) *Config { + s.ConnectTimeout = &v + return s +} + +func (s *Config) SetHttpProxy(v string) *Config { + s.HttpProxy = &v + return s +} + +func (s *Config) SetHttpsProxy(v string) *Config { + s.HttpsProxy = &v + return s +} + +func (s *Config) SetCredential(v credential.Credential) *Config { + s.Credential = v + return s +} + +func (s *Config) SetEndpoint(v string) *Config { + s.Endpoint = &v + return s +} + +func (s *Config) SetNoProxy(v string) *Config { + s.NoProxy = &v + return s +} + +func (s *Config) SetMaxIdleConns(v int) *Config { + s.MaxIdleConns = &v + return s +} + +func (s *Config) SetNetwork(v string) *Config { + s.Network = &v + return s +} + +func (s *Config) SetUserAgent(v string) *Config { + s.UserAgent = &v + return s +} + +func (s *Config) SetSuffix(v string) *Config { + s.Suffix = &v + return s +} + +func (s *Config) SetSocks5Proxy(v string) *Config { + s.Socks5Proxy = &v + return s +} + +func (s *Config) SetSocks5NetWork(v string) *Config { + s.Socks5NetWork = &v + return s +} + +func (s *Config) SetEndpointType(v string) *Config { + s.EndpointType = &v + return s +} + +func (s *Config) SetOpenPlatformEndpoint(v string) *Config { + s.OpenPlatformEndpoint = &v + return s +} + +func (s *Config) SetType(v string) *Config { + s.Type = &v + return s +} + +func (s *Config) SetSignatureVersion(v string) *Config { + s.SignatureVersion = &v + return s +} + +func (s *Config) SetSignatureAlgorithm(v string) *Config { + s.SignatureAlgorithm = &v + return s +} + +type OpenApiRequest struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty"` + Query map[string]*string `json:"query,omitempty" xml:"query,omitempty"` + Body interface{} `json:"body,omitempty" xml:"body,omitempty"` + Stream io.Reader `json:"stream,omitempty" xml:"stream,omitempty"` + HostMap map[string]*string `json:"hostMap,omitempty" xml:"hostMap,omitempty"` + EndpointOverride *string `json:"endpointOverride,omitempty" xml:"endpointOverride,omitempty"` +} + +func (s OpenApiRequest) String() string { + return tea.Prettify(s) +} + +func (s OpenApiRequest) GoString() string { + return s.String() +} + +func (s *OpenApiRequest) SetHeaders(v map[string]*string) *OpenApiRequest { + s.Headers = v + return s +} + +func (s *OpenApiRequest) SetQuery(v map[string]*string) *OpenApiRequest { + s.Query = v + return s +} + +func (s *OpenApiRequest) SetBody(v interface{}) *OpenApiRequest { + s.Body = v + return s +} + +func (s *OpenApiRequest) SetStream(v io.Reader) *OpenApiRequest { + s.Stream = v + return s +} + +func (s *OpenApiRequest) SetHostMap(v map[string]*string) *OpenApiRequest { + s.HostMap = v + return s +} + +func (s *OpenApiRequest) SetEndpointOverride(v string) *OpenApiRequest { + s.EndpointOverride = &v + return s +} + +type Params struct { + Action *string `json:"action,omitempty" xml:"action,omitempty" require:"true"` + Version *string `json:"version,omitempty" xml:"version,omitempty" require:"true"` + Protocol *string `json:"protocol,omitempty" xml:"protocol,omitempty" require:"true"` + 
Pathname *string `json:"pathname,omitempty" xml:"pathname,omitempty" require:"true"` + Method *string `json:"method,omitempty" xml:"method,omitempty" require:"true"` + AuthType *string `json:"authType,omitempty" xml:"authType,omitempty" require:"true"` + BodyType *string `json:"bodyType,omitempty" xml:"bodyType,omitempty" require:"true"` + ReqBodyType *string `json:"reqBodyType,omitempty" xml:"reqBodyType,omitempty" require:"true"` + Style *string `json:"style,omitempty" xml:"style,omitempty"` +} + +func (s Params) String() string { + return tea.Prettify(s) +} + +func (s Params) GoString() string { + return s.String() +} + +func (s *Params) SetAction(v string) *Params { + s.Action = &v + return s +} + +func (s *Params) SetVersion(v string) *Params { + s.Version = &v + return s +} + +func (s *Params) SetProtocol(v string) *Params { + s.Protocol = &v + return s +} + +func (s *Params) SetPathname(v string) *Params { + s.Pathname = &v + return s +} + +func (s *Params) SetMethod(v string) *Params { + s.Method = &v + return s +} + +func (s *Params) SetAuthType(v string) *Params { + s.AuthType = &v + return s +} + +func (s *Params) SetBodyType(v string) *Params { + s.BodyType = &v + return s +} + +func (s *Params) SetReqBodyType(v string) *Params { + s.ReqBodyType = &v + return s +} + +func (s *Params) SetStyle(v string) *Params { + s.Style = &v + return s +} + +type Client struct { + Endpoint *string + RegionId *string + Protocol *string + Method *string + UserAgent *string + EndpointRule *string + EndpointMap map[string]*string + Suffix *string + ReadTimeout *int + ConnectTimeout *int + HttpProxy *string + HttpsProxy *string + Socks5Proxy *string + Socks5NetWork *string + NoProxy *string + Network *string + ProductId *string + MaxIdleConns *int + EndpointType *string + OpenPlatformEndpoint *string + Credential credential.Credential + SignatureVersion *string + SignatureAlgorithm *string + Headers map[string]*string + Spi spi.ClientInterface +} + +/** + * Init client with Config + * @param config config contains the necessary information to create a client + */ +func NewClient(config *Config) (*Client, error) { + client := new(Client) + err := client.Init(config) + return client, err +} + +func (client *Client) Init(config *Config) (_err error) { + if tea.BoolValue(util.IsUnset(tea.ToMap(config))) { + _err = tea.NewSDKError(map[string]interface{}{ + "code": "ParameterMissing", + "message": "'config' can not be unset", + }) + return _err + } + + if !tea.BoolValue(util.Empty(config.AccessKeyId)) && !tea.BoolValue(util.Empty(config.AccessKeySecret)) { + if !tea.BoolValue(util.Empty(config.SecurityToken)) { + config.Type = tea.String("sts") + } else { + config.Type = tea.String("access_key") + } + + credentialConfig := &credential.Config{ + AccessKeyId: config.AccessKeyId, + Type: config.Type, + AccessKeySecret: config.AccessKeySecret, + SecurityToken: config.SecurityToken, + } + client.Credential, _err = credential.NewCredential(credentialConfig) + if _err != nil { + return _err + } + + } else if !tea.BoolValue(util.IsUnset(config.Credential)) { + client.Credential = config.Credential + } + + client.Endpoint = config.Endpoint + client.EndpointType = config.EndpointType + client.Network = config.Network + client.Suffix = config.Suffix + client.Protocol = config.Protocol + client.Method = config.Method + client.RegionId = config.RegionId + client.UserAgent = config.UserAgent + client.ReadTimeout = config.ReadTimeout + client.ConnectTimeout = config.ConnectTimeout + client.HttpProxy = config.HttpProxy 
+ client.HttpsProxy = config.HttpsProxy + client.NoProxy = config.NoProxy + client.Socks5Proxy = config.Socks5Proxy + client.Socks5NetWork = config.Socks5NetWork + client.MaxIdleConns = config.MaxIdleConns + client.SignatureVersion = config.SignatureVersion + client.SignatureAlgorithm = config.SignatureAlgorithm + return nil +} + +/** + * Encapsulate the request and invoke the network + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param bodyType response body type e.g. String + * @param request object of OpenApiRequest + * @param runtime which controls some details of call api, such as retry times + * @return the response + */ +func (client *Client) DoRPCRequest(action *string, version *string, protocol *string, method *string, authType *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + _err = tea.Validate(request) + if _err != nil { + return _result, _err + } + _err = tea.Validate(runtime) + if _err != nil { + return _result, _err + } + _runtime := map[string]interface{}{ + "timeouted": "retry", + "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)), + "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)), + "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)), + "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)), + "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)), + "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)), + "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)), + "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)), + "retry": map[string]interface{}{ + "retryable": tea.BoolValue(runtime.Autoretry), + "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))), + }, + "backoff": map[string]interface{}{ + "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))), + "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))), + }, + "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL), + } + + _resp := make(map[string]interface{}) + for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ { + if _retryTimes > 0 { + _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes)) + if tea.IntValue(_backoffTime) > 0 { + tea.Sleep(_backoffTime) + } + } + + _resp, _err = func() (map[string]interface{}, error) { + request_ := tea.NewRequest() + request_.Protocol = util.DefaultString(client.Protocol, protocol) + request_.Method = method + request_.Pathname = tea.String("/") + request_.Query = tea.Merge(map[string]*string{ + "Action": action, + "Format": tea.String("json"), + "Version": version, + "Timestamp": openapiutil.GetTimestamp(), + "SignatureNonce": util.GetNonce(), + }, request.Query) + headers, _err := client.GetRpcHeaders() + if _err != nil { + return _result, _err + } + + if tea.BoolValue(util.IsUnset(headers)) { + // endpoint is setted in product client + request_.Headers = map[string]*string{ + "host": client.Endpoint, + "x-acs-version": version, + "x-acs-action": action, + "user-agent": client.GetUserAgent(), + } + } else { + request_.Headers = 
tea.Merge(map[string]*string{ + "host": client.Endpoint, + "x-acs-version": version, + "x-acs-action": action, + "user-agent": client.GetUserAgent(), + }, headers) + } + + if !tea.BoolValue(util.IsUnset(request.Body)) { + m := util.AssertAsMap(request.Body) + tmp := util.AnyifyMapValue(openapiutil.Query(m)) + request_.Body = tea.ToReader(util.ToFormString(tmp)) + request_.Headers["content-type"] = tea.String("application/x-www-form-urlencoded") + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Query["SecurityToken"] = securityToken + } + + request_.Query["SignatureMethod"] = tea.String("HMAC-SHA1") + request_.Query["SignatureVersion"] = tea.String("1.0") + request_.Query["AccessKeyId"] = accessKeyId + var t map[string]interface{} + if !tea.BoolValue(util.IsUnset(request.Body)) { + t = util.AssertAsMap(request.Body) + } + + signedParam := tea.Merge(request_.Query, + openapiutil.Query(t)) + request_.Query["Signature"] = openapiutil.GetRPCSignature(signedParam, request_.Method, accessKeySecret) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + requestId := DefaultAny(err["RequestId"], err["requestId"]) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(requestId), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + 
if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +/** + * Encapsulate the request and invoke the network + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param pathname pathname of every api + * @param bodyType response body type e.g. String + * @param request object of OpenApiRequest + * @param runtime which controls some details of call api, such as retry times + * @return the response + */ +func (client *Client) DoROARequest(action *string, version *string, protocol *string, method *string, authType *string, pathname *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + _err = tea.Validate(request) + if _err != nil { + return _result, _err + } + _err = tea.Validate(runtime) + if _err != nil { + return _result, _err + } + _runtime := map[string]interface{}{ + "timeouted": "retry", + "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)), + "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)), + "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)), + "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)), + "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)), + "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)), + "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)), + "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)), + "retry": map[string]interface{}{ + "retryable": tea.BoolValue(runtime.Autoretry), + "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))), + }, + "backoff": map[string]interface{}{ + "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))), + "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))), + }, + "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL), + } + + _resp := make(map[string]interface{}) + for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ { + if _retryTimes > 0 { + _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes)) + if tea.IntValue(_backoffTime) > 0 { + tea.Sleep(_backoffTime) + } + } + + _resp, _err = func() (map[string]interface{}, error) { + request_ := tea.NewRequest() + request_.Protocol = util.DefaultString(client.Protocol, protocol) + request_.Method = method + request_.Pathname = pathname + request_.Headers = tea.Merge(map[string]*string{ + "date": util.GetDateUTCString(), + "host": client.Endpoint, + "accept": tea.String("application/json"), + "x-acs-signature-nonce": util.GetNonce(), + "x-acs-signature-method": tea.String("HMAC-SHA1"), + "x-acs-signature-version": tea.String("1.0"), + "x-acs-version": version, + "x-acs-action": action, + "user-agent": 
util.GetUserAgent(client.UserAgent), + }, request.Headers) + if !tea.BoolValue(util.IsUnset(request.Body)) { + request_.Body = tea.ToReader(util.ToJSONString(request.Body)) + request_.Headers["content-type"] = tea.String("application/json; charset=utf-8") + } + + if !tea.BoolValue(util.IsUnset(request.Query)) { + request_.Query = request.Query + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + stringToSign := openapiutil.GetStringToSign(request_) + request_.Headers["authorization"] = tea.String("acs " + tea.StringValue(accessKeyId) + ":" + tea.StringValue(openapiutil.GetROASignature(stringToSign, accessKeySecret))) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.EqualNumber(response_.StatusCode, tea.Int(204))) { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + requestId := DefaultAny(err["RequestId"], err["requestId"]) + requestId = DefaultAny(requestId, err["requestid"]) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(requestId), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := 
util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +/** + * Encapsulate the request and invoke the network with form body + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param pathname pathname of every api + * @param bodyType response body type e.g. String + * @param request object of OpenApiRequest + * @param runtime which controls some details of call api, such as retry times + * @return the response + */ +func (client *Client) DoROARequestWithForm(action *string, version *string, protocol *string, method *string, authType *string, pathname *string, bodyType *string, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + _err = tea.Validate(request) + if _err != nil { + return _result, _err + } + _err = tea.Validate(runtime) + if _err != nil { + return _result, _err + } + _runtime := map[string]interface{}{ + "timeouted": "retry", + "readTimeout": tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)), + "connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)), + "httpProxy": tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)), + "httpsProxy": tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)), + "noProxy": tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)), + "socks5Proxy": tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)), + "socks5NetWork": tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)), + "maxIdleConns": tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)), + "retry": map[string]interface{}{ + "retryable": tea.BoolValue(runtime.Autoretry), + "maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))), + }, + "backoff": map[string]interface{}{ + "policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))), + "period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))), + }, + "ignoreSSL": tea.BoolValue(runtime.IgnoreSSL), + } + + _resp := make(map[string]interface{}) + for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ { + if _retryTimes > 0 { + _backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes)) + if tea.IntValue(_backoffTime) > 0 { + tea.Sleep(_backoffTime) + } + } + + _resp, _err = func() (map[string]interface{}, error) { + request_ := tea.NewRequest() + request_.Protocol = util.DefaultString(client.Protocol, protocol) + request_.Method = method + request_.Pathname = pathname + request_.Headers = tea.Merge(map[string]*string{ + "date": util.GetDateUTCString(), + "host": client.Endpoint, + "accept": tea.String("application/json"), + "x-acs-signature-nonce": util.GetNonce(), + "x-acs-signature-method": tea.String("HMAC-SHA1"), + "x-acs-signature-version": tea.String("1.0"), + "x-acs-version": 
version, + "x-acs-action": action, + "user-agent": util.GetUserAgent(client.UserAgent), + }, request.Headers) + if !tea.BoolValue(util.IsUnset(request.Body)) { + m := util.AssertAsMap(request.Body) + request_.Body = tea.ToReader(openapiutil.ToForm(m)) + request_.Headers["content-type"] = tea.String("application/x-www-form-urlencoded") + } + + if !tea.BoolValue(util.IsUnset(request.Query)) { + request_.Query = request.Query + } + + if !tea.BoolValue(util.EqualString(authType, tea.String("Anonymous"))) { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + stringToSign := openapiutil.GetStringToSign(request_) + request_.Headers["authorization"] = tea.String("acs " + tea.StringValue(accessKeyId) + ":" + tea.StringValue(openapiutil.GetROASignature(stringToSign, accessKeySecret))) + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.EqualNumber(response_.StatusCode, tea.Int(204))) { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err := util.AssertAsMap(_res) + _err = tea.NewSDKError(map[string]interface{}{ + "code": tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(DefaultAny(err["RequestId"], err["requestId"])), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(bodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(bodyType, tea.String("array"))) { + arr, _err := 
util.ReadAsJSON(response_.Body)
+			if _err != nil {
+				return _result, _err
+			}
+
+			_result = make(map[string]interface{})
+			_err = tea.Convert(map[string]interface{}{
+				"body":    arr,
+				"headers": response_.Headers,
+			}, &_result)
+			return _result, _err
+		} else {
+			_result = make(map[string]interface{})
+			_err = tea.Convert(map[string]map[string]*string{
+				"headers": response_.Headers,
+			}, &_result)
+			return _result, _err
+		}
+
+		}()
+		if !tea.BoolValue(tea.Retryable(_err)) {
+			break
+		}
+	}
+
+	return _resp, _err
+}
+
+/**
+ * Encapsulate the request and invoke the network
+ * @param action api name
+ * @param version product version
+ * @param protocol http or https
+ * @param method e.g. GET
+ * @param authType authorization type e.g. AK
+ * @param bodyType response body type e.g. String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of call api, such as retry times
+ * @return the response
+ */
+func (client *Client) DoRequest(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+	_err = tea.Validate(params)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Validate(request)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Validate(runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_runtime := map[string]interface{}{
+		"timeouted":      "retry",
+		"readTimeout":    tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+		"connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+		"httpProxy":      tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+		"httpsProxy":     tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+		"noProxy":        tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+		"socks5Proxy":    tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+		"socks5NetWork":  tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+		"maxIdleConns":   tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+		"retry": map[string]interface{}{
+			"retryable":   tea.BoolValue(runtime.Autoretry),
+			"maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+		},
+		"backoff": map[string]interface{}{
+			"policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+			"period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+		},
+		"ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+	}
+
+	_resp := make(map[string]interface{})
+	for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+		if _retryTimes > 0 {
+			_backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+			if tea.IntValue(_backoffTime) > 0 {
+				tea.Sleep(_backoffTime)
+			}
+		}
+
+		_resp, _err = func() (map[string]interface{}, error) {
+			request_ := tea.NewRequest()
+			request_.Protocol = util.DefaultString(client.Protocol, params.Protocol)
+			request_.Method = params.Method
+			request_.Pathname = params.Pathname
+			request_.Query = request.Query
+			// endpoint is set in product client
+			request_.Headers = tea.Merge(map[string]*string{
+				"host":                    client.Endpoint,
+				"x-acs-version":           params.Version,
+				"x-acs-action":            params.Action,
+				"user-agent":              client.GetUserAgent(),
+				"x-acs-date":              openapiutil.GetTimestamp(),
+				"x-acs-signature-nonce":   util.GetNonce(),
+				"accept":                  tea.String("application/json"),
+			},
request.Headers) + if tea.BoolValue(util.EqualString(params.Style, tea.String("RPC"))) { + headers, _err := client.GetRpcHeaders() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.IsUnset(headers)) { + request_.Headers = tea.Merge(request_.Headers, + headers) + } + + } + + signatureAlgorithm := util.DefaultString(client.SignatureAlgorithm, tea.String("ACS3-HMAC-SHA256")) + hashedRequestPayload := openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(tea.String("")), signatureAlgorithm)) + if !tea.BoolValue(util.IsUnset(request.Stream)) { + tmp, _err := util.ReadAsBytes(request.Stream) + if _err != nil { + return _result, _err + } + + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(tmp, signatureAlgorithm)) + request_.Body = tea.ToReader(tmp) + request_.Headers["content-type"] = tea.String("application/octet-stream") + } else { + if !tea.BoolValue(util.IsUnset(request.Body)) { + if tea.BoolValue(util.EqualString(params.ReqBodyType, tea.String("json"))) { + jsonObj := util.ToJSONString(request.Body) + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(jsonObj), signatureAlgorithm)) + request_.Body = tea.ToReader(jsonObj) + request_.Headers["content-type"] = tea.String("application/json; charset=utf-8") + } else { + m := util.AssertAsMap(request.Body) + formObj := openapiutil.ToForm(m) + hashedRequestPayload = openapiutil.HexEncode(openapiutil.Hash(util.ToBytes(formObj), signatureAlgorithm)) + request_.Body = tea.ToReader(formObj) + request_.Headers["content-type"] = tea.String("application/x-www-form-urlencoded") + } + + } + + } + + request_.Headers["x-acs-content-sha256"] = hashedRequestPayload + if !tea.BoolValue(util.EqualString(params.AuthType, tea.String("Anonymous"))) { + authType, _err := client.GetType() + if _err != nil { + return _result, _err + } + + if tea.BoolValue(util.EqualString(authType, tea.String("bearer"))) { + bearerToken, _err := client.GetBearerToken() + if _err != nil { + return _result, _err + } + + request_.Headers["x-acs-bearer-token"] = bearerToken + } else { + accessKeyId, _err := client.GetAccessKeyId() + if _err != nil { + return _result, _err + } + + accessKeySecret, _err := client.GetAccessKeySecret() + if _err != nil { + return _result, _err + } + + securityToken, _err := client.GetSecurityToken() + if _err != nil { + return _result, _err + } + + if !tea.BoolValue(util.Empty(securityToken)) { + request_.Headers["x-acs-accesskey-id"] = accessKeyId + request_.Headers["x-acs-security-token"] = securityToken + } + + request_.Headers["Authorization"] = openapiutil.GetAuthorization(request_, signatureAlgorithm, hashedRequestPayload, accessKeyId, accessKeySecret) + } + + } + + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + if tea.BoolValue(util.Is4xx(response_.StatusCode)) || tea.BoolValue(util.Is5xx(response_.StatusCode)) { + err := map[string]interface{}{} + if !tea.BoolValue(util.IsUnset(response_.Headers["content-type"])) && tea.BoolValue(util.EqualString(response_.Headers["content-type"], tea.String("text/xml;charset=utf-8"))) { + _str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + respMap := xml.ParseXml(_str, nil) + err = util.AssertAsMap(respMap["Error"]) + } else { + _res, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + err = util.AssertAsMap(_res) + } + + err["statusCode"] = response_.StatusCode + _err = tea.NewSDKError(map[string]interface{}{ + "code": 
tea.ToString(DefaultAny(err["Code"], err["code"])), + "message": "code: " + tea.ToString(tea.IntValue(response_.StatusCode)) + ", " + tea.ToString(DefaultAny(err["Message"], err["message"])) + " request id: " + tea.ToString(DefaultAny(err["RequestId"], err["requestId"])), + "data": err, + }) + return _result, _err + } + + if tea.BoolValue(util.EqualString(params.BodyType, tea.String("binary"))) { + resp := map[string]interface{}{ + "body": response_.Body, + "headers": response_.Headers, + } + _result = resp + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("byte"))) { + byt, _err := util.ReadAsBytes(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": byt, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("string"))) { + str, _err := util.ReadAsString(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": tea.StringValue(str), + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("json"))) { + obj, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + res := util.AssertAsMap(obj) + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": res, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else if tea.BoolValue(util.EqualString(params.BodyType, tea.String("array"))) { + arr, _err := util.ReadAsJSON(response_.Body) + if _err != nil { + return _result, _err + } + + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "body": arr, + "headers": response_.Headers, + }, &_result) + return _result, _err + } else { + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]map[string]*string{ + "headers": response_.Headers, + }, &_result) + return _result, _err + } + + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +/** + * Encapsulate the request and invoke the network + * @param action api name + * @param version product version + * @param protocol http or https + * @param method e.g. GET + * @param authType authorization type e.g. AK + * @param bodyType response body type e.g. 
String
+ * @param request object of OpenApiRequest
+ * @param runtime which controls some details of call api, such as retry times
+ * @return the response
+ */
+func (client *Client) Execute(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) {
+	_err = tea.Validate(params)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Validate(request)
+	if _err != nil {
+		return _result, _err
+	}
+	_err = tea.Validate(runtime)
+	if _err != nil {
+		return _result, _err
+	}
+	_runtime := map[string]interface{}{
+		"timeouted":      "retry",
+		"readTimeout":    tea.IntValue(util.DefaultNumber(runtime.ReadTimeout, client.ReadTimeout)),
+		"connectTimeout": tea.IntValue(util.DefaultNumber(runtime.ConnectTimeout, client.ConnectTimeout)),
+		"httpProxy":      tea.StringValue(util.DefaultString(runtime.HttpProxy, client.HttpProxy)),
+		"httpsProxy":     tea.StringValue(util.DefaultString(runtime.HttpsProxy, client.HttpsProxy)),
+		"noProxy":        tea.StringValue(util.DefaultString(runtime.NoProxy, client.NoProxy)),
+		"socks5Proxy":    tea.StringValue(util.DefaultString(runtime.Socks5Proxy, client.Socks5Proxy)),
+		"socks5NetWork":  tea.StringValue(util.DefaultString(runtime.Socks5NetWork, client.Socks5NetWork)),
+		"maxIdleConns":   tea.IntValue(util.DefaultNumber(runtime.MaxIdleConns, client.MaxIdleConns)),
+		"retry": map[string]interface{}{
+			"retryable":   tea.BoolValue(runtime.Autoretry),
+			"maxAttempts": tea.IntValue(util.DefaultNumber(runtime.MaxAttempts, tea.Int(3))),
+		},
+		"backoff": map[string]interface{}{
+			"policy": tea.StringValue(util.DefaultString(runtime.BackoffPolicy, tea.String("no"))),
+			"period": tea.IntValue(util.DefaultNumber(runtime.BackoffPeriod, tea.Int(1))),
+		},
+		"ignoreSSL": tea.BoolValue(runtime.IgnoreSSL),
+	}
+
+	_resp := make(map[string]interface{})
+	for _retryTimes := 0; tea.BoolValue(tea.AllowRetry(_runtime["retry"], tea.Int(_retryTimes))); _retryTimes++ {
+		if _retryTimes > 0 {
+			_backoffTime := tea.GetBackoffTime(_runtime["backoff"], tea.Int(_retryTimes))
+			if tea.IntValue(_backoffTime) > 0 {
+				tea.Sleep(_backoffTime)
+			}
+		}
+
+		_resp, _err = func() (map[string]interface{}, error) {
+			request_ := tea.NewRequest()
+			// spi = new Gateway(); // Gateway implements SPI; this instantiation happens in the product SDK
+			headers, _err := client.GetRpcHeaders()
+			if _err != nil {
+				return _result, _err
+			}
+
+			requestContext := &spi.InterceptorContextRequest{
+				Headers: tea.Merge(request.Headers,
+					headers),
+				Query:              request.Query,
+				Body:               request.Body,
+				Stream:             request.Stream,
+				HostMap:            request.HostMap,
+				Pathname:           params.Pathname,
+				ProductId:          client.ProductId,
+				Action:             params.Action,
+				Version:            params.Version,
+				Protocol:           util.DefaultString(client.Protocol, params.Protocol),
+				Method:             util.DefaultString(client.Method, params.Method),
+				AuthType:           params.AuthType,
+				BodyType:           params.BodyType,
+				ReqBodyType:        params.ReqBodyType,
+				Style:              params.Style,
+				Credential:         client.Credential,
+				SignatureVersion:   client.SignatureVersion,
+				SignatureAlgorithm: client.SignatureAlgorithm,
+				UserAgent:          client.GetUserAgent(),
+			}
+			configurationContext := &spi.InterceptorContextConfiguration{
+				RegionId:     client.RegionId,
+				Endpoint:     util.DefaultString(request.EndpointOverride, client.Endpoint),
+				EndpointRule: client.EndpointRule,
+				EndpointMap:  client.EndpointMap,
+				EndpointType: client.EndpointType,
+				Network:      client.Network,
+				Suffix:       client.Suffix,
+			}
+			interceptorContext := &spi.InterceptorContext{
+				Request:       requestContext,
+				Configuration: configurationContext,
+			}
+			attributeMap
:= &spi.AttributeMap{} + // 1. spi.modifyConfiguration(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyConfiguration(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + // 2. spi.modifyRequest(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyRequest(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + request_.Protocol = interceptorContext.Request.Protocol + request_.Method = interceptorContext.Request.Method + request_.Pathname = interceptorContext.Request.Pathname + request_.Query = interceptorContext.Request.Query + request_.Body = interceptorContext.Request.Stream + request_.Headers = interceptorContext.Request.Headers + response_, _err := tea.DoRequest(request_, _runtime) + if _err != nil { + return _result, _err + } + responseContext := &spi.InterceptorContextResponse{ + StatusCode: response_.StatusCode, + Headers: response_.Headers, + Body: response_.Body, + } + interceptorContext.Response = responseContext + // 3. spi.modifyResponse(context: SPI.InterceptorContext, attributeMap: SPI.AttributeMap); + _err = client.Spi.ModifyResponse(interceptorContext, attributeMap) + if _err != nil { + return _result, _err + } + _result = make(map[string]interface{}) + _err = tea.Convert(map[string]interface{}{ + "headers": interceptorContext.Response.Headers, + "body": interceptorContext.Response.DeserializedBody, + }, &_result) + return _result, _err + }() + if !tea.BoolValue(tea.Retryable(_err)) { + break + } + } + + return _resp, _err +} + +func (client *Client) CallApi(params *Params, request *OpenApiRequest, runtime *util.RuntimeOptions) (_result map[string]interface{}, _err error) { + if tea.BoolValue(util.IsUnset(tea.ToMap(params))) { + _err = tea.NewSDKError(map[string]interface{}{ + "code": "ParameterMissing", + "message": "'params' can not be unset", + }) + return _result, _err + } + + if tea.BoolValue(util.IsUnset(client.SignatureAlgorithm)) || !tea.BoolValue(util.EqualString(client.SignatureAlgorithm, tea.String("v2"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoRequest(params, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else if tea.BoolValue(util.EqualString(params.Style, tea.String("ROA"))) && tea.BoolValue(util.EqualString(params.ReqBodyType, tea.String("json"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoROARequest(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.Pathname, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else if tea.BoolValue(util.EqualString(params.Style, tea.String("ROA"))) { + _result = make(map[string]interface{}) + _body, _err := client.DoROARequestWithForm(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.Pathname, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } else { + _result = make(map[string]interface{}) + _body, _err := client.DoRPCRequest(params.Action, params.Version, params.Protocol, params.Method, params.AuthType, params.BodyType, request, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err + } + +} + +/** + * Get user agent + * @return user agent + */ +func (client *Client) GetUserAgent() (_result *string) { + userAgent 
:= util.GetUserAgent(client.UserAgent)
+	_result = userAgent
+	return _result
+}
+
+/**
+ * Get accesskey id by using credential
+ * @return accesskey id
+ */
+func (client *Client) GetAccessKeyId() (_result *string, _err error) {
+	if tea.BoolValue(util.IsUnset(client.Credential)) {
+		_result = tea.String("")
+		return _result, _err
+	}
+
+	accessKeyId, _err := client.Credential.GetAccessKeyId()
+	if _err != nil {
+		return _result, _err
+	}
+
+	_result = accessKeyId
+	return _result, _err
+}
+
+/**
+ * Get accesskey secret by using credential
+ * @return accesskey secret
+ */
+func (client *Client) GetAccessKeySecret() (_result *string, _err error) {
+	if tea.BoolValue(util.IsUnset(client.Credential)) {
+		_result = tea.String("")
+		return _result, _err
+	}
+
+	secret, _err := client.Credential.GetAccessKeySecret()
+	if _err != nil {
+		return _result, _err
+	}
+
+	_result = secret
+	return _result, _err
+}
+
+/**
+ * Get security token by using credential
+ * @return security token
+ */
+func (client *Client) GetSecurityToken() (_result *string, _err error) {
+	if tea.BoolValue(util.IsUnset(client.Credential)) {
+		_result = tea.String("")
+		return _result, _err
+	}
+
+	token, _err := client.Credential.GetSecurityToken()
+	if _err != nil {
+		return _result, _err
+	}
+
+	_result = token
+	return _result, _err
+}
+
+/**
+ * Get bearer token by credential
+ * @return bearer token
+ */
+func (client *Client) GetBearerToken() (_result *string, _err error) {
+	if tea.BoolValue(util.IsUnset(client.Credential)) {
+		_result = tea.String("")
+		return _result, _err
+	}
+
+	token := client.Credential.GetBearerToken()
+	_result = token
+	return _result, _err
+}
+
+/**
+ * Get credential type by credential
+ * @return credential type e.g. access_key
+ */
+func (client *Client) GetType() (_result *string, _err error) {
+	if tea.BoolValue(util.IsUnset(client.Credential)) {
+		_result = tea.String("")
+		return _result, _err
+	}
+
+	authType := client.Credential.GetType()
+	_result = authType
+	return _result, _err
+}
+
+/**
+ * Return inputValue if it is not null; otherwise return defaultValue
+ * @param inputValue users input value
+ * @param defaultValue default value
+ * @return the final result
+ */
+func DefaultAny(inputValue interface{}, defaultValue interface{}) (_result interface{}) {
+	if tea.BoolValue(util.IsUnset(inputValue)) {
+		_result = defaultValue
+		return _result
+	}
+
+	_result = inputValue
+	return _result
+}
+
+/**
+ * If both the endpointRule and config.endpoint are empty, throw an error
+ * @param config config contains the necessary information to create a client
+ */
+func (client *Client) CheckConfig(config *Config) (_err error) {
+	if tea.BoolValue(util.Empty(client.EndpointRule)) && tea.BoolValue(util.Empty(config.Endpoint)) {
+		_err = tea.NewSDKError(map[string]interface{}{
+			"code":    "ParameterMissing",
+			"message": "'config.endpoint' can not be empty",
+		})
+		return _err
+	}
+
+	return _err
+}
+
+/**
+ * Set RPC headers for debug
+ * @param headers headers for debug; these headers can be used only once.
+ */ +func (client *Client) SetRpcHeaders(headers map[string]*string) (_err error) { + client.Headers = headers + return _err +} + +/** + * get RPC header for debug + */ +func (client *Client) GetRpcHeaders() (_result map[string]*string, _err error) { + headers := client.Headers + client.Headers = nil + _result = headers + return _result, _err +} diff --git a/vendor/go.opentelemetry.io/contrib/LICENSE b/vendor/github.com/alibabacloud-go/debug/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/contrib/LICENSE rename to vendor/github.com/alibabacloud-go/debug/LICENSE diff --git a/vendor/github.com/alibabacloud-go/debug/debug/assert.go b/vendor/github.com/alibabacloud-go/debug/debug/assert.go new file mode 100644 index 0000000000..6fca15a63c --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/debug/assert.go @@ -0,0 +1,12 @@ +package debug + +import ( + "reflect" + "testing" +) + +func assertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} diff --git a/vendor/github.com/alibabacloud-go/debug/debug/debug.go b/vendor/github.com/alibabacloud-go/debug/debug/debug.go new file mode 100644 index 0000000000..c977cb8c3d --- /dev/null +++ b/vendor/github.com/alibabacloud-go/debug/debug/debug.go @@ -0,0 +1,36 @@ +package debug + +import ( + "fmt" + "os" + "strings" +) + +type Debug func(format string, v ...interface{}) + +var hookGetEnv = func() string { + return os.Getenv("DEBUG") +} + +var hookPrint = func(input string) { + fmt.Println(input) +} + +func Init(flag string) Debug { + enable := false + + env := hookGetEnv() + parts := strings.Split(env, ",") + for _, part := range parts { + if part == flag { + enable = true + break + } + } + + return func(format string, v ...interface{}) { + if enable { + hookPrint(fmt.Sprintf(format, v...)) + } + } +} diff --git a/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE b/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/endpoint-util/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
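Stepping back to the OpenAPI client above: CallApi validates the Params and routes to DoRequest (the V3 ACS3-* signatures) unless the client's SignatureAlgorithm is "v2", in which case it falls back to DoROARequest / DoROARequestWithForm (ROA style) or DoRPCRequest. A hedged usage sketch follows; the action, version, endpoint, and credential values are illustrative, not taken from this diff:

package main

import (
	"fmt"

	openapi "github.com/alibabacloud-go/darabonba-openapi/client"
	util "github.com/alibabacloud-go/tea-utils/service"
	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	// Hypothetical config values; real product SDKs set their own endpoint.
	config := &openapi.Config{
		AccessKeyId:     tea.String("<access-key-id>"),
		AccessKeySecret: tea.String("<access-key-secret>"),
		Endpoint:        tea.String("example.aliyuncs.com"),
	}
	client, err := openapi.NewClient(config)
	if err != nil {
		panic(err)
	}

	// Params describe one API operation; CallApi picks the signing path
	// based on Style, ReqBodyType, and the client's SignatureAlgorithm.
	params := &openapi.Params{
		Action:      tea.String("DescribeExample"), // hypothetical action
		Version:     tea.String("2022-01-01"),
		Protocol:    tea.String("HTTPS"),
		Method:      tea.String("POST"),
		AuthType:    tea.String("AK"),
		Style:       tea.String("RPC"),
		Pathname:    tea.String("/"),
		ReqBodyType: tea.String("json"),
		BodyType:    tea.String("json"),
	}
	req := &openapi.OpenApiRequest{
		Query: map[string]*string{"RegionId": tea.String("cn-hangzhou")},
	}
	runtime := &util.RuntimeOptions{
		Autoretry:   tea.Bool(true), // feeds the "retry"/"backoff" runtime map above
		MaxAttempts: tea.Int(3),
	}

	resp, err := client.CallApi(params, req, runtime)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp["body"])
}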
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go b/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go new file mode 100644 index 0000000000..85e5fda6ee --- /dev/null +++ b/vendor/github.com/alibabacloud-go/endpoint-util/service/service.go @@ -0,0 +1,41 @@ +// This file is auto-generated, don't edit it. Thanks. 
+/**
+ * Get endpoint
+ * @return string
+ */
+package service
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/alibabacloud-go/tea/tea"
+)
+
+func GetEndpointRules(product, regionId, endpointType, network, suffix *string) (_result *string, _err error) {
+	if tea.StringValue(endpointType) == "regional" {
+		if tea.StringValue(regionId) == "" {
+			_err = fmt.Errorf("RegionId is empty, please set a valid RegionId")
+			return tea.String(""), _err
+		}
+		_result = tea.String(strings.Replace("<product><suffix><network>.<region_id>.aliyuncs.com",
+			"<region_id>", tea.StringValue(regionId), 1))
+	} else {
+		_result = tea.String("<product><suffix><network>.aliyuncs.com")
+	}
+	_result = tea.String(strings.Replace(tea.StringValue(_result),
+		"<product>", strings.ToLower(tea.StringValue(product)), 1))
+	if tea.StringValue(network) == "" || tea.StringValue(network) == "public" {
+		_result = tea.String(strings.Replace(tea.StringValue(_result), "<network>", "", 1))
+	} else {
+		_result = tea.String(strings.Replace(tea.StringValue(_result),
+			"<network>", "-"+tea.StringValue(network), 1))
+	}
+	if tea.StringValue(suffix) == "" {
+		_result = tea.String(strings.Replace(tea.StringValue(_result), "<suffix>", "", 1))
+	} else {
+		_result = tea.String(strings.Replace(tea.StringValue(_result),
+			"<suffix>", "-"+tea.StringValue(suffix), 1))
+	}
+	return _result, nil
+}
diff --git a/vendor/github.com/alibabacloud-go/openapi-util/LICENSE b/vendor/github.com/alibabacloud-go/openapi-util/LICENSE
new file mode 100644
index 0000000000..0c44dcefe3
--- /dev/null
+++ b/vendor/github.com/alibabacloud-go/openapi-util/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship.
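A quick usage sketch for the GetEndpointRules helper vendored above; the product, region, and network values here are illustrative. Empty placeholders collapse, so a public-network, no-suffix lookup yields a plain regional host:

package main

import (
	"fmt"

	endpointutil "github.com/alibabacloud-go/endpoint-util/service"
	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	// Hypothetical inputs: a "regional" rule for product "cr" in cn-hangzhou,
	// public network, no suffix.
	ep, err := endpointutil.GetEndpointRules(tea.String("cr"), tea.String("cn-hangzhou"),
		tea.String("regional"), tea.String(""), tea.String(""))
	if err != nil {
		panic(err)
	}
	fmt.Println(tea.StringValue(ep)) // cr.cn-hangzhou.aliyuncs.com
}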
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/openapi-util/service/service.go b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go new file mode 100644 index 0000000000..245eeccb08 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/openapi-util/service/service.go @@ -0,0 +1,635 @@ +// This file is auto-generated, don't edit it. Thanks. 
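The openapi-util service below bundles the signing primitives the client above relies on: ROA header signing (GetStringToSign / GetROASignature, HMAC-SHA1 over a canonicalized request), V3 request signing (GetAuthorization with the ACS3-* algorithms), RPC query signing (GetRPCSignature), and form/query flattening (ToForm / Query). As a first taste, a sketch of the ROA pair defined later in the file; the header and key values are illustrative:

package main

import (
	"fmt"

	openapiutil "github.com/alibabacloud-go/openapi-util/service"
	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	// Build a minimal tea.Request; values are illustrative.
	req := tea.NewRequest()
	req.Method = tea.String("GET")
	req.Pathname = tea.String("/repos")
	req.Headers = map[string]*string{
		"date":   tea.String("Mon, 02 Jan 2006 15:04:05 GMT"),
		"accept": tea.String("application/json"),
	}

	// String-to-sign: method, accept, content-md5, content-type, date,
	// then the sorted x-acs-* headers and the canonicalized resource.
	stringToSign := openapiutil.GetStringToSign(req)
	sig := openapiutil.GetROASignature(stringToSign, tea.String("<access-key-secret>"))
	// The client above sends this as: authorization: acs <access-key-id>:<signature>
	fmt.Println("acs <access-key-id>:" + tea.StringValue(sig))
}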
+/** + * This is for OpenApi Util + */ +package service + +import ( + "bytes" + "crypto" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "hash" + "io" + "net/http" + "net/textproto" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" + "github.com/tjfoc/gmsm/sm3" +) + +const ( + PEM_BEGIN = "-----BEGIN RSA PRIVATE KEY-----\n" + PEM_END = "\n-----END RSA PRIVATE KEY-----" +) + +type Sorter struct { + Keys []string + Vals []string +} + +func newSorter(m map[string]string) *Sorter { + hs := &Sorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *Sorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *Sorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. +func (hs *Sorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. +func (hs *Sorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} + +/** + * Convert all params of body other than type of readable into content + * @param body source Model + * @param content target Model + * @return void + */ +func Convert(body interface{}, content interface{}) { + res := make(map[string]interface{}) + val := reflect.ValueOf(body).Elem() + dataType := val.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, _ := field.Tag.Lookup("json") + name = strings.Split(name, ",omitempty")[0] + _, ok := val.Field(i).Interface().(io.Reader) + if !ok { + res[name] = val.Field(i).Interface() + } + } + byt, _ := json.Marshal(res) + json.Unmarshal(byt, content) +} + +/** + * Get the string to be signed according to request + * @param request which contains signed messages + * @return the signed string + */ +func GetStringToSign(request *tea.Request) (_result *string) { + return tea.String(getStringToSign(request)) +} + +func getStringToSign(request *tea.Request) string { + resource := tea.StringValue(request.Pathname) + queryParams := request.Query + // sort QueryParams by key + var queryKeys []string + for key := range queryParams { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + tmp := "" + for i := 0; i < len(queryKeys); i++ { + queryKey := queryKeys[i] + v := tea.StringValue(queryParams[queryKey]) + if v != "" { + tmp = tmp + "&" + queryKey + "=" + v + } else { + tmp = tmp + "&" + queryKey + } + } + if tmp != "" { + tmp = strings.TrimLeft(tmp, "&") + resource = resource + "?" 
+ tmp + } + return getSignedStr(request, resource) +} + +func getSignedStr(req *tea.Request, canonicalizedResource string) string { + temp := make(map[string]string) + + for k, v := range req.Headers { + if strings.HasPrefix(strings.ToLower(k), "x-acs-") { + temp[strings.ToLower(k)] = tea.StringValue(v) + } + } + hs := newSorter(temp) + + // Sort the temp by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, date is expires + date := tea.StringValue(req.Headers["date"]) + accept := tea.StringValue(req.Headers["accept"]) + contentType := tea.StringValue(req.Headers["content-type"]) + contentMd5 := tea.StringValue(req.Headers["content-md5"]) + + signStr := tea.StringValue(req.Method) + "\n" + accept + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + return signStr +} + +/** + * Get signature according to stringToSign, secret + * @param stringToSign the signed string + * @param secret accesskey secret + * @return the signature + */ +func GetROASignature(stringToSign *string, secret *string) (_result *string) { + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(tea.StringValue(secret))) + io.WriteString(h, tea.StringValue(stringToSign)) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return tea.String(signedStr) +} + +func GetEndpoint(endpoint *string, server *bool, endpointType *string) *string { + if tea.StringValue(endpointType) == "internal" { + strs := strings.Split(tea.StringValue(endpoint), ".") + strs[0] += "-internal" + endpoint = tea.String(strings.Join(strs, ".")) + } + if tea.BoolValue(server) && tea.StringValue(endpointType) == "accelerate" { + return tea.String("oss-accelerate.aliyuncs.com") + } + + return endpoint +} + +func HexEncode(raw []byte) *string { + return tea.String(hex.EncodeToString(raw)) +} + +func Hash(raw []byte, signatureAlgorithm *string) []byte { + signType := tea.StringValue(signatureAlgorithm) + if signType == "ACS3-HMAC-SHA256" || signType == "ACS3-RSA-SHA256" { + h := sha256.New() + h.Write(raw) + return h.Sum(nil) + } else if signType == "ACS3-HMAC-SM3" { + h := sm3.New() + h.Write(raw) + return h.Sum(nil) + } + return nil +} + +func GetEncodePath(path *string) *string { + uri := tea.StringValue(path) + strs := strings.Split(uri, "/") + for i, v := range strs { + strs[i] = url.QueryEscape(v) + } + uri = strings.Join(strs, "/") + uri = strings.Replace(uri, "+", "%20", -1) + uri = strings.Replace(uri, "*", "%2A", -1) + uri = strings.Replace(uri, "%7E", "~", -1) + return tea.String(uri) +} + +func GetEncodeParam(param *string) *string { + uri := tea.StringValue(param) + uri = url.QueryEscape(uri) + uri = strings.Replace(uri, "+", "%20", -1) + uri = strings.Replace(uri, "*", "%2A", -1) + uri = strings.Replace(uri, "%7E", "~", -1) + return tea.String(uri) +} + +func GetAuthorization(request *tea.Request, signatureAlgorithm, payload, acesskey, secret *string) *string { + canonicalURI := tea.StringValue(request.Pathname) + if canonicalURI == "" { + canonicalURI = "/" + } + + canonicalURI = strings.Replace(canonicalURI, "+", "%20", -1) + canonicalURI = strings.Replace(canonicalURI, "*", "%2A", -1) + canonicalURI = strings.Replace(canonicalURI, "%7E", "~", -1) + + method := tea.StringValue(request.Method) + canonicalQueryString := getCanonicalQueryString(request.Query) + 
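+	// The canonical request assembled just below joins, newline-separated:
+	// method, canonical URI, canonical query string, canonical headers, the
+	// signed-header names joined by ";", and the hex-encoded payload hash.
+	// The string to sign then prefixes the signature algorithm name and a
+	// newline to the hex-encoded hash of that canonical request.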
canonicalheaders, signedHeaders := getCanonicalHeaders(request.Headers) + + canonicalRequest := method + "\n" + canonicalURI + "\n" + canonicalQueryString + "\n" + canonicalheaders + "\n" + + strings.Join(signedHeaders, ";") + "\n" + tea.StringValue(payload) + signType := tea.StringValue(signatureAlgorithm) + StringToSign := signType + "\n" + tea.StringValue(HexEncode(Hash([]byte(canonicalRequest), signatureAlgorithm))) + signature := tea.StringValue(HexEncode(SignatureMethod(tea.StringValue(secret), StringToSign, signType))) + auth := signType + " Credential=" + tea.StringValue(acesskey) + ",SignedHeaders=" + + strings.Join(signedHeaders, ";") + ",Signature=" + signature + return tea.String(auth) +} + +func SignatureMethod(secret, source, signatureAlgorithm string) []byte { + if signatureAlgorithm == "ACS3-HMAC-SHA256" { + h := hmac.New(sha256.New, []byte(secret)) + h.Write([]byte(source)) + return h.Sum(nil) + } else if signatureAlgorithm == "ACS3-HMAC-SM3" { + h := hmac.New(sm3.New, []byte(secret)) + h.Write([]byte(source)) + return h.Sum(nil) + } else if signatureAlgorithm == "ACS3-RSA-SHA256" { + return rsaSign(source, secret) + } + return nil +} + +func rsaSign(content, secret string) []byte { + h := crypto.SHA256.New() + h.Write([]byte(content)) + hashed := h.Sum(nil) + priv, err := parsePrivateKey(secret) + if err != nil { + return nil + } + sign, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, hashed) + if err != nil { + return nil + } + return sign +} + +func parsePrivateKey(privateKey string) (*rsa.PrivateKey, error) { + privateKey = formatPrivateKey(privateKey) + block, _ := pem.Decode([]byte(privateKey)) + if block == nil { + return nil, errors.New("PrivateKey is invalid") + } + priKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + switch priKey.(type) { + case *rsa.PrivateKey: + return priKey.(*rsa.PrivateKey), nil + default: + return nil, nil + } +} + +func formatPrivateKey(privateKey string) string { + if !strings.HasPrefix(privateKey, PEM_BEGIN) { + privateKey = PEM_BEGIN + privateKey + } + + if !strings.HasSuffix(privateKey, PEM_END) { + privateKey += PEM_END + } + return privateKey +} + +func getCanonicalHeaders(headers map[string]*string) (string, []string) { + tmp := make(map[string]string) + tmpHeader := http.Header{} + for k, v := range headers { + if strings.HasPrefix(strings.ToLower(k), "x-acs-") || strings.ToLower(k) == "host" || + strings.ToLower(k) == "content-type" { + tmp[strings.ToLower(k)] = strings.TrimSpace(tea.StringValue(v)) + tmpHeader.Add(strings.ToLower(k), strings.TrimSpace(tea.StringValue(v))) + } + } + hs := newSorter(tmp) + + // Sort the temp by the ascending order + hs.Sort() + canonicalheaders := "" + for _, key := range hs.Keys { + vals := tmpHeader[textproto.CanonicalMIMEHeaderKey(key)] + sort.Strings(vals) + canonicalheaders += key + ":" + strings.Join(vals, ",") + "\n" + } + + return canonicalheaders, hs.Keys +} + +func getCanonicalQueryString(query map[string]*string) string { + canonicalQueryString := "" + if tea.BoolValue(util.IsUnset(query)) { + return canonicalQueryString + } + tmp := make(map[string]string) + for k, v := range query { + tmp[k] = tea.StringValue(v) + } + + hs := newSorter(tmp) + + // Sort the temp by the ascending order + hs.Sort() + for i := range hs.Keys { + if hs.Vals[i] != "" { + canonicalQueryString += "&" + hs.Keys[i] + "=" + url.QueryEscape(hs.Vals[i]) + } else { + canonicalQueryString += "&" + hs.Keys[i] + "=" + } + } + canonicalQueryString = 
strings.Replace(canonicalQueryString, "+", "%20", -1)
+	canonicalQueryString = strings.Replace(canonicalQueryString, "*", "%2A", -1)
+	canonicalQueryString = strings.Replace(canonicalQueryString, "%7E", "~", -1)
+
+	if canonicalQueryString != "" {
+		canonicalQueryString = strings.TrimLeft(canonicalQueryString, "&")
+	}
+	return canonicalQueryString
+}
+
+/**
+ * Parse filter into a form string
+ * @param filter object
+ * @return the string
+ */
+func ToForm(filter map[string]interface{}) (_result *string) {
+	tmp := make(map[string]interface{})
+	byt, _ := json.Marshal(filter)
+	d := json.NewDecoder(bytes.NewReader(byt))
+	d.UseNumber()
+	_ = d.Decode(&tmp)
+
+	result := make(map[string]*string)
+	for key, value := range tmp {
+		filterValue := reflect.ValueOf(value)
+		flatRepeatedList(filterValue, result, key)
+	}
+
+	m := util.AnyifyMapValue(result)
+	return util.ToFormString(m)
+}
+
+func flatRepeatedList(dataValue reflect.Value, result map[string]*string, prefix string) {
+	if !dataValue.IsValid() {
+		return
+	}
+
+	dataType := dataValue.Type()
+	if dataType.Kind().String() == "slice" {
+		handleRepeatedParams(dataValue, result, prefix)
+	} else if dataType.Kind().String() == "map" {
+		handleMap(dataValue, result, prefix)
+	} else {
+		result[prefix] = tea.String(fmt.Sprintf("%v", dataValue.Interface()))
+	}
+}
+
+func handleRepeatedParams(repeatedFieldValue reflect.Value, result map[string]*string, prefix string) {
+	if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() {
+		for m := 0; m < repeatedFieldValue.Len(); m++ {
+			elementValue := repeatedFieldValue.Index(m)
+			key := prefix + "." + strconv.Itoa(m+1)
+			fieldValue := reflect.ValueOf(elementValue.Interface())
+			if fieldValue.Kind().String() == "map" {
+				handleMap(fieldValue, result, key)
+			} else {
+				result[key] = tea.String(fmt.Sprintf("%v", fieldValue.Interface()))
+			}
+		}
+	}
+}
+
+func handleMap(valueField reflect.Value, result map[string]*string, prefix string) {
+	if valueField.IsValid() && valueField.String() != "" {
+		valueFieldType := valueField.Type()
+		if valueFieldType.Kind().String() == "map" {
+			var byt []byte
+			byt, _ = json.Marshal(valueField.Interface())
+			cache := make(map[string]interface{})
+			d := json.NewDecoder(bytes.NewReader(byt))
+			d.UseNumber()
+			_ = d.Decode(&cache)
+			for key, value := range cache {
+				pre := ""
+				if prefix != "" {
+					pre = prefix + "." + key
+				} else {
+					pre = key
+				}
+				fieldValue := reflect.ValueOf(value)
+				flatRepeatedList(fieldValue, result, pre)
+			}
+		}
+	}
+}
+
+/**
+ * Get timestamp
+ * @return the timestamp string
+ */
+func GetTimestamp() (_result *string) {
+	gmt := time.FixedZone("GMT", 0)
+	return tea.String(time.Now().In(gmt).Format("2006-01-02T15:04:05Z"))
+}
+
+/**
+ * Parse filter into an object whose type is map[string]string
+ * @param filter query param
+ * @return the object
+ */
+func Query(filter interface{}) (_result map[string]*string) {
+	tmp := make(map[string]interface{})
+	byt, _ := json.Marshal(filter)
+	d := json.NewDecoder(bytes.NewReader(byt))
+	d.UseNumber()
+	_ = d.Decode(&tmp)
+
+	result := make(map[string]*string)
+	for key, value := range tmp {
+		filterValue := reflect.ValueOf(value)
+		flatRepeatedList(filterValue, result, key)
+	}
+
+	return result
+}
+
+/**
+ * Get signature according to signedParams, method and secret
+ * @param signedParams params which need to be signed
+ * @param method http method e.g.
+/**
+ * Get signature according to signedParams, method and secret
+ * @param signedParams params which need to be signed
+ * @param method http method, e.g. GET
+ * @param secret AccessKeySecret
+ * @return the signature
+ */
+func GetRPCSignature(signedParams map[string]*string, method *string, secret *string) (_result *string) {
+	stringToSign := buildRpcStringToSign(signedParams, tea.StringValue(method))
+	signature := sign(stringToSign, tea.StringValue(secret), "&")
+	return tea.String(signature)
+}
+
+/**
+ * Parse array into a string with specified style
+ * @param array the array
+ * @param prefix the prefix string
+ * @param style specified style, e.g. repeatList
+ * @return the string
+ */
+func ArrayToStringWithSpecifiedStyle(array interface{}, prefix *string, style *string) (_result *string) {
+	if tea.BoolValue(util.IsUnset(array)) {
+		return tea.String("")
+	}
+
+	sty := tea.StringValue(style)
+	if sty == "repeatList" {
+		tmp := map[string]interface{}{
+			tea.StringValue(prefix): array,
+		}
+		return flatRepeatList(tmp)
+	} else if sty == "simple" || sty == "spaceDelimited" || sty == "pipeDelimited" {
+		return flatArray(array, sty)
+	} else if sty == "json" {
+		return util.ToJSONString(array)
+	}
+	return tea.String("")
+}
+
+func ParseToMap(in interface{}) map[string]interface{} {
+	if tea.BoolValue(util.IsUnset(in)) {
+		return nil
+	}
+
+	tmp := make(map[string]interface{})
+	byt, _ := json.Marshal(in)
+	d := json.NewDecoder(bytes.NewReader(byt))
+	d.UseNumber()
+	err := d.Decode(&tmp)
+	if err != nil {
+		return nil
+	}
+	return tmp
+}
+
+func flatRepeatList(filter map[string]interface{}) (_result *string) {
+	tmp := make(map[string]interface{})
+	byt, _ := json.Marshal(filter)
+	d := json.NewDecoder(bytes.NewReader(byt))
+	d.UseNumber()
+	_ = d.Decode(&tmp)
+
+	result := make(map[string]*string)
+	for key, value := range tmp {
+		filterValue := reflect.ValueOf(value)
+		flatRepeatedList(filterValue, result, key)
+	}
+
+	res := make(map[string]string)
+	for k, v := range result {
+		res[k] = tea.StringValue(v)
+	}
+	hs := newSorter(res)
+
+	hs.Sort()
+
+	// Join the sorted key=value pairs with "&&"
+	t := ""
+	for i := range hs.Keys {
+		if i == len(hs.Keys)-1 {
+			t += hs.Keys[i] + "=" + hs.Vals[i]
+		} else {
+			t += hs.Keys[i] + "=" + hs.Vals[i] + "&&"
+		}
+	}
+	return tea.String(t)
+}
+
+func flatArray(array interface{}, sty string) *string {
+	t := reflect.ValueOf(array)
+	strs := make([]string, 0)
+	for i := 0; i < t.Len(); i++ {
+		tmp := t.Index(i)
+		if tmp.Kind() == reflect.Ptr || tmp.Kind() == reflect.Interface {
+			tmp = tmp.Elem()
+		}
+
+		if tmp.Kind() == reflect.Ptr {
+			tmp = tmp.Elem()
+		}
+		if tmp.Kind() == reflect.String {
+			strs = append(strs, tmp.String())
+		} else {
+			inter := tmp.Interface()
+			byt, _ := json.Marshal(inter)
+			strs = append(strs, string(byt))
+		}
+	}
+	str := ""
+	if sty == "simple" {
+		str = strings.Join(strs, ",")
+	} else if sty == "spaceDelimited" {
+		str = strings.Join(strs, " ")
+	} else if sty == "pipeDelimited" {
+		str = strings.Join(strs, "|")
+	}
+	return tea.String(str)
+}
+
+func buildRpcStringToSign(signedParam map[string]*string, method string) (stringToSign string) {
+	signParams := make(map[string]string)
+	for key, value := range signedParam {
+		signParams[key] = tea.StringValue(value)
+	}
+
+	stringToSign = getUrlFormedMap(signParams)
+	stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
+	stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
+	stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
+	stringToSign = url.QueryEscape(stringToSign)
+	stringToSign = method + "&%2F&" + stringToSign
+	return
+}
+
+func getUrlFormedMap(source map[string]string) (urlEncoded string) {
urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func sign(stringToSign, accessKeySecret, secretSuffix string) string { + secret := accessKeySecret + secretSuffix + signedBytes := shaHmac1(stringToSign, secret) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +func shaHmac1(source, secret string) []byte { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + return hmac.Sum(nil) +} diff --git a/vendor/github.com/alibabacloud-go/tea-utils/LICENSE b/vendor/github.com/alibabacloud-go/tea-utils/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/alibabacloud-go/tea-utils/service/service.go b/vendor/github.com/alibabacloud-go/tea-utils/service/service.go new file mode 100644 index 0000000000..2b69357238 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/service/service.go @@ -0,0 +1,462 @@ +package service + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/alibabacloud-go/tea/tea" +) + +var defaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s TeaDSL/1", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), "0.01") + +type RuntimeOptions struct { + Autoretry *bool `json:"autoretry" xml:"autoretry"` + IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"` + MaxAttempts *int `json:"maxAttempts" xml:"maxAttempts"` + BackoffPolicy *string `json:"backoffPolicy" xml:"backoffPolicy"` + BackoffPeriod *int `json:"backoffPeriod" xml:"backoffPeriod"` + ReadTimeout *int `json:"readTimeout" xml:"readTimeout"` + ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"` + LocalAddr *string `json:"localAddr" xml:"localAddr"` + HttpProxy *string `json:"httpProxy" xml:"httpProxy"` + HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"` + NoProxy *string `json:"noProxy" xml:"noProxy"` + MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"` + Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"` + Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"` +} + +func (s RuntimeOptions) String() string { + return tea.Prettify(s) +} + +func (s RuntimeOptions) GoString() string { + return s.String() +} + +func (s *RuntimeOptions) SetAutoretry(v bool) *RuntimeOptions { + s.Autoretry = &v + return s +} + +func (s *RuntimeOptions) SetIgnoreSSL(v bool) *RuntimeOptions { + s.IgnoreSSL = &v + return s +} + +func (s *RuntimeOptions) SetMaxAttempts(v int) *RuntimeOptions { + s.MaxAttempts = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPolicy(v string) *RuntimeOptions { + s.BackoffPolicy = &v + return s +} + +func (s *RuntimeOptions) SetBackoffPeriod(v int) *RuntimeOptions { + s.BackoffPeriod = &v + return s +} + +func (s *RuntimeOptions) SetReadTimeout(v int) *RuntimeOptions { + s.ReadTimeout = &v + return s +} + +func (s *RuntimeOptions) SetConnectTimeout(v int) 
*RuntimeOptions { + s.ConnectTimeout = &v + return s +} + +func (s *RuntimeOptions) SetHttpProxy(v string) *RuntimeOptions { + s.HttpProxy = &v + return s +} + +func (s *RuntimeOptions) SetHttpsProxy(v string) *RuntimeOptions { + s.HttpsProxy = &v + return s +} + +func (s *RuntimeOptions) SetNoProxy(v string) *RuntimeOptions { + s.NoProxy = &v + return s +} + +func (s *RuntimeOptions) SetMaxIdleConns(v int) *RuntimeOptions { + s.MaxIdleConns = &v + return s +} + +func (s *RuntimeOptions) SetLocalAddr(v string) *RuntimeOptions { + s.LocalAddr = &v + return s +} + +func (s *RuntimeOptions) SetSocks5Proxy(v string) *RuntimeOptions { + s.Socks5Proxy = &v + return s +} + +func (s *RuntimeOptions) SetSocks5NetWork(v string) *RuntimeOptions { + s.Socks5NetWork = &v + return s +} + +func ReadAsString(body io.Reader) (*string, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return tea.String(""), err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return tea.String(string(byt)), nil +} + +func StringifyMapValue(a map[string]interface{}) map[string]*string { + res := make(map[string]*string) + for key, value := range a { + if value != nil { + switch value.(type) { + case string: + res[key] = tea.String(value.(string)) + default: + byt, _ := json.Marshal(value) + res[key] = tea.String(string(byt)) + } + } + } + return res +} + +func AnyifyMapValue(a map[string]*string) map[string]interface{} { + res := make(map[string]interface{}) + for key, value := range a { + res[key] = tea.StringValue(value) + } + return res +} + +func ReadAsBytes(body io.Reader) ([]byte, error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + return byt, nil +} + +func DefaultString(reaStr, defaultStr *string) *string { + if reaStr == nil { + return defaultStr + } + return reaStr +} + +func ToJSONString(a interface{}) *string { + switch v := a.(type) { + case *string: + return v + case string: + return tea.String(v) + case []byte: + return tea.String(string(v)) + case io.Reader: + byt, err := ioutil.ReadAll(v) + if err != nil { + return nil + } + return tea.String(string(byt)) + } + byt, err := json.Marshal(a) + if err != nil { + return nil + } + return tea.String(string(byt)) +} + +func DefaultNumber(reaNum, defaultNum *int) *int { + if reaNum == nil { + return defaultNum + } + return reaNum +} + +func ReadAsJSON(body io.Reader) (result interface{}, err error) { + byt, err := ioutil.ReadAll(body) + if err != nil { + return + } + if string(byt) == "" { + return + } + r, ok := body.(io.ReadCloser) + if ok { + r.Close() + } + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err = d.Decode(&result) + return +} + +func GetNonce() *string { + return tea.String(getUUID()) +} + +func Empty(val *string) *bool { + return tea.Bool(val == nil || tea.StringValue(val) == "") +} + +func ValidateModel(a interface{}) error { + if a == nil { + return nil + } + err := tea.Validate(a) + return err +} + +func EqualString(val1, val2 *string) *bool { + return tea.Bool(tea.StringValue(val1) == tea.StringValue(val2)) +} + +func EqualNumber(val1, val2 *int) *bool { + return tea.Bool(tea.IntValue(val1) == tea.IntValue(val2)) +} + +func IsUnset(val interface{}) *bool { + if val == nil { + return tea.Bool(true) + } + + v := reflect.ValueOf(val) + if v.Kind() == reflect.Ptr || v.Kind() == reflect.Slice || v.Kind() == reflect.Map { + return tea.Bool(v.IsNil()) + } + + valType := reflect.TypeOf(val) + valZero := 
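A side note on the Set* methods above: they follow the SDK's pointer-field builder convention, where every option is a pointer so "unset" stays distinguishable from a zero value, and each setter stores the address of its argument and returns the receiver so calls chain. A hypothetical usage sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/alibabacloud-go/tea-utils/service"
)

func main() {
	// Chained setters; timeout values are in milliseconds.
	opts := new(service.RuntimeOptions).
		SetAutoretry(true).
		SetMaxAttempts(3).
		SetReadTimeout(10000)
	fmt.Println(opts) // String() pretty-prints via tea.Prettify
}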
reflect.Zero(valType) + return tea.Bool(valZero == v) +} + +func ToBytes(a *string) []byte { + return []byte(tea.StringValue(a)) +} + +func AssertAsMap(a interface{}) map[string]interface{} { + r := reflect.ValueOf(a) + if r.Kind().String() != "map" { + panic(fmt.Sprintf("%v is not a map[string]interface{}", a)) + } + + res := make(map[string]interface{}) + tmp := r.MapKeys() + for _, key := range tmp { + res[key.String()] = r.MapIndex(key).Interface() + } + + return res +} + +func AssertAsNumber(a interface{}) *int { + res := 0 + switch a.(type) { + case int: + tmp := a.(int) + res = tmp + case *int: + tmp := a.(*int) + res = tea.IntValue(tmp) + default: + panic(fmt.Sprintf("%v is not a int", a)) + } + + return tea.Int(res) +} + +func AssertAsBoolean(a interface{}) *bool { + res := false + switch a.(type) { + case bool: + tmp := a.(bool) + res = tmp + case *bool: + tmp := a.(*bool) + res = tea.BoolValue(tmp) + default: + panic(fmt.Sprintf("%v is not a bool", a)) + } + + return tea.Bool(res) +} + +func AssertAsString(a interface{}) *string { + res := "" + switch a.(type) { + case string: + tmp := a.(string) + res = tmp + case *string: + tmp := a.(*string) + res = tea.StringValue(tmp) + default: + panic(fmt.Sprintf("%v is not a string", a)) + } + + return tea.String(res) +} + +func AssertAsBytes(a interface{}) []byte { + res, ok := a.([]byte) + if !ok { + panic(fmt.Sprintf("%v is not []byte", a)) + } + return res +} + +func AssertAsReadable(a interface{}) io.Reader { + res, ok := a.(io.Reader) + if !ok { + panic(fmt.Sprintf("%v is not reader", a)) + } + return res +} + +func AssertAsArray(a interface{}) []interface{} { + r := reflect.ValueOf(a) + if r.Kind().String() != "array" && r.Kind().String() != "slice" { + panic(fmt.Sprintf("%v is not a [x]interface{}", a)) + } + aLen := r.Len() + res := make([]interface{}, 0) + for i := 0; i < aLen; i++ { + res = append(res, r.Index(i).Interface()) + } + return res +} + +func ParseJSON(a *string) interface{} { + mapTmp := make(map[string]interface{}) + d := json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err := d.Decode(&mapTmp) + if err == nil { + return mapTmp + } + + sliceTmp := make([]interface{}, 0) + d = json.NewDecoder(bytes.NewReader([]byte(tea.StringValue(a)))) + d.UseNumber() + err = d.Decode(&sliceTmp) + if err == nil { + return sliceTmp + } + + if num, err := strconv.Atoi(tea.StringValue(a)); err == nil { + return num + } + + if ok, err := strconv.ParseBool(tea.StringValue(a)); err == nil { + return ok + } + + if floa64tVal, err := strconv.ParseFloat(tea.StringValue(a), 64); err == nil { + return floa64tVal + } + return nil +} + +func ToString(a []byte) *string { + return tea.String(string(a)) +} + +func ToMap(in interface{}) map[string]interface{} { + if in == nil { + return nil + } + res := tea.ToMap(in) + return res +} + +func ToFormString(a map[string]interface{}) *string { + if a == nil { + return tea.String("") + } + res := "" + urlEncoder := url.Values{} + for key, value := range a { + v := fmt.Sprintf("%v", value) + urlEncoder.Add(key, v) + } + res = urlEncoder.Encode() + return tea.String(res) +} + +func GetDateUTCString() *string { + return tea.String(time.Now().UTC().Format(http.TimeFormat)) +} + +func GetUserAgent(userAgent *string) *string { + if userAgent != nil && tea.StringValue(userAgent) != "" { + return tea.String(defaultUserAgent + " " + tea.StringValue(userAgent)) + } + return tea.String(defaultUserAgent) +} + +func Is2xx(code *int) *bool { + tmp := tea.IntValue(code) + return 
tea.Bool(tmp >= 200 && tmp < 300) +} + +func Is3xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 300 && tmp < 400) +} + +func Is4xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 400 && tmp < 500) +} + +func Is5xx(code *int) *bool { + tmp := tea.IntValue(code) + return tea.Bool(tmp >= 500 && tmp < 600) +} + +func Sleep(millisecond *int) error { + ms := tea.IntValue(millisecond) + time.Sleep(time.Duration(ms) * time.Millisecond) + return nil +} + +func ToArray(in interface{}) []map[string]interface{} { + if tea.BoolValue(IsUnset(in)) { + return nil + } + + tmp := make([]map[string]interface{}, 0) + byt, _ := json.Marshal(in) + d := json.NewDecoder(bytes.NewReader(byt)) + d.UseNumber() + err := d.Decode(&tmp) + if err != nil { + return nil + } + return tmp +} diff --git a/vendor/github.com/alibabacloud-go/tea-utils/service/util.go b/vendor/github.com/alibabacloud-go/tea-utils/service/util.go new file mode 100644 index 0000000000..a73cb56008 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-utils/service/util.go @@ -0,0 +1,52 @@ +package service + +import ( + "crypto/md5" + "crypto/rand" + "encoding/hex" + "hash" + rand2 "math/rand" +) + +type UUID [16]byte + +const numBytes = "1234567890" + +func getUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +func randStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = numBytes[rand2.Intn(len(numBytes))] + } + return string(b) +} + +func newUUID() UUID { + ns := UUID{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, randStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/alibabacloud-go/tea-xml/service/service.go b/vendor/github.com/alibabacloud-go/tea-xml/service/service.go new file mode 100644 index 0000000000..33139c74b9 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea-xml/service/service.go @@ -0,0 +1,105 @@ +package service + +import ( + "bytes" + "encoding/xml" + "fmt" + "reflect" + "strings" + + "github.com/alibabacloud-go/tea/tea" + v2 "github.com/clbanning/mxj/v2" +) + +func ToXML(obj map[string]interface{}) *string { + return tea.String(mapToXML(obj)) +} + +func ParseXml(val *string, result interface{}) map[string]interface{} { + resp := make(map[string]interface{}) + + start := getStartElement([]byte(tea.StringValue(val))) + if result == nil { + vm, err := v2.NewMapXml([]byte(tea.StringValue(val))) + if err != nil { + return nil + } + return vm + } + out, err := xmlUnmarshal([]byte(tea.StringValue(val)), result) + if err != nil { + return resp + } + resp[start] = out + return resp +} + +func mapToXML(val map[string]interface{}) string { + res := "" + for key, value := range val { + switch value.(type) { + case []interface{}: + for _, v := range value.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + res += `<` + key + `>` + res += mapToXML(v.(map[string]interface{})) + res += `` + default: + if fmt.Sprintf("%v", v) != `` { + res += `<` + key + `>` + res += fmt.Sprintf("%v", v) + res += `` + } + } + } + case map[string]interface{}: + res += `<` + key + `>` + res += mapToXML(value.(map[string]interface{})) + res += `` + default: 
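On the nonce helper in util.go above: newUUID hashes 16 random bytes plus a random digit string with MD5, then forces the version nibble and the RFC 4122 variant bits. The two masks are easier to read in isolation; a rough sketch using crypto/rand directly (not the vendored code):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	var u [16]byte
	if _, err := rand.Read(u[:]); err != nil {
		panic(err)
	}
	// Force the "version" nibble (2 here, matching the vendored code) and the
	// RFC 4122 variant bits (10xxxxxx) in byte 8; 0x3f is 0xff>>2, 0x80 is 0x02<<6.
	u[6] = (u[6] & 0x0f) | (2 << 4)
	u[8] = (u[8] & 0x3f) | 0x80
	fmt.Println(hex.EncodeToString(u[:]))
}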
+ if fmt.Sprintf("%v", value) != `` { + res += `<` + key + `>` + res += fmt.Sprintf("%v", value) + res += `` + } + } + } + return res +} + +func getStartElement(body []byte) string { + d := xml.NewDecoder(bytes.NewReader(body)) + for { + tok, err := d.Token() + if err != nil { + return "" + } + if t, ok := tok.(xml.StartElement); ok { + return t.Name.Local + } + } +} + +func xmlUnmarshal(body []byte, result interface{}) (interface{}, error) { + start := getStartElement(body) + dataValue := reflect.ValueOf(result).Elem() + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("xml") + name = strings.Replace(name, ",omitempty", "", -1) + if containsNameTag { + if name == start { + realType := dataValue.Field(i).Type() + realValue := reflect.New(realType).Interface() + err := xml.Unmarshal(body, realValue) + if err != nil { + return nil, err + } + return realValue, nil + } + } + } + return nil, nil +} diff --git a/vendor/github.com/alibabacloud-go/tea/LICENSE b/vendor/github.com/alibabacloud-go/tea/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go new file mode 100644 index 0000000000..b3f202243d --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/json_parser.go @@ -0,0 +1,333 @@ +package tea + +import ( + "encoding/json" + "io" + "math" + "reflect" + "strconv" + "strings" + "unsafe" + + jsoniter "github.com/json-iterator/go" + "github.com/modern-go/reflect2" +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +var jsonParser jsoniter.API + +func init() { + jsonParser = jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + + jsonParser.RegisterExtension(newBetterFuzzyExtension()) +} + +func newBetterFuzzyExtension() jsoniter.DecoderExtension { + return jsoniter.DecoderExtension{ + reflect2.DefaultTypeOfKind(reflect.String): &nullableFuzzyStringDecoder{}, + reflect2.DefaultTypeOfKind(reflect.Bool): &fuzzyBoolDecoder{}, + reflect2.DefaultTypeOfKind(reflect.Float32): &nullableFuzzyFloat32Decoder{}, + reflect2.DefaultTypeOfKind(reflect.Float64): &nullableFuzzyFloat64Decoder{}, + reflect2.DefaultTypeOfKind(reflect.Int): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxInt) || val < float64(minInt) { + iter.ReportError("fuzzy decode int", "exceed range") + return + } + *((*int)(ptr)) = int(val) + } else { + *((*int)(ptr)) = iter.ReadInt() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxUint) || val < 0 { + iter.ReportError("fuzzy decode uint", "exceed range") + return + } + *((*uint)(ptr)) = uint(val) + } else { + *((*uint)(ptr)) = iter.ReadUint() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt8) || val < float64(math.MinInt8) { + iter.ReportError("fuzzy decode int8", "exceed range") + return + } + *((*int8)(ptr)) = int8(val) + } else { + *((*int8)(ptr)) = iter.ReadInt8() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint8): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint8) || val < 0 { + iter.ReportError("fuzzy decode uint8", "exceed range") + return + } + *((*uint8)(ptr)) = uint8(val) + } else { + *((*uint8)(ptr)) = iter.ReadUint8() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt16) || val < float64(math.MinInt16) { + iter.ReportError("fuzzy decode int16", "exceed range") + return + } + *((*int16)(ptr)) = int16(val) + } else { + *((*int16)(ptr)) = iter.ReadInt16() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint16): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint16) || val < 0 { + iter.ReportError("fuzzy decode uint16", "exceed range") + return + } + *((*uint16)(ptr)) = uint16(val) + } else { + *((*uint16)(ptr)) = 
iter.ReadUint16() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt32) || val < float64(math.MinInt32) { + iter.ReportError("fuzzy decode int32", "exceed range") + return + } + *((*int32)(ptr)) = int32(val) + } else { + *((*int32)(ptr)) = iter.ReadInt32() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint32): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint32) || val < 0 { + iter.ReportError("fuzzy decode uint32", "exceed range") + return + } + *((*uint32)(ptr)) = uint32(val) + } else { + *((*uint32)(ptr)) = iter.ReadUint32() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Int64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt64) || val < float64(math.MinInt64) { + iter.ReportError("fuzzy decode int64", "exceed range") + return + } + *((*int64)(ptr)) = int64(val) + } else { + *((*int64)(ptr)) = iter.ReadInt64() + } + }}, + reflect2.DefaultTypeOfKind(reflect.Uint64): &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint64) || val < 0 { + iter.ReportError("fuzzy decode uint64", "exceed range") + return + } + *((*uint64)(ptr)) = uint64(val) + } else { + *((*uint64)(ptr)) = iter.ReadUint64() + } + }}, + } +} + +type nullableFuzzyStringDecoder struct { +} + +func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + *((*string)(ptr)) = string(number) + case jsoniter.StringValue: + *((*string)(ptr)) = iter.ReadString() + case jsoniter.BoolValue: + *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool()) + case jsoniter.NilValue: + iter.ReadNil() + *((*string)(ptr)) = "" + default: + iter.ReportError("fuzzyStringDecoder", "not number or string or bool") + } +} + +type fuzzyBoolDecoder struct { +} + +func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.BoolValue: + *((*bool)(ptr)) = iter.ReadBool() + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + num, err := number.Int64() + if err != nil { + iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed") + } + if num == 0 { + *((*bool)(ptr)) = false + } else { + *((*bool)(ptr)) = true + } + case jsoniter.StringValue: + strValue := strings.ToLower(iter.ReadString()) + if strValue == "true" { + *((*bool)(ptr)) = true + } else if strValue == "false" || strValue == "" { + *((*bool)(ptr)) = false + } else { + iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue) + } + case jsoniter.NilValue: + iter.ReadNil() + *((*bool)(ptr)) = false + default: + iter.ReportError("fuzzyBoolDecoder", "not number or string or nil") + } +} + +type nullableFuzzyIntegerDecoder struct { + fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) +} + +func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch 
valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + str = string(number) + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + str = "0" + } + case jsoniter.BoolValue: + if iter.ReadBool() { + str = "1" + } else { + str = "0" + } + case jsoniter.NilValue: + iter.ReadNil() + str = "0" + default: + iter.ReportError("fuzzyIntegerDecoder", "not number or string") + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + isFloat := strings.IndexByte(str, '.') != -1 + decoder.fun(isFloat, ptr, newIter) + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } +} + +type nullableFuzzyFloat32Decoder struct { +} + +func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float32)(ptr)) = iter.ReadFloat32() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float32)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float32)(ptr)) = newIter.ReadFloat32() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float32 + if iter.ReadBool() { + *((*float32)(ptr)) = 1 + } else { + *((*float32)(ptr)) = 0 + } + case jsoniter.NilValue: + iter.ReadNil() + *((*float32)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string") + } +} + +type nullableFuzzyFloat64Decoder struct { +} + +func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float64)(ptr)) = iter.ReadFloat64() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float64)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float64)(ptr)) = newIter.ReadFloat64() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case jsoniter.BoolValue: + // support bool to float64 + if iter.ReadBool() { + *((*float64)(ptr)) = 1 + } else { + *((*float64)(ptr)) = 0 + } + case jsoniter.NilValue: + // support empty string + iter.ReadNil() + *((*float64)(ptr)) = 0 + default: + iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string") + } +} diff --git a/vendor/github.com/alibabacloud-go/tea/tea/tea.go b/vendor/github.com/alibabacloud-go/tea/tea/tea.go new file mode 100644 index 0000000000..4fbddd3cbd --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/tea.go @@ -0,0 +1,1140 @@ +package tea + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "net/http" + "net/url" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/alibabacloud-go/debug/debug" + "github.com/alibabacloud-go/tea/utils" + + "golang.org/x/net/proxy" +) + +var debugLog = debug.Init("tea") + +var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) { + return fn +} + +var basicTypes = []string{ + "int", "int16", "int64", "int32", 
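The decoder extension registered above exists because some API responses carry loosely typed scalars ("8" for 8, "true" for true, null for empty). Stock jsoniter, like encoding/json, rejects those; the fuzzy decoders coerce them. A sketch of the failure mode the extension removes (the extension itself is unexported, so this only demonstrates the strict behavior):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{CaseSensitive: true}.Froze()
	var out struct {
		Count int `json:"count"`
	}
	// A server that stringifies numbers breaks strict decoding...
	err := api.Unmarshal([]byte(`{"count":"8"}`), &out)
	fmt.Println(out.Count, err) // 0 plus a type error
	// ...whereas tea's jsonParser, with the fuzzy extension registered in
	// the init above, would decode Count as 8 (and "" or null as 0).
}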
"float32", "float64", "string", "bool", "uint64", "uint32", "uint16", +} + +// Verify whether the parameters meet the requirements +var validateParams = []string{"require", "pattern", "maxLength", "minLength", "maximum", "minimum", "maxItems", "minItems"} + +// CastError is used for cast type fails +type CastError struct { + Message *string +} + +// Request is used wrap http request +type Request struct { + Protocol *string + Port *int + Method *string + Pathname *string + Domain *string + Headers map[string]*string + Query map[string]*string + Body io.Reader +} + +// Response is use d wrap http response +type Response struct { + Body io.ReadCloser + StatusCode *int + StatusMessage *string + Headers map[string]*string +} + +// SDKError struct is used save error code and message +type SDKError struct { + Code *string + StatusCode *int + Message *string + Data *string + Stack *string + errMsg *string +} + +// RuntimeObject is used for converting http configuration +type RuntimeObject struct { + IgnoreSSL *bool `json:"ignoreSSL" xml:"ignoreSSL"` + ReadTimeout *int `json:"readTimeout" xml:"readTimeout"` + ConnectTimeout *int `json:"connectTimeout" xml:"connectTimeout"` + LocalAddr *string `json:"localAddr" xml:"localAddr"` + HttpProxy *string `json:"httpProxy" xml:"httpProxy"` + HttpsProxy *string `json:"httpsProxy" xml:"httpsProxy"` + NoProxy *string `json:"noProxy" xml:"noProxy"` + MaxIdleConns *int `json:"maxIdleConns" xml:"maxIdleConns"` + Key *string `json:"key" xml:"key"` + Cert *string `json:"cert" xml:"cert"` + CA *string `json:"ca" xml:"ca"` + Socks5Proxy *string `json:"socks5Proxy" xml:"socks5Proxy"` + Socks5NetWork *string `json:"socks5NetWork" xml:"socks5NetWork"` + Listener utils.ProgressListener `json:"listener" xml:"listener"` + Tracker *utils.ReaderTracker `json:"tracker" xml:"tracker"` + Logger *utils.Logger `json:"logger" xml:"logger"` +} + +type teaClient struct { + sync.Mutex + httpClient *http.Client + ifInit bool +} + +var clientPool = &sync.Map{} + +func (r *RuntimeObject) getClientTag(domain string) string { + return strconv.FormatBool(BoolValue(r.IgnoreSSL)) + strconv.Itoa(IntValue(r.ReadTimeout)) + + strconv.Itoa(IntValue(r.ConnectTimeout)) + StringValue(r.LocalAddr) + StringValue(r.HttpProxy) + + StringValue(r.HttpsProxy) + StringValue(r.NoProxy) + StringValue(r.Socks5Proxy) + StringValue(r.Socks5NetWork) + domain +} + +// NewRuntimeObject is used for shortly create runtime object +func NewRuntimeObject(runtime map[string]interface{}) *RuntimeObject { + if runtime == nil { + return &RuntimeObject{} + } + + runtimeObject := &RuntimeObject{ + IgnoreSSL: TransInterfaceToBool(runtime["ignoreSSL"]), + ReadTimeout: TransInterfaceToInt(runtime["readTimeout"]), + ConnectTimeout: TransInterfaceToInt(runtime["connectTimeout"]), + LocalAddr: TransInterfaceToString(runtime["localAddr"]), + HttpProxy: TransInterfaceToString(runtime["httpProxy"]), + HttpsProxy: TransInterfaceToString(runtime["httpsProxy"]), + NoProxy: TransInterfaceToString(runtime["noProxy"]), + MaxIdleConns: TransInterfaceToInt(runtime["maxIdleConns"]), + Socks5Proxy: TransInterfaceToString(runtime["socks5Proxy"]), + Socks5NetWork: TransInterfaceToString(runtime["socks5NetWork"]), + Key: TransInterfaceToString(runtime["key"]), + Cert: TransInterfaceToString(runtime["cert"]), + CA: TransInterfaceToString(runtime["ca"]), + } + if runtime["listener"] != nil { + runtimeObject.Listener = runtime["listener"].(utils.ProgressListener) + } + if runtime["tracker"] != nil { + runtimeObject.Tracker = 
runtime["tracker"].(*utils.ReaderTracker) + } + if runtime["logger"] != nil { + runtimeObject.Logger = runtime["logger"].(*utils.Logger) + } + return runtimeObject +} + +// NewCastError is used for cast type fails +func NewCastError(message *string) (err error) { + return &CastError{ + Message: message, + } +} + +// NewRequest is used shortly create Request +func NewRequest() (req *Request) { + return &Request{ + Headers: map[string]*string{}, + Query: map[string]*string{}, + } +} + +// NewResponse is create response with http response +func NewResponse(httpResponse *http.Response) (res *Response) { + res = &Response{} + res.Body = httpResponse.Body + res.Headers = make(map[string]*string) + res.StatusCode = Int(httpResponse.StatusCode) + res.StatusMessage = String(httpResponse.Status) + return +} + +// NewSDKError is used for shortly create SDKError object +func NewSDKError(obj map[string]interface{}) *SDKError { + err := &SDKError{} + if val, ok := obj["code"].(int); ok { + err.Code = String(strconv.Itoa(val)) + } else if val, ok := obj["code"].(string); ok { + err.Code = String(val) + } + + if obj["message"] != nil { + err.Message = String(obj["message"].(string)) + } + if data := obj["data"]; data != nil { + r := reflect.ValueOf(data) + if r.Kind().String() == "map" { + res := make(map[string]interface{}) + tmp := r.MapKeys() + for _, key := range tmp { + res[key.String()] = r.MapIndex(key).Interface() + } + if statusCode := res["statusCode"]; statusCode != nil { + if code, ok := statusCode.(int); ok { + err.StatusCode = Int(code) + } else if tmp, ok := statusCode.(string); ok { + code, err_ := strconv.Atoi(tmp) + if err_ == nil { + err.StatusCode = Int(code) + } + } + } + } + byt, _ := json.Marshal(data) + err.Data = String(string(byt)) + } + + if statusCode, ok := obj["statusCode"].(int); ok { + err.StatusCode = Int(statusCode) + } else if status, ok := obj["statusCode"].(string); ok { + statusCode, err_ := strconv.Atoi(status) + if err_ == nil { + err.StatusCode = Int(statusCode) + } + } + + return err +} + +// Set ErrMsg by msg +func (err *SDKError) SetErrMsg(msg string) { + err.errMsg = String(msg) +} + +func (err *SDKError) Error() string { + if err.errMsg == nil { + str := fmt.Sprintf("SDKError:\n StatusCode: %d\n Code: %s\n Message: %s\n Data: %s\n", + IntValue(err.StatusCode), StringValue(err.Code), StringValue(err.Message), StringValue(err.Data)) + err.SetErrMsg(str) + } + return StringValue(err.errMsg) +} + +// Return message of CastError +func (err *CastError) Error() string { + return StringValue(err.Message) +} + +// Convert is use convert map[string]interface object to struct +func Convert(in interface{}, out interface{}) error { + byt, _ := json.Marshal(in) + decoder := jsonParser.NewDecoder(bytes.NewReader(byt)) + decoder.UseNumber() + err := decoder.Decode(&out) + return err +} + +// Convert is use convert map[string]interface object to struct +func Recover(in interface{}) error { + if in == nil { + return nil + } + return errors.New(fmt.Sprint(in)) +} + +// ReadBody is used read response body +func (response *Response) ReadBody() (body []byte, err error) { + defer response.Body.Close() + var buffer [512]byte + result := bytes.NewBuffer(nil) + + for { + n, err := response.Body.Read(buffer[0:]) + result.Write(buffer[0:n]) + if err != nil && err == io.EOF { + break + } else if err != nil { + return nil, err + } + } + return result.Bytes(), nil +} + +func getTeaClient(tag string) *teaClient { + client, ok := clientPool.Load(tag) + if client == nil && !ok { + client = 
&teaClient{ + httpClient: &http.Client{}, + ifInit: false, + } + clientPool.Store(tag, client) + } + return client.(*teaClient) +} + +// DoRequest is used send request to server +func DoRequest(request *Request, requestRuntime map[string]interface{}) (response *Response, err error) { + runtimeObject := NewRuntimeObject(requestRuntime) + fieldMap := make(map[string]string) + utils.InitLogMsg(fieldMap) + defer func() { + if runtimeObject.Logger != nil { + runtimeObject.Logger.PrintLog(fieldMap, err) + } + }() + if request.Method == nil { + request.Method = String("GET") + } + + if request.Protocol == nil { + request.Protocol = String("http") + } else { + request.Protocol = String(strings.ToLower(StringValue(request.Protocol))) + } + + requestURL := "" + request.Domain = request.Headers["host"] + requestURL = fmt.Sprintf("%s://%s%s", StringValue(request.Protocol), StringValue(request.Domain), StringValue(request.Pathname)) + queryParams := request.Query + // sort QueryParams by key + q := url.Values{} + for key, value := range queryParams { + q.Add(key, StringValue(value)) + } + querystring := q.Encode() + if len(querystring) > 0 { + if strings.Contains(requestURL, "?") { + requestURL = fmt.Sprintf("%s&%s", requestURL, querystring) + } else { + requestURL = fmt.Sprintf("%s?%s", requestURL, querystring) + } + } + debugLog("> %s %s", StringValue(request.Method), requestURL) + + httpRequest, err := http.NewRequest(StringValue(request.Method), requestURL, request.Body) + if err != nil { + return + } + httpRequest.Host = StringValue(request.Domain) + + client := getTeaClient(runtimeObject.getClientTag(StringValue(request.Domain))) + client.Lock() + if !client.ifInit { + trans, err := getHttpTransport(request, runtimeObject) + if err != nil { + return nil, err + } + client.httpClient.Timeout = time.Duration(IntValue(runtimeObject.ReadTimeout)) * time.Millisecond + client.httpClient.Transport = trans + client.ifInit = true + } + client.Unlock() + for key, value := range request.Headers { + if value == nil || key == "content-length" { + continue + } else if key == "host" { + httpRequest.Header["Host"] = []string{*value} + delete(httpRequest.Header, "host") + } else if key == "user-agent" { + httpRequest.Header["User-Agent"] = []string{*value} + delete(httpRequest.Header, "user-agent") + } else { + httpRequest.Header[key] = []string{*value} + } + debugLog("> %s: %s", key, StringValue(value)) + } + contentlength, _ := strconv.Atoi(StringValue(request.Headers["content-length"])) + event := utils.NewProgressEvent(utils.TransferStartedEvent, 0, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + + putMsgToMap(fieldMap, httpRequest) + startTime := time.Now() + fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05") + res, err := hookDo(client.httpClient.Do)(httpRequest) + fieldMap["{cost}"] = time.Since(startTime).String() + completedBytes := int64(0) + if runtimeObject.Tracker != nil { + completedBytes = runtimeObject.Tracker.CompletedBytes + } + if err != nil { + event = utils.NewProgressEvent(utils.TransferFailedEvent, completedBytes, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + return + } + + event = utils.NewProgressEvent(utils.TransferCompletedEvent, completedBytes, int64(contentlength), 0) + utils.PublishProgress(runtimeObject.Listener, event) + + response = NewResponse(res) + fieldMap["{code}"] = strconv.Itoa(res.StatusCode) + fieldMap["{res_headers}"] = transToString(res.Header) + debugLog("< HTTP/1.1 %s", res.Status) + 
for key, value := range res.Header { + debugLog("< %s: %s", key, strings.Join(value, "")) + if len(value) != 0 { + response.Headers[strings.ToLower(key)] = String(value[0]) + } + } + return +} + +func getHttpTransport(req *Request, runtime *RuntimeObject) (*http.Transport, error) { + trans := new(http.Transport) + httpProxy, err := getHttpProxy(StringValue(req.Protocol), StringValue(req.Domain), runtime) + if err != nil { + return nil, err + } + if strings.ToLower(*req.Protocol) == "https" && + runtime.Key != nil && runtime.Cert != nil { + cert, err := tls.X509KeyPair([]byte(StringValue(runtime.Cert)), []byte(StringValue(runtime.Key))) + if err != nil { + return nil, err + } + + trans.TLSClientConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: BoolValue(runtime.IgnoreSSL), + } + if runtime.CA != nil { + clientCertPool := x509.NewCertPool() + ok := clientCertPool.AppendCertsFromPEM([]byte(StringValue(runtime.CA))) + if !ok { + return nil, errors.New("Failed to parse root certificate") + } + trans.TLSClientConfig.RootCAs = clientCertPool + } + } else { + trans.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: BoolValue(runtime.IgnoreSSL), + } + } + if httpProxy != nil { + trans.Proxy = http.ProxyURL(httpProxy) + if httpProxy.User != nil { + password, _ := httpProxy.User.Password() + auth := httpProxy.User.Username() + ":" + password + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Headers["Proxy-Authorization"] = String(basic) + } + } + if runtime.Socks5Proxy != nil && StringValue(runtime.Socks5Proxy) != "" { + socks5Proxy, err := getSocks5Proxy(runtime) + if err != nil { + return nil, err + } + if socks5Proxy != nil { + var auth *proxy.Auth + if socks5Proxy.User != nil { + password, _ := socks5Proxy.User.Password() + auth = &proxy.Auth{ + User: socks5Proxy.User.Username(), + Password: password, + } + } + dialer, err := proxy.SOCKS5(strings.ToLower(StringValue(runtime.Socks5NetWork)), socks5Proxy.String(), auth, + &net.Dialer{ + Timeout: time.Duration(IntValue(runtime.ConnectTimeout)) * time.Millisecond, + DualStack: true, + LocalAddr: getLocalAddr(StringValue(runtime.LocalAddr)), + }) + if err != nil { + return nil, err + } + trans.Dial = dialer.Dial + } + } else { + trans.DialContext = setDialContext(runtime) + } + return trans, nil +} + +func transToString(object interface{}) string { + byt, _ := json.Marshal(object) + return string(byt) +} + +func putMsgToMap(fieldMap map[string]string, request *http.Request) { + fieldMap["{host}"] = request.Host + fieldMap["{method}"] = request.Method + fieldMap["{uri}"] = request.URL.RequestURI() + fieldMap["{pid}"] = strconv.Itoa(os.Getpid()) + fieldMap["{version}"] = strings.Split(request.Proto, "/")[1] + hostname, _ := os.Hostname() + fieldMap["{hostname}"] = hostname + fieldMap["{req_headers}"] = transToString(request.Header) + fieldMap["{target}"] = request.URL.Path + request.URL.RawQuery +} + +func getNoProxy(protocol string, runtime *RuntimeObject) []string { + var urls []string + if runtime.NoProxy != nil && StringValue(runtime.NoProxy) != "" { + urls = strings.Split(StringValue(runtime.NoProxy), ",") + } else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } else if rawurl := os.Getenv("no_proxy"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } + + return urls +} + +func ToReader(obj interface{}) io.Reader { + switch obj.(type) { + case *string: + tmp := obj.(*string) + return strings.NewReader(StringValue(tmp)) + case []byte: + 
return strings.NewReader(string(obj.([]byte)))
+	case io.Reader:
+		return obj.(io.Reader)
+	default:
+		panic("Invalid Body. Please set a valid Body.")
+	}
+}
+
+func ToString(val interface{}) string {
+	return fmt.Sprintf("%v", val)
+}
+
+func getHttpProxy(protocol, host string, runtime *RuntimeObject) (proxy *url.URL, err error) {
+	urls := getNoProxy(protocol, runtime)
+	for _, url := range urls {
+		if url == host {
+			return nil, nil
+		}
+	}
+	if protocol == "https" {
+		if runtime.HttpsProxy != nil && StringValue(runtime.HttpsProxy) != "" {
+			proxy, err = url.Parse(StringValue(runtime.HttpsProxy))
+		} else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" {
+			proxy, err = url.Parse(rawurl)
+		} else if rawurl := os.Getenv("https_proxy"); rawurl != "" {
+			proxy, err = url.Parse(rawurl)
+		}
+	} else {
+		if runtime.HttpProxy != nil && StringValue(runtime.HttpProxy) != "" {
+			proxy, err = url.Parse(StringValue(runtime.HttpProxy))
+		} else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" {
+			proxy, err = url.Parse(rawurl)
+		} else if rawurl := os.Getenv("http_proxy"); rawurl != "" {
+			proxy, err = url.Parse(rawurl)
+		}
+	}
+
+	return proxy, err
+}
+
+func getSocks5Proxy(runtime *RuntimeObject) (proxy *url.URL, err error) {
+	if runtime.Socks5Proxy != nil && StringValue(runtime.Socks5Proxy) != "" {
+		proxy, err = url.Parse(StringValue(runtime.Socks5Proxy))
+	}
+	return proxy, err
+}
+
+func getLocalAddr(localAddr string) (addr *net.TCPAddr) {
+	if localAddr != "" {
+		addr = &net.TCPAddr{
+			// parse the textual address; []byte(localAddr) would store the raw string bytes, not an IP
+			IP: net.ParseIP(localAddr),
+		}
+	}
+	return addr
+}
+
+func setDialContext(runtime *RuntimeObject) func(cxt context.Context, net, addr string) (c net.Conn, err error) {
+	return func(ctx context.Context, network, address string) (net.Conn, error) {
+		if runtime.LocalAddr != nil && StringValue(runtime.LocalAddr) != "" {
+			netAddr := &net.TCPAddr{
+				IP: net.ParseIP(StringValue(runtime.LocalAddr)),
+			}
+			return (&net.Dialer{
+				Timeout:   time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second,
+				DualStack: true,
+				LocalAddr: netAddr,
+			}).DialContext(ctx, network, address)
+		}
+		return (&net.Dialer{
+			Timeout:   time.Duration(IntValue(runtime.ConnectTimeout)) * time.Second,
+			DualStack: true,
+		}).DialContext(ctx, network, address)
+	}
+}
+
+func ToObject(obj interface{}) map[string]interface{} {
+	result := make(map[string]interface{})
+	byt, _ := json.Marshal(obj)
+	err := json.Unmarshal(byt, &result)
+	if err != nil {
+		return nil
+	}
+	return result
+}
+
+func AllowRetry(retry interface{}, retryTimes *int) *bool {
+	if IntValue(retryTimes) == 0 {
+		return Bool(true)
+	}
+	retryMap, ok := retry.(map[string]interface{})
+	if !ok {
+		return Bool(false)
+	}
+	retryable, ok := retryMap["retryable"].(bool)
+	if !ok || !retryable {
+		return Bool(false)
+	}
+
+	maxAttempts, ok := retryMap["maxAttempts"].(int)
+	if !ok || maxAttempts < IntValue(retryTimes) {
+		return Bool(false)
+	}
+	return Bool(true)
+}
+
+func Merge(args ...interface{}) map[string]*string {
+	finalArg := make(map[string]*string)
+	for _, obj := range args {
+		switch obj.(type) {
+		case map[string]*string:
+			arg := obj.(map[string]*string)
+			for key, value := range arg {
+				if value != nil {
+					finalArg[key] = value
+				}
+			}
+		default:
+			byt, _ := json.Marshal(obj)
+			arg := make(map[string]string)
+			err := json.Unmarshal(byt, &arg)
+			if err != nil {
+				return finalArg
+			}
+			for key, value := range arg {
+				if value != "" {
+					finalArg[key] = String(value)
+				}
+			}
+		}
+	}
+
+	return finalArg
+}
+
+func isNil(a interface{}) bool {
+	defer func() {
+		recover()
+ }() + vi := reflect.ValueOf(a) + return vi.IsNil() +} + +func ToMap(args ...interface{}) map[string]interface{} { + isNotNil := false + finalArg := make(map[string]interface{}) + for _, obj := range args { + if obj == nil { + continue + } + + if isNil(obj) { + continue + } + isNotNil = true + + switch obj.(type) { + case map[string]*string: + arg := obj.(map[string]*string) + for key, value := range arg { + if value != nil { + finalArg[key] = StringValue(value) + } + } + case map[string]interface{}: + arg := obj.(map[string]interface{}) + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + case *string: + str := obj.(*string) + arg := make(map[string]interface{}) + err := json.Unmarshal([]byte(StringValue(str)), &arg) + if err == nil { + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + } + tmp := make(map[string]string) + err = json.Unmarshal([]byte(StringValue(str)), &tmp) + if err == nil { + for key, value := range arg { + if value != "" { + finalArg[key] = value + } + } + } + case []byte: + byt := obj.([]byte) + arg := make(map[string]interface{}) + err := json.Unmarshal(byt, &arg) + if err == nil { + for key, value := range arg { + if value != nil { + finalArg[key] = value + } + } + break + } + default: + val := reflect.ValueOf(obj) + res := structToMap(val) + for key, value := range res { + if value != nil { + finalArg[key] = value + } + } + } + } + + if !isNotNil { + return nil + } + return finalArg +} + +func structToMap(dataValue reflect.Value) map[string]interface{} { + out := make(map[string]interface{}) + if !dataValue.IsValid() { + return out + } + if dataValue.Kind().String() == "ptr" { + if dataValue.IsNil() { + return out + } + dataValue = dataValue.Elem() + } + if !dataValue.IsValid() { + return out + } + dataType := dataValue.Type() + if dataType.Kind().String() != "struct" { + return out + } + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("json") + if !containsNameTag { + name = field.Name + } else { + strs := strings.Split(name, ",") + name = strs[0] + } + fieldValue := dataValue.FieldByName(field.Name) + if !fieldValue.IsValid() || fieldValue.IsNil() { + continue + } + if field.Type.String() == "io.Reader" || field.Type.String() == "io.Writer" { + continue + } else if field.Type.Kind().String() == "struct" { + out[name] = structToMap(fieldValue) + } else if field.Type.Kind().String() == "ptr" && + field.Type.Elem().Kind().String() == "struct" { + if fieldValue.Elem().IsValid() { + out[name] = structToMap(fieldValue) + } + } else if field.Type.Kind().String() == "ptr" { + if fieldValue.IsValid() && !fieldValue.IsNil() { + out[name] = fieldValue.Elem().Interface() + } + } else if field.Type.Kind().String() == "slice" { + tmp := make([]interface{}, 0) + num := fieldValue.Len() + for i := 0; i < num; i++ { + value := fieldValue.Index(i) + if !value.IsValid() { + continue + } + if value.Type().Kind().String() == "ptr" && + value.Type().Elem().Kind().String() == "struct" { + if value.IsValid() && !value.IsNil() { + tmp = append(tmp, structToMap(value)) + } + } else if value.Type().Kind().String() == "struct" { + tmp = append(tmp, structToMap(value)) + } else if value.Type().Kind().String() == "ptr" { + if value.IsValid() && !value.IsNil() { + tmp = append(tmp, value.Elem().Interface()) + } + } else { + tmp = append(tmp, value.Interface()) + } + } + if len(tmp) > 0 { + out[name] = tmp + } + } else { + out[name] = fieldValue.Interface() + } 
+ + } + return out +} + +func Retryable(err error) *bool { + if err == nil { + return Bool(false) + } + if realErr, ok := err.(*SDKError); ok { + if realErr.StatusCode == nil { + return Bool(false) + } + code := IntValue(realErr.StatusCode) + return Bool(code >= http.StatusInternalServerError) + } + return Bool(true) +} + +func GetBackoffTime(backoff interface{}, retrytimes *int) *int { + backoffMap, ok := backoff.(map[string]interface{}) + if !ok { + return Int(0) + } + policy, ok := backoffMap["policy"].(string) + if !ok || policy == "no" { + return Int(0) + } + + period, ok := backoffMap["period"].(int) + if !ok || period == 0 { + return Int(0) + } + + maxTime := math.Pow(2.0, float64(IntValue(retrytimes))) + return Int(rand.Intn(int(maxTime-1)) * period) +} + +func Sleep(backoffTime *int) { + sleeptime := time.Duration(IntValue(backoffTime)) * time.Second + time.Sleep(sleeptime) +} + +func Validate(params interface{}) error { + if params == nil { + return nil + } + requestValue := reflect.ValueOf(params) + if requestValue.IsNil() { + return nil + } + err := validate(requestValue.Elem()) + return err +} + +// Verify whether the parameters meet the requirements +func validate(dataValue reflect.Value) error { + if strings.HasPrefix(dataValue.Type().String(), "*") { // Determines whether the input is a structure object or a pointer object + if dataValue.IsNil() { + return nil + } + dataValue = dataValue.Elem() + } + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + valueField := dataValue.Field(i) + for _, value := range validateParams { + err := validateParam(field, valueField, value) + if err != nil { + return err + } + } + } + return nil +} + +func validateParam(field reflect.StructField, valueField reflect.Value, tagName string) error { + tag, containsTag := field.Tag.Lookup(tagName) // Take out the checked regular expression + if containsTag && tagName == "require" { + err := checkRequire(field, valueField) + if err != nil { + return err + } + } + if strings.HasPrefix(field.Type.String(), "[]") { // Verify the parameters of the array type + err := validateSlice(field, valueField, containsTag, tag, tagName) + if err != nil { + return err + } + } else if valueField.Kind() == reflect.Ptr { // Determines whether it is a pointer object + err := validatePtr(field, valueField, containsTag, tag, tagName) + if err != nil { + return err + } + } + return nil +} + +func validateSlice(field reflect.StructField, valueField reflect.Value, containsregexpTag bool, tag, tagName string) error { + if valueField.IsValid() && !valueField.IsNil() { // Determines whether the parameter has a value + if containsregexpTag { + if tagName == "maxItems" { + err := checkMaxItems(field, valueField, tag) + if err != nil { + return err + } + } + + if tagName == "minItems" { + err := checkMinItems(field, valueField, tag) + if err != nil { + return err + } + } + } + + for m := 0; m < valueField.Len(); m++ { + elementValue := valueField.Index(m) + if elementValue.Type().Kind() == reflect.Ptr { // Determines whether the child elements of an array are of a basic type + err := validatePtr(field, elementValue, containsregexpTag, tag, tagName) + if err != nil { + return err + } + } + } + } + return nil +} + +func validatePtr(field reflect.StructField, elementValue reflect.Value, containsregexpTag bool, tag, tagName string) error { + if elementValue.IsNil() { + return nil + } + if isFilterType(elementValue.Elem().Type().String(), basicTypes) { + if containsregexpTag { + if 
tagName == "pattern" {
+			err := checkPattern(field, elementValue.Elem(), tag)
+			if err != nil {
+				return err
+			}
+		}
+
+		if tagName == "maxLength" {
+			err := checkMaxLength(field, elementValue.Elem(), tag)
+			if err != nil {
+				return err
+			}
+		}
+
+		if tagName == "minLength" {
+			err := checkMinLength(field, elementValue.Elem(), tag)
+			if err != nil {
+				return err
+			}
+		}
+
+		if tagName == "maximum" {
+			err := checkMaximum(field, elementValue.Elem(), tag)
+			if err != nil {
+				return err
+			}
+		}
+
+		if tagName == "minimum" {
+			err := checkMinimum(field, elementValue.Elem(), tag)
+			if err != nil {
+				return err
+			}
+		}
+		}
+	} else {
+		err := validate(elementValue)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func checkRequire(field reflect.StructField, valueField reflect.Value) error {
+	name, _ := field.Tag.Lookup("json")
+	strs := strings.Split(name, ",")
+	name = strs[0]
+	if !valueField.IsNil() && valueField.IsValid() {
+		return nil
+	}
+	return errors.New(name + " should be set")
+}
+
+func checkPattern(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() && valueField.String() != "" {
+		value := valueField.String()
+		r, _ := regexp.Compile("^" + tag + "$")
+		if match := r.MatchString(value); !match { // report an error when the value does not satisfy the regular expression
+			return errors.New(value + " does not match " + tag)
+		}
+	}
+	return nil
+}
+
+func checkMaxItems(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() && valueField.String() != "" {
+		maxItems, err := strconv.Atoi(tag)
+		if err != nil {
+			return err
+		}
+		length := valueField.Len()
+		if maxItems < length {
+			errMsg := fmt.Sprintf("The length of %s is %d which is more than %d", field.Name, length, maxItems)
+			return errors.New(errMsg)
+		}
+	}
+	return nil
+}
+
+func checkMinItems(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() {
+		minItems, err := strconv.Atoi(tag)
+		if err != nil {
+			return err
+		}
+		length := valueField.Len()
+		if minItems > length {
+			errMsg := fmt.Sprintf("The length of %s is %d which is less than %d", field.Name, length, minItems)
+			return errors.New(errMsg)
+		}
+	}
+	return nil
+}
+
+func checkMaxLength(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() && valueField.String() != "" {
+		maxLength, err := strconv.Atoi(tag)
+		if err != nil {
+			return err
+		}
+		length := valueField.Len()
+		if valueField.Kind().String() == "string" {
+			length = strings.Count(valueField.String(), "") - 1
+		}
+		if maxLength < length {
+			errMsg := fmt.Sprintf("The length of %s is %d which is more than %d", field.Name, length, maxLength)
+			return errors.New(errMsg)
+		}
+	}
+	return nil
+}
+
+func checkMinLength(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() {
+		minLength, err := strconv.Atoi(tag)
+		if err != nil {
+			return err
+		}
+		length := valueField.Len()
+		if valueField.Kind().String() == "string" {
+			length = strings.Count(valueField.String(), "") - 1
+		}
+		if minLength > length {
+			errMsg := fmt.Sprintf("The length of %s is %d which is less than %d", field.Name, length, minLength)
+			return errors.New(errMsg)
+		}
+	}
+	return nil
+}
+
+func checkMaximum(field reflect.StructField, valueField reflect.Value, tag string) error {
+	if valueField.IsValid() && valueField.String() != "" {
+		maximum, err := strconv.ParseFloat(tag, 64)
+		if
err != nil { + return err + } + byt, _ := json.Marshal(valueField.Interface()) + num, err := strconv.ParseFloat(string(byt), 64) + if err != nil { + return err + } + if maximum < num { + errMsg := fmt.Sprintf("The size of %s is %f which is greater than %f", field.Name, num, maximum) + return errors.New(errMsg) + } + } + return nil +} + +func checkMinimum(field reflect.StructField, valueField reflect.Value, tag string) error { + if valueField.IsValid() && valueField.String() != "" { + minimum, err := strconv.ParseFloat(tag, 64) + if err != nil { + return err + } + + byt, _ := json.Marshal(valueField.Interface()) + num, err := strconv.ParseFloat(string(byt), 64) + if err != nil { + return err + } + if minimum > num { + errMsg := fmt.Sprintf("The size of %s is %f which is less than %f", field.Name, num, minimum) + return errors.New(errMsg) + } + } + return nil +} + +// Determines whether realType is in filterTypes +func isFilterType(realType string, filterTypes []string) bool { + for _, value := range filterTypes { + if value == realType { + return true + } + } + return false +} + +func TransInterfaceToBool(val interface{}) *bool { + if val == nil { + return nil + } + + return Bool(val.(bool)) +} + +func TransInterfaceToInt(val interface{}) *int { + if val == nil { + return nil + } + + return Int(val.(int)) +} + +func TransInterfaceToString(val interface{}) *string { + if val == nil { + return nil + } + + return String(val.(string)) +} + +func Prettify(i interface{}) string { + resp, _ := json.MarshalIndent(i, "", " ") + return string(resp) +} + +func ToInt(a *int32) *int { + return Int(int(Int32Value(a))) +} + +func ToInt32(a *int) *int32 { + return Int32(int32(IntValue(a))) +} diff --git a/vendor/github.com/alibabacloud-go/tea/tea/trans.go b/vendor/github.com/alibabacloud-go/tea/tea/trans.go new file mode 100644 index 0000000000..ded1642fa7 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/tea/trans.go @@ -0,0 +1,491 @@ +package tea + +func String(a string) *string { + return &a +} + +func StringValue(a *string) string { + if a == nil { + return "" + } + return *a +} + +func Int(a int) *int { + return &a +} + +func IntValue(a *int) int { + if a == nil { + return 0 + } + return *a +} + +func Int8(a int8) *int8 { + return &a +} + +func Int8Value(a *int8) int8 { + if a == nil { + return 0 + } + return *a +} + +func Int16(a int16) *int16 { + return &a +} + +func Int16Value(a *int16) int16 { + if a == nil { + return 0 + } + return *a +} + +func Int32(a int32) *int32 { + return &a +} + +func Int32Value(a *int32) int32 { + if a == nil { + return 0 + } + return *a +} + +func Int64(a int64) *int64 { + return &a +} + +func Int64Value(a *int64) int64 { + if a == nil { + return 0 + } + return *a +} + +func Bool(a bool) *bool { + return &a +} + +func BoolValue(a *bool) bool { + if a == nil { + return false + } + return *a +} + +func Uint(a uint) *uint { + return &a +} + +func UintValue(a *uint) uint { + if a == nil { + return 0 + } + return *a +} + +func Uint8(a uint8) *uint8 { + return &a +} + +func Uint8Value(a *uint8) uint8 { + if a == nil { + return 0 + } + return *a +} + +func Uint16(a uint16) *uint16 { + return &a +} + +func Uint16Value(a *uint16) uint16 { + if a == nil { + return 0 + } + return *a +} + +func Uint32(a uint32) *uint32 { + return &a +} + +func Uint32Value(a *uint32) uint32 { + if a == nil { + return 0 + } + return *a +} + +func Uint64(a uint64) *uint64 { + return &a +} + +func Uint64Value(a *uint64) uint64 { + if a == nil { + return 0 + } + return *a +} + +func Float32(a 
float32) *float32 { + return &a +} + +func Float32Value(a *float32) float32 { + if a == nil { + return 0 + } + return *a +} + +func Float64(a float64) *float64 { + return &a +} + +func Float64Value(a *float64) float64 { + if a == nil { + return 0 + } + return *a +} + +func IntSlice(a []int) []*int { + if a == nil { + return nil + } + res := make([]*int, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func IntValueSlice(a []*int) []int { + if a == nil { + return nil + } + res := make([]int, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int8Slice(a []int8) []*int8 { + if a == nil { + return nil + } + res := make([]*int8, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int8ValueSlice(a []*int8) []int8 { + if a == nil { + return nil + } + res := make([]int8, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int16Slice(a []int16) []*int16 { + if a == nil { + return nil + } + res := make([]*int16, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int16ValueSlice(a []*int16) []int16 { + if a == nil { + return nil + } + res := make([]int16, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int32Slice(a []int32) []*int32 { + if a == nil { + return nil + } + res := make([]*int32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int32ValueSlice(a []*int32) []int32 { + if a == nil { + return nil + } + res := make([]int32, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Int64Slice(a []int64) []*int64 { + if a == nil { + return nil + } + res := make([]*int64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Int64ValueSlice(a []*int64) []int64 { + if a == nil { + return nil + } + res := make([]int64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func UintSlice(a []uint) []*uint { + if a == nil { + return nil + } + res := make([]*uint, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func UintValueSlice(a []*uint) []uint { + if a == nil { + return nil + } + res := make([]uint, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint8Slice(a []uint8) []*uint8 { + if a == nil { + return nil + } + res := make([]*uint8, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint8ValueSlice(a []*uint8) []uint8 { + if a == nil { + return nil + } + res := make([]uint8, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint16Slice(a []uint16) []*uint16 { + if a == nil { + return nil + } + res := make([]*uint16, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint16ValueSlice(a []*uint16) []uint16 { + if a == nil { + return nil + } + res := make([]uint16, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint32Slice(a []uint32) []*uint32 { + if a == nil { + return nil + } + res := make([]*uint32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint32ValueSlice(a []*uint32) []uint32 { + if a == nil { + return nil + } + res := make([]uint32, len(a)) + 
for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Uint64Slice(a []uint64) []*uint64 { + if a == nil { + return nil + } + res := make([]*uint64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Uint64ValueSlice(a []*uint64) []uint64 { + if a == nil { + return nil + } + res := make([]uint64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Float32Slice(a []float32) []*float32 { + if a == nil { + return nil + } + res := make([]*float32, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Float32ValueSlice(a []*float32) []float32 { + if a == nil { + return nil + } + res := make([]float32, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func Float64Slice(a []float64) []*float64 { + if a == nil { + return nil + } + res := make([]*float64, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func Float64ValueSlice(a []*float64) []float64 { + if a == nil { + return nil + } + res := make([]float64, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func StringSlice(a []string) []*string { + if a == nil { + return nil + } + res := make([]*string, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func StringSliceValue(a []*string) []string { + if a == nil { + return nil + } + res := make([]string, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} + +func BoolSlice(a []bool) []*bool { + if a == nil { + return nil + } + res := make([]*bool, len(a)) + for i := 0; i < len(a); i++ { + res[i] = &a[i] + } + return res +} + +func BoolSliceValue(a []*bool) []bool { + if a == nil { + return nil + } + res := make([]bool, len(a)) + for i := 0; i < len(a); i++ { + if a[i] != nil { + res[i] = *a[i] + } + } + return res +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/assert.go b/vendor/github.com/alibabacloud-go/tea/utils/assert.go new file mode 100644 index 0000000000..7ae677501a --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/assert.go @@ -0,0 +1,64 @@ +package utils + +import ( + "reflect" + "strings" + "testing" +) + +func isNil(object interface{}) bool { + if object == nil { + return true + } + + value := reflect.ValueOf(object) + kind := value.Kind() + isNilableKind := containsKind( + []reflect.Kind{ + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice}, + kind) + + if isNilableKind && value.IsNil() { + return true + } + + return false +} + +func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { + for i := 0; i < len(kinds); i++ { + if kind == kinds[i] { + return true + } + } + + return false +} + +func AssertEqual(t *testing.T, a, b interface{}) { + if !reflect.DeepEqual(a, b) { + t.Errorf("%v != %v", a, b) + } +} + +func AssertNil(t *testing.T, object interface{}) { + if !isNil(object) { + t.Errorf("%v is not nil", object) + } +} + +func AssertNotNil(t *testing.T, object interface{}) { + if isNil(object) { + t.Errorf("%v is nil", object) + } +} + +func AssertContains(t *testing.T, contains string, msgAndArgs ...string) { + for _, value := range msgAndArgs { + if ok := strings.Contains(contains, value); !ok { + t.Errorf("%s does not contain %s", contains, value) + } + } +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/logger.go 
b/vendor/github.com/alibabacloud-go/tea/utils/logger.go new file mode 100644 index 0000000000..0513668876 --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/logger.go @@ -0,0 +1,109 @@ +package utils + +import ( + "io" + "log" + "strings" + "time" +) + +type Logger struct { + *log.Logger + formatTemplate string + isOpen bool + lastLogMsg string +} + +var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}` +var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"} +var logChannel string + +func InitLogMsg(fieldMap map[string]string) { + for _, value := range loggerParam { + fieldMap[value] = "" + } +} + +func (logger *Logger) SetFormatTemplate(template string) { + logger.formatTemplate = template + +} + +func (logger *Logger) GetFormatTemplate() string { + return logger.formatTemplate + +} + +func NewLogger(level string, channel string, out io.Writer, template string) *Logger { + if level == "" { + level = "info" + } + + logChannel = "AlibabaCloud" + if channel != "" { + logChannel = channel + } + log := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile) + if template == "" { + template = defaultLoggerTemplate + } + + return &Logger{ + Logger: log, + formatTemplate: template, + isOpen: true, + } +} + +func (logger *Logger) OpenLogger() { + logger.isOpen = true +} + +func (logger *Logger) CloseLogger() { + logger.isOpen = false +} + +func (logger *Logger) SetIsopen(isopen bool) { + logger.isOpen = isopen +} + +func (logger *Logger) GetIsopen() bool { + return logger.isOpen +} + +func (logger *Logger) SetLastLogMsg(lastLogMsg string) { + logger.lastLogMsg = lastLogMsg +} + +func (logger *Logger) GetLastLogMsg() string { + return logger.lastLogMsg +} + +func SetLogChannel(channel string) { + logChannel = channel +} + +func (logger *Logger) PrintLog(fieldMap map[string]string, err error) { + if err != nil { + fieldMap["{error}"] = err.Error() + } + fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05") + fieldMap["{ts}"] = getTimeInFormatISO8601() + fieldMap["{channel}"] = logChannel + if logger != nil { + logMsg := logger.formatTemplate + for key, value := range fieldMap { + logMsg = strings.Replace(logMsg, key, value, -1) + } + logger.lastLogMsg = logMsg + if logger.isOpen == true { + logger.Output(2, logMsg) + } + } +} + +func getTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} diff --git a/vendor/github.com/alibabacloud-go/tea/utils/progress.go b/vendor/github.com/alibabacloud-go/tea/utils/progress.go new file mode 100644 index 0000000000..2f5364aead --- /dev/null +++ b/vendor/github.com/alibabacloud-go/tea/utils/progress.go @@ -0,0 +1,60 @@ +package utils + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes anmd TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType 
ProgressEventType +} + +// ProgressListener listens progress change +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func NewProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func PublishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +func GetProgressListener(obj interface{}) ProgressListener { + if obj == nil { + return nil + } + listener, ok := obj.(ProgressListener) + if !ok { + return nil + } + return listener +} + +type ReaderTracker struct { + CompletedBytes int64 +} diff --git a/vendor/github.com/aliyun/credentials-go/LICENSE b/vendor/github.com/aliyun/credentials-go/LICENSE new file mode 100644 index 0000000000..0c44dcefe3 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2009-present, Alibaba Cloud All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
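A minimal usage sketch of the vendored tea HTTP client above (illustrative only; example.com, the query key, and the timeout values are placeholders, not anything taken from this diff — NewRequest, DoRequest, ReadBody, String, and IntValue are all defined in the vendored tea package):

package main

import (
	"fmt"

	"github.com/alibabacloud-go/tea/tea"
)

func main() {
	req := tea.NewRequest() // Headers and Query maps come pre-initialized
	req.Method = tea.String("GET")
	req.Protocol = tea.String("https")
	req.Pathname = tea.String("/")
	// DoRequest derives the target domain from the "host" header.
	req.Headers["host"] = tea.String("example.com")
	req.Query["q"] = tea.String("demo")

	// readTimeout is applied in milliseconds by DoRequest.
	runtime := map[string]interface{}{
		"readTimeout":    10000,
		"connectTimeout": 5000,
	}

	resp, err := tea.DoRequest(req, runtime)
	if err != nil {
		panic(err)
	}
	body, err := resp.ReadBody() // drains and closes resp.Body
	if err != nil {
		panic(err)
	}
	fmt.Println(tea.IntValue(resp.StatusCode), len(body))
}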
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
new file mode 100644
index 0000000000..7bcaa9740a
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/access_key_credential.go
@@ -0,0 +1,41 @@
+package credentials
+
+import "github.com/alibabacloud-go/tea/tea"
+
+// AccessKeyCredential is a kind of credential
+type AccessKeyCredential struct {
+	AccessKeyId     string
+	AccessKeySecret string
+}
+
+func newAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
+	return &AccessKeyCredential{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+	}
+}
+
+// GetAccessKeyId returns AccessKeyCredential's AccessKeyId
+func (a *AccessKeyCredential) GetAccessKeyId() (*string, error) {
+	return tea.String(a.AccessKeyId), nil
+}
+
+// GetAccessKeySecret returns AccessKeyCredential's AccessKeySecret
+func (a *AccessKeyCredential) GetAccessKeySecret() (*string, error) {
+	return tea.String(a.AccessKeySecret), nil
+}
+
+// GetSecurityToken is useless for AccessKeyCredential
+func (a *AccessKeyCredential) GetSecurityToken() (*string, error) {
+	return tea.String(""), nil
+}
+
+// GetBearerToken is useless for AccessKeyCredential
+func (a *AccessKeyCredential) GetBearerToken() *string {
+	return tea.String("")
+}
+
+// GetType returns AccessKeyCredential's credential type
+func (a *AccessKeyCredential) GetType() *string {
+	return tea.String("access_key")
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
new file mode 100644
index 0000000000..cca291621c
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/bearer_token_credential.go
@@ -0,0 +1,40 @@
+package credentials
+
+import "github.com/alibabacloud-go/tea/tea"
+
+// BearerTokenCredential is a kind of credential
+type BearerTokenCredential struct {
+	BearerToken string
+}
+
+// newBearerTokenCredential returns a BearerTokenCredential object
+func newBearerTokenCredential(token string) *BearerTokenCredential {
+	return &BearerTokenCredential{
+		BearerToken: token,
+	}
+}
+
+// GetAccessKeyId is useless for BearerTokenCredential
+func (b *BearerTokenCredential) GetAccessKeyId() (*string, error) {
+	return tea.String(""), nil
+}
+
+// GetAccessKeySecret is useless for BearerTokenCredential
+func (b *BearerTokenCredential) GetAccessKeySecret() (*string, error) {
+	return tea.String(""), nil
+}
+
+// GetSecurityToken is useless for BearerTokenCredential
+func (b *BearerTokenCredential) GetSecurityToken() (*string, error) {
+	return tea.String(""), nil
+}
+
+// GetBearerToken returns BearerTokenCredential's BearerToken
+func (b *BearerTokenCredential) GetBearerToken() *string {
+	return tea.String(b.BearerToken)
+}
+
+// GetType returns BearerTokenCredential's type
+func (b *BearerTokenCredential) GetType() *string {
+	return tea.String("bearer")
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential.go b/vendor/github.com/aliyun/credentials-go/credentials/credential.go
new file mode 100644
index 0000000000..62e647541c
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/credential.go
@@ -0,0 +1,401 @@
+package credentials
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/alibabacloud-go/debug/debug"
+	"github.com/alibabacloud-go/tea/tea"
+	
"github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/response" + "github.com/aliyun/credentials-go/credentials/utils" +) + +var debuglog = debug.Init("credential") + +var hookParse = func(err error) error { + return err +} + +// Credential is an interface for getting actual credential +type Credential interface { + GetAccessKeyId() (*string, error) + GetAccessKeySecret() (*string, error) + GetSecurityToken() (*string, error) + GetBearerToken() *string + GetType() *string +} + +// Config is important when call NewCredential +type Config struct { + Type *string `json:"type"` + AccessKeyId *string `json:"access_key_id"` + AccessKeySecret *string `json:"access_key_secret"` + OIDCProviderArn *string `json:"oidc_provider_arn"` + OIDCTokenFilePath *string `json:"oidc_token"` + RoleArn *string `json:"role_arn"` + RoleSessionName *string `json:"role_session_name"` + PublicKeyId *string `json:"public_key_id"` + RoleName *string `json:"role_name"` + SessionExpiration *int `json:"session_expiration"` + PrivateKeyFile *string `json:"private_key_file"` + BearerToken *string `json:"bearer_token"` + SecurityToken *string `json:"security_token"` + RoleSessionExpiration *int `json:"role_session_expiratioon"` + Policy *string `json:"policy"` + Host *string `json:"host"` + Timeout *int `json:"timeout"` + ConnectTimeout *int `json:"connect_timeout"` + Proxy *string `json:"proxy"` + InAdvanceScale *float64 `json:"inAdvanceScale"` + Url *string `json:"url"` +} + +func (s Config) String() string { + return tea.Prettify(s) +} + +func (s Config) GoString() string { + return s.String() +} + +func (s *Config) SetAccessKeyId(v string) *Config { + s.AccessKeyId = &v + return s +} + +func (s *Config) SetAccessKeySecret(v string) *Config { + s.AccessKeySecret = &v + return s +} + +func (s *Config) SetSecurityToken(v string) *Config { + s.SecurityToken = &v + return s +} + +func (s *Config) SetRoleArn(v string) *Config { + s.RoleArn = &v + return s +} + +func (s *Config) SetRoleSessionName(v string) *Config { + s.RoleSessionName = &v + return s +} + +func (s *Config) SetPublicKeyId(v string) *Config { + s.PublicKeyId = &v + return s +} + +func (s *Config) SetRoleName(v string) *Config { + s.RoleName = &v + return s +} + +func (s *Config) SetSessionExpiration(v int) *Config { + s.SessionExpiration = &v + return s +} + +func (s *Config) SetPrivateKeyFile(v string) *Config { + s.PrivateKeyFile = &v + return s +} + +func (s *Config) SetBearerToken(v string) *Config { + s.BearerToken = &v + return s +} + +func (s *Config) SetRoleSessionExpiration(v int) *Config { + s.RoleSessionExpiration = &v + return s +} + +func (s *Config) SetPolicy(v string) *Config { + s.Policy = &v + return s +} + +func (s *Config) SetHost(v string) *Config { + s.Host = &v + return s +} + +func (s *Config) SetTimeout(v int) *Config { + s.Timeout = &v + return s +} + +func (s *Config) SetConnectTimeout(v int) *Config { + s.ConnectTimeout = &v + return s +} + +func (s *Config) SetProxy(v string) *Config { + s.Proxy = &v + return s +} + +func (s *Config) SetType(v string) *Config { + s.Type = &v + return s +} + +func (s *Config) SetOIDCTokenFilePath(v string) *Config { + s.OIDCTokenFilePath = &v + return s +} + +func (s *Config) SetOIDCProviderArn(v string) *Config { + s.OIDCProviderArn = &v + return s +} + +func (s *Config) SetURLCredential(v string) *Config { + if v == "" { + v = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI") + } + s.Url = &v + return s +} + +// NewCredential return a credential according to 
the type in config. +// if config is nil, the function will use default provider chain to get credential. +// please see README.md for detail. +func NewCredential(config *Config) (credential Credential, err error) { + if config == nil { + config, err = defaultChain.resolve() + if err != nil { + return + } + return NewCredential(config) + } + switch tea.StringValue(config.Type) { + case "credentials_uri": + credential = newURLCredential(tea.StringValue(config.Url)) + case "oidc_role_arn": + err = checkoutAssumeRamoidc(config) + if err != nil { + return + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newOIDCRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.OIDCProviderArn), tea.StringValue(config.OIDCTokenFilePath), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime) + case "access_key": + err = checkAccessKey(config) + if err != nil { + return + } + credential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret)) + case "sts": + err = checkSTS(config) + if err != nil { + return + } + credential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken)) + case "ecs_ram_role": + checkEcsRAMRole(config) + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), tea.Float64Value(config.InAdvanceScale), runtime) + case "ram_role_arn": + err = checkRAMRoleArn(config) + if err != nil { + return + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newRAMRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime) + case "rsa_key_pair": + err = checkRSAKeyPair(config) + if err != nil { + return + } + file, err1 := os.Open(tea.StringValue(config.PrivateKeyFile)) + if err1 != nil { + err = fmt.Errorf("InvalidPath: Can not open PrivateKeyFile, err is %s", err1.Error()) + return + } + defer file.Close() + var privateKey string + scan := bufio.NewScanner(file) + for scan.Scan() { + if strings.HasPrefix(scan.Text(), "----") { + continue + } + privateKey += scan.Text() + "\n" + } + runtime := &utils.Runtime{ + Host: tea.StringValue(config.Host), + Proxy: tea.StringValue(config.Proxy), + ReadTimeout: tea.IntValue(config.Timeout), + ConnectTimeout: tea.IntValue(config.ConnectTimeout), + } + credential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime) + case "bearer": + if tea.StringValue(config.BearerToken) == "" { + err = errors.New("BearerToken cannot be empty") + return + } + credential = newBearerTokenCredential(tea.StringValue(config.BearerToken)) + default: + err = 
errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair") + return + } + return credential, nil +} + +func checkRSAKeyPair(config *Config) (err error) { + if tea.StringValue(config.PrivateKeyFile) == "" { + err = errors.New("PrivateKeyFile cannot be empty") + return + } + if tea.StringValue(config.PublicKeyId) == "" { + err = errors.New("PublicKeyId cannot be empty") + return + } + return +} + +func checkoutAssumeRamoidc(config *Config) (err error) { + if tea.StringValue(config.RoleArn) == "" { + err = errors.New("RoleArn cannot be empty") + return + } + if tea.StringValue(config.OIDCProviderArn) == "" { + err = errors.New("OIDCProviderArn cannot be empty") + return + } + return +} + +func checkRAMRoleArn(config *Config) (err error) { + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + if tea.StringValue(config.RoleArn) == "" { + err = errors.New("RoleArn cannot be empty") + return + } + if tea.StringValue(config.RoleSessionName) == "" { + err = errors.New("RoleSessionName cannot be empty") + return + } + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + return +} + +func checkEcsRAMRole(config *Config) (err error) { + return +} + +func checkSTS(config *Config) (err error) { + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + if tea.StringValue(config.SecurityToken) == "" { + err = errors.New("SecurityToken cannot be empty") + return + } + return +} + +func checkAccessKey(config *Config) (err error) { + if tea.StringValue(config.AccessKeyId) == "" { + err = errors.New("AccessKeyId cannot be empty") + return + } + if tea.StringValue(config.AccessKeySecret) == "" { + err = errors.New("AccessKeySecret cannot be empty") + return + } + return +} + +func doAction(request *request.CommonRequest, runtime *utils.Runtime) (content []byte, err error) { + var urlEncoded string + if request.BodyParams != nil { + urlEncoded = utils.GetURLFormedMap(request.BodyParams) + } + httpRequest, err := http.NewRequest(request.Method, request.URL, strings.NewReader(urlEncoded)) + if err != nil { + return + } + httpRequest.Proto = "HTTP/1.1" + httpRequest.Host = request.Domain + debuglog("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto) + debuglog("> Host: %s", httpRequest.Host) + for key, value := range request.Headers { + if value != "" { + debuglog("> %s: %s", key, value) + httpRequest.Header[key] = []string{value} + } + } + debuglog(">") + httpClient := &http.Client{} + httpClient.Timeout = time.Duration(runtime.ReadTimeout) * time.Second + proxy := &url.URL{} + if runtime.Proxy != "" { + proxy, err = url.Parse(runtime.Proxy) + if err != nil { + return + } + } + trans := &http.Transport{} + if proxy != nil && runtime.Proxy != "" { + trans.Proxy = http.ProxyURL(proxy) + } + trans.DialContext = utils.Timeout(time.Duration(runtime.ConnectTimeout) * time.Second) + httpClient.Transport = trans + httpResponse, err := hookDo(httpClient.Do)(httpRequest) + if err != nil { + return + } + debuglog("< %s %s", httpResponse.Proto, httpResponse.Status) + for key, value := range httpResponse.Header { + debuglog("< %s: %v", key, strings.Join(value, "")) + } + debuglog("<") + + resp := &response.CommonResponse{} + err = 
hookParse(resp.ParseFromHTTPResponse(httpResponse))
+	if err != nil {
+		return
+	}
+	debuglog("%s", resp.GetHTTPContentString())
+	if resp.GetHTTPStatus() != http.StatusOK {
+		err = fmt.Errorf("httpStatus: %d, message = %s", resp.GetHTTPStatus(), resp.GetHTTPContentString())
+		return
+	}
+	return resp.GetHTTPContentBytes(), nil
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go b/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
new file mode 100644
index 0000000000..8d4433b7ea
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/credential_updater.go
@@ -0,0 +1,25 @@
+package credentials
+
+import (
+	"net/http"
+	"time"
+)
+
+const defaultInAdvanceScale = 0.95
+
+var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
+	return fn
+}
+
+type credentialUpdater struct {
+	credentialExpiration int
+	lastUpdateTimestamp  int64
+	inAdvanceScale       float64
+}
+
+func (updater *credentialUpdater) needUpdateCredential() (result bool) {
+	if updater.inAdvanceScale == 0 {
+		updater.inAdvanceScale = defaultInAdvanceScale
+	}
+	return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale)
+}
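The updater above refreshes ahead of expiry: with the default `inAdvanceScale` of 0.95, a credential valid for 3600 seconds is treated as stale once 3420 seconds have elapsed since the last update. A minimal standalone sketch of that arithmetic (the lifetime value is hypothetical):

```go
package main

import "fmt"

func main() {
	credentialExpiration := 3600 // seconds of validity reported by the server (hypothetical)
	inAdvanceScale := 0.95       // defaultInAdvanceScale from credential_updater.go
	// needUpdateCredential triggers a refresh once this many seconds
	// have elapsed since lastUpdateTimestamp.
	window := int64(float64(credentialExpiration) * inAdvanceScale)
	fmt.Printf("refresh after %d of %d seconds\n", window, credentialExpiration) // 3420 of 3600
}
```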
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
new file mode 100644
index 0000000000..5e7ddf4dec
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/ecs_ram_role.go
@@ -0,0 +1,149 @@
+package credentials
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/alibabacloud-go/tea/tea"
+	"github.com/aliyun/credentials-go/credentials/request"
+	"github.com/aliyun/credentials-go/credentials/utils"
+)
+
+var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
+
+// EcsRAMRoleCredential is a kind of credential
+type EcsRAMRoleCredential struct {
+	*credentialUpdater
+	RoleName          string
+	sessionCredential *sessionCredential
+	runtime           *utils.Runtime
+}
+
+type ecsRAMRoleResponse struct {
+	Code            string `json:"Code" xml:"Code"`
+	AccessKeyId     string `json:"AccessKeyId" xml:"AccessKeyId"`
+	AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
+	SecurityToken   string `json:"SecurityToken" xml:"SecurityToken"`
+	Expiration      string `json:"Expiration" xml:"Expiration"`
+}
+
+func newEcsRAMRoleCredential(roleName string, inAdvanceScale float64, runtime *utils.Runtime) *EcsRAMRoleCredential {
+	credentialUpdater := new(credentialUpdater)
+	if inAdvanceScale < 1 && inAdvanceScale > 0 {
+		credentialUpdater.inAdvanceScale = inAdvanceScale
+	}
+	return &EcsRAMRoleCredential{
+		RoleName:          roleName,
+		credentialUpdater: credentialUpdater,
+		runtime:           runtime,
+	}
+}
+
+// GetAccessKeyId returns EcsRAMRoleCredential's AccessKeyId.
+// If the AccessKeyId does not exist or is out of date, the function updates it.
+func (e *EcsRAMRoleCredential) GetAccessKeyId() (*string, error) {
+	if e.sessionCredential == nil || e.needUpdateCredential() {
+		err := e.updateCredential()
+		if err != nil {
+			if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) {
+				return &e.sessionCredential.AccessKeyId, nil
+			}
+			return tea.String(""), err
+		}
+	}
+	return tea.String(e.sessionCredential.AccessKeyId), nil
+}
+
+// GetAccessKeySecret returns EcsRAMRoleCredential's AccessKeySecret.
+// If the AccessKeySecret does not exist or is out of date, the function updates it.
+func (e *EcsRAMRoleCredential) GetAccessKeySecret() (*string, error) {
+	if e.sessionCredential == nil || e.needUpdateCredential() {
+		err := e.updateCredential()
+		if err != nil {
+			if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) {
+				return &e.sessionCredential.AccessKeySecret, nil
+			}
+			return tea.String(""), err
+		}
+	}
+	return tea.String(e.sessionCredential.AccessKeySecret), nil
+}
+
+// GetSecurityToken returns EcsRAMRoleCredential's SecurityToken.
+// If the SecurityToken does not exist or is out of date, the function updates it.
+func (e *EcsRAMRoleCredential) GetSecurityToken() (*string, error) {
+	if e.sessionCredential == nil || e.needUpdateCredential() {
+		err := e.updateCredential()
+		if err != nil {
+			if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) {
+				return &e.sessionCredential.SecurityToken, nil
+			}
+			return tea.String(""), err
+		}
+	}
+	return tea.String(e.sessionCredential.SecurityToken), nil
+}
+
+// GetBearerToken is useless for EcsRAMRoleCredential
func (e *EcsRAMRoleCredential) GetBearerToken() *string {
+	return tea.String("")
+}
+
+// GetType returns EcsRAMRoleCredential's type
+func (e *EcsRAMRoleCredential) GetType() *string {
+	return tea.String("ecs_ram_role")
+}
+
+func getRoleName() (string, error) {
+	runtime := utils.NewRuntime(1, 1, "", "")
+	request := request.NewCommonRequest()
+	request.URL = securityCredURL
+	request.Method = "GET"
+	content, err := doAction(request, runtime)
+	if err != nil {
+		return "", err
+	}
+	return string(content), nil
+}
+
+func (e *EcsRAMRoleCredential) updateCredential() (err error) {
+	if e.runtime == nil {
+		e.runtime = new(utils.Runtime)
+	}
+	request := request.NewCommonRequest()
+	if e.RoleName == "" {
+		e.RoleName, err = getRoleName()
+		if err != nil {
+			return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
+		}
+	}
+	request.URL = securityCredURL + e.RoleName
+	request.Method = "GET"
+	content, err := doAction(request, e.runtime)
+	if err != nil {
+		return fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
+	}
+	var resp *ecsRAMRoleResponse
+	err = json.Unmarshal(content, &resp)
+	if err != nil {
+		return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error())
+	}
+	if resp.Code != "Success" {
+		return fmt.Errorf("refresh Ecs sts token err: Code is not Success")
+	}
+	if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" {
+		return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration)
+	}
+
+	expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration)
+	e.lastUpdateTimestamp = time.Now().Unix()
+	e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
+	e.sessionCredential = &sessionCredential{
+		AccessKeyId:     resp.AccessKeyId,
+		AccessKeySecret: resp.AccessKeySecret,
+		SecurityToken:   resp.SecurityToken,
+	}
+
+	return
+}
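When `RoleName` is left empty, the credential first lists the instance's role name from the metadata endpoint above, then fetches a session credential for it. A minimal usage sketch against the exported API vendored in this diff (the role name is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/alibabacloud-go/tea/tea"
	"github.com/aliyun/credentials-go/credentials"
)

func main() {
	// "my-ecs-role" is hypothetical; leave RoleName unset to let the
	// credential discover it via the instance metadata service.
	cfg := &credentials.Config{
		Type:     tea.String("ecs_ram_role"),
		RoleName: tea.String("my-ecs-role"),
	}
	cred, err := credentials.NewCredential(cfg)
	if err != nil {
		panic(err)
	}
	accessKeyId, _ := cred.GetAccessKeyId() // refreshes on first use
	fmt.Println(tea.StringValue(accessKeyId))
}
```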
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
new file mode 100644
index 0000000000..89df42f8c3
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/env_provider.go
@@ -0,0 +1,47 @@
+package credentials
+
+import (
+	"errors"
+	"os"
+
+	"github.com/alibabacloud-go/tea/tea"
+)
+
+type envProvider struct{}
+
+var providerEnv = new(envProvider)
+
+const (
+	// EnvVarAccessKeyId is the name of the ALIBABA_CLOUD_ACCESS_KEY_Id environment
+	// variable (note the lowercase "d"), checked as a fallback when the
+	// ALIBABA_CLOUD_ACCESS_KEY_ID variable is not set.
+	EnvVarAccessKeyId    = "ALIBABA_CLOUD_ACCESS_KEY_Id"
+	EnvVarAccessKeyIdNew = "ALIBABA_CLOUD_ACCESS_KEY_ID"
+	// EnvVarAccessKeySecret is the name of the ALIBABA_CLOUD_ACCESS_KEY_SECRET environment variable
+	EnvVarAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
+)
+
+func newEnvProvider() Provider {
+	return &envProvider{}
+}
+
+func (p *envProvider) resolve() (*Config, error) {
+	accessKeyId, ok1 := os.LookupEnv(EnvVarAccessKeyIdNew)
+	if !ok1 || accessKeyId == "" {
+		accessKeyId, ok1 = os.LookupEnv(EnvVarAccessKeyId)
+	}
+	accessKeySecret, ok2 := os.LookupEnv(EnvVarAccessKeySecret)
+	if !ok1 || !ok2 {
+		return nil, nil
+	}
+	if accessKeyId == "" {
+		return nil, errors.New(EnvVarAccessKeyIdNew + " or " + EnvVarAccessKeyId + " cannot be empty")
+	}
+	if accessKeySecret == "" {
+		return nil, errors.New(EnvVarAccessKeySecret + " cannot be empty")
+	}
+	config := &Config{
+		Type:            tea.String("access_key"),
+		AccessKeyId:     tea.String(accessKeyId),
+		AccessKeySecret: tea.String(accessKeySecret),
+	}
+	return config, nil
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
new file mode 100644
index 0000000000..7e2ea07bb7
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/instance_provider.go
@@ -0,0 +1,28 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/alibabacloud-go/tea/tea"
+)
+
+type instanceCredentialsProvider struct{}
+
+var providerInstance = new(instanceCredentialsProvider)
+
+func newInstanceCredentialsProvider() Provider {
+	return &instanceCredentialsProvider{}
+}
+
+func (p *instanceCredentialsProvider) resolve() (*Config, error) {
+	roleName, ok := os.LookupEnv(ENVEcsMetadata)
+	if !ok {
+		return nil, nil
+	}
+
+	config := &Config{
+		Type:     tea.String("ecs_ram_role"),
+		RoleName: tea.String(roleName),
+	}
+	return config, nil
+}
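Both providers above feed the default chain (environment, then profile, then ECS metadata; see provider_chain.go later in this diff), which is what `NewCredential(nil)` walks. A sketch with hypothetical values:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aliyun/credentials-go/credentials"
)

func main() {
	// Hypothetical values; with both variables set, the env provider
	// resolves first and the profile and ECS providers are never consulted.
	os.Setenv("ALIBABA_CLOUD_ACCESS_KEY_ID", "LTAI4FExample")
	os.Setenv("ALIBABA_CLOUD_ACCESS_KEY_SECRET", "secretExample")

	cred, err := credentials.NewCredential(nil) // nil config: use the default chain
	if err != nil {
		panic(err)
	}
	accessKeyId, err := cred.GetAccessKeyId()
	if err != nil {
		panic(err)
	}
	fmt.Println(*accessKeyId) // LTAI4FExample
}
```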
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go
new file mode 100644
index 0000000000..76192cbeef
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_credential.go
@@ -0,0 +1,178 @@
+package credentials
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/alibabacloud-go/tea/tea"
+	"github.com/aliyun/credentials-go/credentials/request"
+	"github.com/aliyun/credentials-go/credentials/utils"
+)
+
+const defaultOIDCDurationSeconds = 3600
+
+// OIDCCredential is a kind of credentials
+type OIDCCredential struct {
+	*credentialUpdater
+	AccessKeyId           string
+	AccessKeySecret       string
+	RoleArn               string
+	OIDCProviderArn       string
+	OIDCTokenFilePath     string
+	Policy                string
+	RoleSessionName       string
+	RoleSessionExpiration int
+	sessionCredential     *sessionCredential
+	runtime               *utils.Runtime
+}
+
+type OIDCResponse struct {
+	Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"`
+}
+
+type OIDCcredentialsInResponse struct {
+	AccessKeyId     string `json:"AccessKeyId" xml:"AccessKeyId"`
+	AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
+	SecurityToken   string `json:"SecurityToken" xml:"SecurityToken"`
+	Expiration      string `json:"Expiration" xml:"Expiration"`
+}
+
+func newOIDCRoleArnCredential(accessKeyId, accessKeySecret, roleArn, OIDCProviderArn, OIDCTokenFilePath, RoleSessionName, policy string, RoleSessionExpiration int, runtime *utils.Runtime) *OIDCCredential {
+	return &OIDCCredential{
+		AccessKeyId:           accessKeyId,
+		AccessKeySecret:       accessKeySecret,
+		RoleArn:               roleArn,
+		OIDCProviderArn:       OIDCProviderArn,
+		OIDCTokenFilePath:     OIDCTokenFilePath,
+		RoleSessionName:       RoleSessionName,
+		Policy:                policy,
+		RoleSessionExpiration: RoleSessionExpiration,
+		credentialUpdater:     new(credentialUpdater),
+		runtime:               runtime,
+	}
+}
+
+// GetAccessKeyId returns OIDCCredential's AccessKeyId.
+// If the AccessKeyId does not exist or is out of date, the function updates it.
+func (r *OIDCCredential) GetAccessKeyId() (*string, error) {
+	if r.sessionCredential == nil || r.needUpdateCredential() {
+		err := r.updateCredential()
+		if err != nil {
+			return tea.String(""), err
+		}
+	}
+	return tea.String(r.sessionCredential.AccessKeyId), nil
+}
+
+// GetAccessKeySecret returns OIDCCredential's AccessKeySecret.
+// If the AccessKeySecret does not exist or is out of date, the function updates it.
+func (r *OIDCCredential) GetAccessKeySecret() (*string, error) {
+	if r.sessionCredential == nil || r.needUpdateCredential() {
+		err := r.updateCredential()
+		if err != nil {
+			return tea.String(""), err
+		}
+	}
+	return tea.String(r.sessionCredential.AccessKeySecret), nil
+}
+
+// GetSecurityToken returns OIDCCredential's SecurityToken.
+// If the SecurityToken does not exist or is out of date, the function updates it.
+func (r *OIDCCredential) GetSecurityToken() (*string, error) {
+	if r.sessionCredential == nil || r.needUpdateCredential() {
+		err := r.updateCredential()
+		if err != nil {
+			return tea.String(""), err
+		}
+	}
+	return tea.String(r.sessionCredential.SecurityToken), nil
+}
+
+// GetBearerToken is useless for OIDCCredential
+func (r *OIDCCredential) GetBearerToken() *string {
+	return tea.String("")
+}
+
+// GetType returns OIDCCredential's type
+func (r *OIDCCredential) GetType() *string {
+	return tea.String("oidc_role_arn")
+}
+
+func (r *OIDCCredential) GetOIDCToken(OIDCTokenFilePath string) *string {
+	tokenPath := OIDCTokenFilePath
+	_, err := os.Stat(tokenPath)
+	if os.IsNotExist(err) {
+		tokenPath = os.Getenv("ALIBABA_CLOUD_OIDC_TOKEN_FILE")
+		if tokenPath == "" {
+			return nil
+		}
+	}
+	byt, err := ioutil.ReadFile(tokenPath)
+	if err != nil {
+		return nil
+	}
+	return tea.String(string(byt))
+}
+
+func (r *OIDCCredential) updateCredential() (err error) {
+	if r.runtime == nil {
+		r.runtime = new(utils.Runtime)
+	}
+	request := request.NewCommonRequest()
+	request.Domain = "sts.aliyuncs.com"
+	request.Scheme = "HTTPS"
+	request.Method = "POST"
+	request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601()
+	request.QueryParams["Action"] = "AssumeRoleWithOIDC"
+	request.QueryParams["Format"] = "JSON"
+	request.BodyParams["RoleArn"] = r.RoleArn
+	request.BodyParams["OIDCProviderArn"] = r.OIDCProviderArn
+	token := r.GetOIDCToken(r.OIDCTokenFilePath)
+	request.BodyParams["OIDCToken"] = tea.StringValue(token)
+	if r.Policy != "" {
+		request.QueryParams["Policy"] = r.Policy
+	}
+	request.QueryParams["RoleSessionName"] = r.RoleSessionName
+	request.QueryParams["Version"] = "2015-04-01"
+	request.QueryParams["SignatureNonce"] = utils.GetUUID()
+	if r.AccessKeyId != "" && r.AccessKeySecret != "" {
+		signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&")
+		request.QueryParams["Signature"] = signature
+		request.QueryParams["AccessKeyId"] = r.AccessKeyId
+		request.QueryParams["AccessKeySecret"] = r.AccessKeySecret
+	}
+	request.Headers["Host"] = request.Domain
+	request.Headers["Accept-Encoding"] = "identity"
+	request.Headers["content-type"] = "application/x-www-form-urlencoded"
+	request.URL = 
request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error()) + } + var resp *OIDCResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.Credentials == nil { + return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty") + } + respCredentials := resp.Credentials + if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" { + return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration) + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: respCredentials.AccessKeyId, + AccessKeySecret: respCredentials.AccessKeySecret, + SecurityToken: respCredentials.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/oidc_token b/vendor/github.com/aliyun/credentials-go/credentials/oidc_token new file mode 100644 index 0000000000..653e068dfd --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/oidc_token @@ -0,0 +1 @@ +test_long_oidc_token_eyJhbGciOiJSUzI1NiIsImtpZCI6ImFQaXlpNEVGSU8wWnlGcFh1V0psQUNWbklZVlJsUkNmM2tlSzNMUlhWT1UifQ.eyJhdWQiOlsic3RzLmFsaXl1bmNzLmNvbSJdLCJleHAiOjE2NDUxMTk3ODAsImlhdCI6MTY0NTA4Mzc4MCwiaXNzIjoiaHR0cHM6Ly9vaWRjLWFjay1jbi1oYW5nemhvdS5vc3MtY24taGFuZ3pob3UtaW50ZXJuYWwuYWxpeXVuY3MuY29tL2NmMWQ4ZGIwMjM0ZDk0YzEyOGFiZDM3MTc4NWJjOWQxNSIsImt1YmVybmV0ZXMuaW8iOnsibmFtZXNwYWNlIjoidGVzdC1ycnNhIiwicG9kIjp7Im5hbWUiOiJydW4tYXMtcm9vdCIsInVpZCI6ImIzMGI0MGY2LWNiZTAtNGY0Yy1hZGYyLWM1OGQ4ZmExZTAxMCJ9LCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoidXNlcjEiLCJ1aWQiOiJiZTEyMzdjYS01MTY4LTQyMzYtYWUyMC00NDM1YjhmMGI4YzAifX0sIm5iZiI6MTY0NTA4Mzc4MCwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OnRlc3QtcnJzYTp1c2VyMSJ9.XGP-wgLj-iMiAHjLe0lZLh7y48Qsj9HzsEbNh706WwerBoxnssdsyGFb9lzd2FyM8CssbAOCstr7OuAMWNdJmDZgpiOGGSbQ-KXXmbfnIS4ix-V3pQF6LVBFr7xJlj20J6YY89um3rv_04t0iCGxKWs2ZMUyU1FbZpIPRep24LVKbUz1saiiVGgDBTIZdHA13Z-jUvYAnsxK_Kj5tc1K-IuQQU0IwSKJh5OShMcdPugMV5LwTL3ogCikfB7yljq5vclBhCeF2lXLIibvwF711TOhuJ5lMlh-a2KkIgwBHhANg_U9k4Mt_VadctfUGc4hxlSbBD0w9o9mDGKwgGmW5Q \ No newline at end of file diff --git a/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go new file mode 100644 index 0000000000..d6292b0351 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/profile_provider.go @@ -0,0 +1,350 @@ +package credentials + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + + "github.com/alibabacloud-go/tea/tea" + ini "gopkg.in/ini.v1" +) + +type profileProvider struct { + Profile string +} + +var providerProfile = newProfileProvider() + +var hookOS = func(goos string) string { + return goos +} + +var hookState = func(info os.FileInfo, err error) (os.FileInfo, error) { + return info, err +} + +// NewProfileProvider receive zero or more parameters, +// when length of name is 0, the value of field Profile will be "default", +// and when there are 
multiple inputs, the function takes the
+// first one and discards the others.
+func newProfileProvider(name ...string) Provider {
+	p := new(profileProvider)
+	if len(name) == 0 {
+		p.Profile = "default"
+	} else {
+		p.Profile = name[0]
+	}
+	return p
+}
+
+// resolve implements the Provider interface.
+// When the credential type is rsa_key_pair, the content of the private_key file
+// must be parseable directly into the string that the newRsaKeyPairCredential
+// function needs.
+func (p *profileProvider) resolve() (*Config, error) {
+	path, ok := os.LookupEnv(ENVCredentialFile)
+	if !ok {
+		// Assign to the outer path: a short variable declaration here would
+		// shadow it and leave path empty for the getType call below.
+		defaultPath, err := checkDefaultPath()
+		if err != nil {
+			return nil, err
+		}
+		if defaultPath == "" {
+			return nil, nil
+		}
+		path = defaultPath
+	} else if path == "" {
+		return nil, errors.New(ENVCredentialFile + " cannot be empty")
+	}
+
+	value, section, err := getType(path, p.Profile)
+	if err != nil {
+		return nil, err
+	}
+	switch value.String() {
+	case "access_key":
+		config, err := getAccessKey(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	case "sts":
+		config, err := getSTS(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	case "bearer":
+		config, err := getBearerToken(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	case "ecs_ram_role":
+		config, err := getEcsRAMRole(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	case "ram_role_arn":
+		config, err := getRAMRoleArn(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	case "rsa_key_pair":
+		config, err := getRSAKeyPair(section)
+		if err != nil {
+			return nil, err
+		}
+		return config, nil
+	default:
+		return nil, errors.New("Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair")
+	}
+}
+
+func getRSAKeyPair(section *ini.Section) (*Config, error) {
+	publicKeyId, err := section.GetKey("public_key_id")
+	if err != nil {
+		return nil, errors.New("Missing required public_key_id option in profile for rsa_key_pair")
+	}
+	if publicKeyId.String() == "" {
+		return nil, errors.New("public_key_id cannot be empty")
+	}
+	privateKeyFile, err := section.GetKey("private_key_file")
+	if err != nil {
+		return nil, errors.New("Missing required private_key_file option in profile for rsa_key_pair")
+	}
+	if privateKeyFile.String() == "" {
+		return nil, errors.New("private_key_file cannot be empty")
+	}
+	sessionExpiration, _ := section.GetKey("session_expiration")
+	expiration := 0
+	if sessionExpiration != nil {
+		expiration, err = sessionExpiration.Int()
+		if err != nil {
+			return nil, errors.New("session_expiration must be an int")
+		}
+	}
+	config := &Config{
+		Type:              tea.String("rsa_key_pair"),
+		PublicKeyId:       tea.String(publicKeyId.String()),
+		PrivateKeyFile:    tea.String(privateKeyFile.String()),
+		SessionExpiration: tea.Int(expiration),
+	}
+	err = setRuntimeToConfig(config, section)
+	if err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+func getRAMRoleArn(section *ini.Section) (*Config, error) {
+	accessKeyId, err := section.GetKey("access_key_id")
+	if err != nil {
+		return nil, errors.New("Missing required access_key_id option in profile for ram_role_arn")
+	}
+	if accessKeyId.String() == "" {
+		return nil, errors.New("access_key_id cannot be empty")
+	}
+	accessKeySecret, err := section.GetKey("access_key_secret")
+	if err != nil {
+		return nil, errors.New("Missing required access_key_secret option in profile for ram_role_arn")
+	}
+	if accessKeySecret.String() == "" {
+		return nil, 
errors.New("access_key_secret cannot be empty") + } + roleArn, err := section.GetKey("role_arn") + if err != nil { + return nil, errors.New("Missing required role_arn option in profile for ram_role_arn") + } + if roleArn.String() == "" { + return nil, errors.New("role_arn cannot be empty") + } + roleSessionName, err := section.GetKey("role_session_name") + if err != nil { + return nil, errors.New("Missing required role_session_name option in profile for ram_role_arn") + } + if roleSessionName.String() == "" { + return nil, errors.New("role_session_name cannot be empty") + } + roleSessionExpiration, _ := section.GetKey("role_session_expiration") + expiration := 0 + if roleSessionExpiration != nil { + expiration, err = roleSessionExpiration.Int() + if err != nil { + return nil, errors.New("role_session_expiration must be an int") + } + } + config := &Config{ + Type: tea.String("ram_role_arn"), + AccessKeyId: tea.String(accessKeyId.String()), + AccessKeySecret: tea.String(accessKeySecret.String()), + RoleArn: tea.String(roleArn.String()), + RoleSessionName: tea.String(roleSessionName.String()), + RoleSessionExpiration: tea.Int(expiration), + } + err = setRuntimeToConfig(config, section) + if err != nil { + return nil, err + } + return config, nil +} + +func getEcsRAMRole(section *ini.Section) (*Config, error) { + roleName, _ := section.GetKey("role_name") + config := &Config{ + Type: tea.String("ecs_ram_role"), + } + if roleName != nil { + config.RoleName = tea.String(roleName.String()) + } + err := setRuntimeToConfig(config, section) + if err != nil { + return nil, err + } + return config, nil +} + +func getBearerToken(section *ini.Section) (*Config, error) { + bearerToken, err := section.GetKey("bearer_token") + if err != nil { + return nil, errors.New("Missing required bearer_token option in profile for bearer") + } + if bearerToken.String() == "" { + return nil, errors.New("bearer_token cannot be empty") + } + config := &Config{ + Type: tea.String("bearer"), + BearerToken: tea.String(bearerToken.String()), + } + return config, nil +} + +func getSTS(section *ini.Section) (*Config, error) { + accesskeyid, err := section.GetKey("access_key_id") + if err != nil { + return nil, errors.New("Missing required access_key_id option in profile for sts") + } + if accesskeyid.String() == "" { + return nil, errors.New("access_key_id cannot be empty") + } + accessKeySecret, err := section.GetKey("access_key_secret") + if err != nil { + return nil, errors.New("Missing required access_key_secret option in profile for sts") + } + if accessKeySecret.String() == "" { + return nil, errors.New("access_key_secret cannot be empty") + } + securityToken, err := section.GetKey("security_token") + if err != nil { + return nil, errors.New("Missing required security_token option in profile for sts") + } + if securityToken.String() == "" { + return nil, errors.New("security_token cannot be empty") + } + config := &Config{ + Type: tea.String("sts"), + AccessKeyId: tea.String(accesskeyid.String()), + AccessKeySecret: tea.String(accessKeySecret.String()), + SecurityToken: tea.String(securityToken.String()), + } + return config, nil +} + +func getAccessKey(section *ini.Section) (*Config, error) { + accesskeyid, err := section.GetKey("access_key_id") + if err != nil { + return nil, errors.New("Missing required access_key_id option in profile for access_key") + } + if accesskeyid.String() == "" { + return nil, errors.New("access_key_id cannot be empty") + } + accessKeySecret, err := section.GetKey("access_key_secret") + if 
err != nil {
+		return nil, errors.New("Missing required access_key_secret option in profile for access_key")
+	}
+	if accessKeySecret.String() == "" {
+		return nil, errors.New("access_key_secret cannot be empty")
+	}
+	config := &Config{
+		Type:            tea.String("access_key"),
+		AccessKeyId:     tea.String(accesskeyid.String()),
+		AccessKeySecret: tea.String(accessKeySecret.String()),
+	}
+	return config, nil
+}
+
+func getType(path, profile string) (*ini.Key, *ini.Section, error) {
+	ini, err := ini.Load(path)
+	if err != nil {
+		return nil, nil, errors.New("ERROR: Can not open file " + err.Error())
+	}
+
+	section, err := ini.GetSection(profile)
+	if err != nil {
+		return nil, nil, errors.New("ERROR: Can not load section " + err.Error())
+	}
+
+	value, err := section.GetKey("type")
+	if err != nil {
+		return nil, nil, errors.New("Missing required type option " + err.Error())
+	}
+	return value, section, nil
+}
+
+func getHomePath() string {
+	if hookOS(runtime.GOOS) == "windows" {
+		path, ok := os.LookupEnv("USERPROFILE")
+		if !ok {
+			return ""
+		}
+		return path
+	}
+	path, ok := os.LookupEnv("HOME")
+	if !ok {
+		return ""
+	}
+	return path
+}
+
+func checkDefaultPath() (path string, err error) {
+	path = getHomePath()
+	if path == "" {
+		return "", errors.New("The default credential file path is invalid")
+	}
+	path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1)
+	// A missing default file is not an error; it simply disables this provider.
+	_, err = hookState(os.Stat(path))
+	if err != nil {
+		return "", nil
+	}
+	return path, nil
+}
+
+func setRuntimeToConfig(config *Config, section *ini.Section) error {
+	rawTimeout, _ := section.GetKey("timeout")
+	rawConnectTimeout, _ := section.GetKey("connect_timeout")
+	rawProxy, _ := section.GetKey("proxy")
+	rawHost, _ := section.GetKey("host")
+	if rawProxy != nil {
+		config.Proxy = tea.String(rawProxy.String())
+	}
+	if rawConnectTimeout != nil {
+		connectTimeout, err := rawConnectTimeout.Int()
+		if err != nil {
+			return fmt.Errorf("Please set connect_timeout with an int value")
+		}
+		config.ConnectTimeout = tea.Int(connectTimeout)
+	}
+	if rawTimeout != nil {
+		timeout, err := rawTimeout.Int()
+		if err != nil {
+			return fmt.Errorf("Please set timeout with an int value")
+		}
+		config.Timeout = tea.Int(timeout)
+	}
+	if rawHost != nil {
+		config.Host = tea.String(rawHost.String())
+	}
+	return nil
+}
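For reference, the profile provider reads an ini file from $ALIBABA_CLOUD_CREDENTIALS_FILE (falling back to ~/.alibabacloud/credentials). An illustrative profile that the access_key branch above would accept (all values are placeholders):

```ini
[default]
type = access_key
access_key_id = LTAI4FExample
access_key_secret = secretExample
```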
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider.go b/vendor/github.com/aliyun/credentials-go/credentials/provider.go
new file mode 100644
index 0000000000..f9d4ae5748
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/provider.go
@@ -0,0 +1,13 @@
+package credentials
+
+// Environment variables and the default path that may be used by the providers
+const (
+	ENVCredentialFile  = "ALIBABA_CLOUD_CREDENTIALS_FILE"
+	ENVEcsMetadata     = "ALIBABA_CLOUD_ECS_METADATA"
+	PATHCredentialFile = "~/.alibabacloud/credentials"
+)
+
+// Provider is the interface to implement when you want to customize how credentials are resolved.
+type Provider interface {
+	resolve() (*Config, error)
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
new file mode 100644
index 0000000000..2764a701cd
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/provider_chain.go
@@ -0,0 +1,32 @@
+package credentials
+
+import (
+	"errors"
+)
+
+type providerChain struct {
+	Providers []Provider
+}
+
+var defaultproviders = []Provider{providerEnv, providerProfile, providerInstance}
+var defaultChain = newProviderChain(defaultproviders)
+
+func newProviderChain(providers []Provider) Provider {
+	return &providerChain{
+		Providers: providers,
+	}
+}
+
+func (p *providerChain) resolve() (*Config, error) {
+	for _, provider := range p.Providers {
+		config, err := provider.resolve()
+		if err != nil {
+			return nil, err
+		} else if config == nil {
+			continue
+		}
+		return config, err
+	}
+	return nil, errors.New("No credential found")
+}
diff --git a/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go b/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
new file mode 100644
index 0000000000..47a92a7663
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/request/common_request.go
@@ -0,0 +1,63 @@
+package request
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/aliyun/credentials-go/credentials/utils"
+)
+
+// CommonRequest is for requesting credential
+type CommonRequest struct {
+	Scheme         string
+	Method         string
+	Domain         string
+	RegionId       string
+	URL            string
+	ReadTimeout    time.Duration
+	ConnectTimeout time.Duration
+	isInsecure     *bool
+	BodyParams     map[string]string
+	userAgent      map[string]string
+	QueryParams    map[string]string
+	Headers        map[string]string
+
+	queries string
+}
+
+// NewCommonRequest returns a CommonRequest
+func NewCommonRequest() *CommonRequest {
+	return &CommonRequest{
+		BodyParams:  make(map[string]string),
+		QueryParams: make(map[string]string),
+		Headers:     make(map[string]string),
+	}
+}
+
+// BuildURL returns a url
+func (request *CommonRequest) BuildURL() string {
+	url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain)
+	request.queries = "/?" 
+ utils.GetURLFormedMap(request.QueryParams) + return url + request.queries +} + +// BuildStringToSign returns BuildStringToSign +func (request *CommonRequest) BuildStringToSign() (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.QueryParams { + signParams[key] = value + } + + for key, value := range request.BodyParams { + signParams[key] = value + } + stringToSign = utils.GetURLFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.Method + "&%2F&" + stringToSign + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go b/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go new file mode 100644 index 0000000000..ef489c11d9 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/response/common_response.go @@ -0,0 +1,53 @@ +package response + +import ( + "io" + "io/ioutil" + "net/http" +) + +var hookReadAll = func(fn func(r io.Reader) (b []byte, err error)) func(r io.Reader) (b []byte, err error) { + return fn +} + +// CommonResponse is for storing message of httpResponse +type CommonResponse struct { + httpStatus int + httpHeaders map[string][]string + httpContentString string + httpContentBytes []byte +} + +// ParseFromHTTPResponse assigns for CommonResponse, returns err when body is too large. +func (resp *CommonResponse) ParseFromHTTPResponse(httpResponse *http.Response) (err error) { + defer httpResponse.Body.Close() + body, err := hookReadAll(ioutil.ReadAll)(httpResponse.Body) + if err != nil { + return + } + resp.httpStatus = httpResponse.StatusCode + resp.httpHeaders = httpResponse.Header + resp.httpContentBytes = body + resp.httpContentString = string(body) + return +} + +// GetHTTPStatus returns httpStatus +func (resp *CommonResponse) GetHTTPStatus() int { + return resp.httpStatus +} + +// GetHTTPHeaders returns httpresponse's headers +func (resp *CommonResponse) GetHTTPHeaders() map[string][]string { + return resp.httpHeaders +} + +// GetHTTPContentString return body content as string +func (resp *CommonResponse) GetHTTPContentString() string { + return resp.httpContentString +} + +// GetHTTPContentBytes return body content as []byte +func (resp *CommonResponse) GetHTTPContentBytes() []byte { + return resp.httpContentBytes +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go new file mode 100644 index 0000000000..3e4310eca6 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/rsa_key_pair_credential.go @@ -0,0 +1,145 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + "github.com/aliyun/credentials-go/credentials/utils" +) + +// RsaKeyPairCredential is a kind of credentials +type RsaKeyPairCredential struct { + *credentialUpdater + PrivateKey string + PublicKeyId string + SessionExpiration int + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type rsaKeyPairResponse struct { + SessionAccessKey *sessionAccessKey `json:"SessionAccessKey" xml:"SessionAccessKey"` +} + +type sessionAccessKey struct { + SessionAccessKeyId 
string `json:"SessionAccessKeyId" xml:"SessionAccessKeyId"` + SessionAccessKeySecret string `json:"SessionAccessKeySecret" xml:"SessionAccessKeySecret"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int, runtime *utils.Runtime) *RsaKeyPairCredential { + return &RsaKeyPairCredential{ + PrivateKey: privateKey, + PublicKeyId: publicKeyId, + SessionExpiration: sessionExpiration, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +// GetAccessKeyId reutrns RsaKeyPairCredential's AccessKeyId +// if AccessKeyId is not exist or out of date, the function will update it. +func (r *RsaKeyPairCredential) GetAccessKeyId() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeyId), nil +} + +// GetAccessSecret reutrns RsaKeyPairCredential's AccessKeySecret +// if AccessKeySecret is not exist or out of date, the function will update it. +func (r *RsaKeyPairCredential) GetAccessKeySecret() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeySecret), nil +} + +// GetSecurityToken is useless RsaKeyPairCredential +func (r *RsaKeyPairCredential) GetSecurityToken() (*string, error) { + return tea.String(""), nil +} + +// GetBearerToken is useless for RsaKeyPairCredential +func (r *RsaKeyPairCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType reutrns RsaKeyPairCredential's type +func (r *RsaKeyPairCredential) GetType() *string { + return tea.String("rsa_key_pair") +} + +func (r *RsaKeyPairCredential) updateCredential() (err error) { + if r.runtime == nil { + r.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.Domain = "sts.aliyuncs.com" + if r.runtime.Host != "" { + request.Domain = r.runtime.Host + } + request.Scheme = "HTTPS" + request.Method = "GET" + request.QueryParams["AccessKeyId"] = r.PublicKeyId + request.QueryParams["Action"] = "GenerateSessionAccessKey" + request.QueryParams["Format"] = "JSON" + if r.SessionExpiration > 0 { + if r.SessionExpiration >= 900 && r.SessionExpiration <= 3600 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.SessionExpiration) + } else { + err = errors.New("[InvalidParam]:Key Pair session duration should be in the range of 15min - 1Hr") + return + } + } else { + request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds) + } + request.QueryParams["SignatureMethod"] = "SHA256withRSA" + request.QueryParams["SignatureType"] = "PRIVATEKEY" + request.QueryParams["SignatureVersion"] = "1.0" + request.QueryParams["Version"] = "2015-04-01" + request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + request.QueryParams["SignatureNonce"] = utils.GetUUID() + signature := utils.Sha256WithRsa(request.BuildStringToSign(), r.PrivateKey) + request.QueryParams["Signature"] = signature + request.Headers["Host"] = request.Domain + request.Headers["Accept-Encoding"] = "identity" + request.URL = request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh KeyPair err: %s", err.Error()) + } + var resp *rsaKeyPairResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh KeyPair 
err: Json Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.SessionAccessKey == nil { + return fmt.Errorf("refresh KeyPair err: SessionAccessKey is empty") + } + sessionAccessKey := resp.SessionAccessKey + if sessionAccessKey.SessionAccessKeyId == "" || sessionAccessKey.SessionAccessKeySecret == "" || sessionAccessKey.Expiration == "" { + return fmt.Errorf("refresh KeyPair err: SessionAccessKeyId: %v, SessionAccessKeySecret: %v, Expiration: %v", sessionAccessKey.SessionAccessKeyId, sessionAccessKey.SessionAccessKeySecret, sessionAccessKey.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", sessionAccessKey.Expiration) + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: sessionAccessKey.SessionAccessKeyId, + AccessKeySecret: sessionAccessKey.SessionAccessKeySecret, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go new file mode 100644 index 0000000000..dd48dc9295 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/session_credential.go @@ -0,0 +1,7 @@ +package credentials + +type sessionCredential struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go new file mode 100644 index 0000000000..ba07dab498 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/sts_credential.go @@ -0,0 +1,43 @@ +package credentials + +import "github.com/alibabacloud-go/tea/tea" + +// StsTokenCredential is a kind of credentials +type StsTokenCredential struct { + AccessKeyId string + AccessKeySecret string + SecurityToken string +} + +func newStsTokenCredential(accessKeyId, accessKeySecret, securityToken string) *StsTokenCredential { + return &StsTokenCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + SecurityToken: securityToken, + } +} + +// GetAccessKeyId reutrns StsTokenCredential's AccessKeyId +func (s *StsTokenCredential) GetAccessKeyId() (*string, error) { + return tea.String(s.AccessKeyId), nil +} + +// GetAccessSecret reutrns StsTokenCredential's AccessKeySecret +func (s *StsTokenCredential) GetAccessKeySecret() (*string, error) { + return tea.String(s.AccessKeySecret), nil +} + +// GetSecurityToken reutrns StsTokenCredential's SecurityToken +func (s *StsTokenCredential) GetSecurityToken() (*string, error) { + return tea.String(s.SecurityToken), nil +} + +// GetBearerToken is useless StsTokenCredential +func (s *StsTokenCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType reutrns StsTokenCredential's type +func (s *StsTokenCredential) GetType() *string { + return tea.String("sts") +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go new file mode 100644 index 0000000000..f31ba1e327 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/sts_role_arn_credential.go @@ -0,0 +1,163 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "time" + + "github.com/alibabacloud-go/tea/tea" + "github.com/aliyun/credentials-go/credentials/request" + 
"github.com/aliyun/credentials-go/credentials/utils" +) + +const defaultDurationSeconds = 3600 + +// RAMRoleArnCredential is a kind of credentials +type RAMRoleArnCredential struct { + *credentialUpdater + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int + Policy string + sessionCredential *sessionCredential + runtime *utils.Runtime +} + +type ramRoleArnResponse struct { + Credentials *credentialsInResponse `json:"Credentials" xml:"Credentials"` +} + +type credentialsInResponse struct { + AccessKeyId string `json:"AccessKeyId" xml:"AccessKeyId"` + AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"` + SecurityToken string `json:"SecurityToken" xml:"SecurityToken"` + Expiration string `json:"Expiration" xml:"Expiration"` +} + +func newRAMRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int, runtime *utils.Runtime) *RAMRoleArnCredential { + return &RAMRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + Policy: policy, + credentialUpdater: new(credentialUpdater), + runtime: runtime, + } +} + +// GetAccessKeyId reutrns RamRoleArnCredential's AccessKeyId +// if AccessKeyId is not exist or out of date, the function will update it. +func (r *RAMRoleArnCredential) GetAccessKeyId() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeyId), nil +} + +// GetAccessSecret reutrns RamRoleArnCredential's AccessKeySecret +// if AccessKeySecret is not exist or out of date, the function will update it. +func (r *RAMRoleArnCredential) GetAccessKeySecret() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.AccessKeySecret), nil +} + +// GetSecurityToken reutrns RamRoleArnCredential's SecurityToken +// if SecurityToken is not exist or out of date, the function will update it. 
+func (r *RAMRoleArnCredential) GetSecurityToken() (*string, error) { + if r.sessionCredential == nil || r.needUpdateCredential() { + err := r.updateCredential() + if err != nil { + return tea.String(""), err + } + } + return tea.String(r.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless RamRoleArnCredential +func (r *RAMRoleArnCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType reutrns RamRoleArnCredential's type +func (r *RAMRoleArnCredential) GetType() *string { + return tea.String("ram_role_arn") +} + +func (r *RAMRoleArnCredential) updateCredential() (err error) { + if r.runtime == nil { + r.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.Domain = "sts.aliyuncs.com" + request.Scheme = "HTTPS" + request.Method = "GET" + request.QueryParams["AccessKeyId"] = r.AccessKeyId + request.QueryParams["Action"] = "AssumeRole" + request.QueryParams["Format"] = "JSON" + if r.RoleSessionExpiration > 0 { + if r.RoleSessionExpiration >= 900 && r.RoleSessionExpiration <= 3600 { + request.QueryParams["DurationSeconds"] = strconv.Itoa(r.RoleSessionExpiration) + } else { + err = errors.New("[InvalidParam]:Assume Role session duration should be in the range of 15min - 1Hr") + return + } + } else { + request.QueryParams["DurationSeconds"] = strconv.Itoa(defaultDurationSeconds) + } + request.QueryParams["RoleArn"] = r.RoleArn + if r.Policy != "" { + request.QueryParams["Policy"] = r.Policy + } + request.QueryParams["RoleSessionName"] = r.RoleSessionName + request.QueryParams["SignatureMethod"] = "HMAC-SHA1" + request.QueryParams["SignatureVersion"] = "1.0" + request.QueryParams["Version"] = "2015-04-01" + request.QueryParams["Timestamp"] = utils.GetTimeInFormatISO8601() + request.QueryParams["SignatureNonce"] = utils.GetUUID() + signature := utils.ShaHmac1(request.BuildStringToSign(), r.AccessKeySecret+"&") + request.QueryParams["Signature"] = signature + request.Headers["Host"] = request.Domain + request.Headers["Accept-Encoding"] = "identity" + request.URL = request.BuildURL() + content, err := doAction(request, r.runtime) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: %s", err.Error()) + } + var resp *ramRoleArnResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err: Json.Unmarshal fail: %s", err.Error()) + } + if resp == nil || resp.Credentials == nil { + return fmt.Errorf("refresh RoleArn sts token err: Credentials is empty") + } + respCredentials := resp.Credentials + if respCredentials.AccessKeyId == "" || respCredentials.AccessKeySecret == "" || respCredentials.SecurityToken == "" || respCredentials.Expiration == "" { + return fmt.Errorf("refresh RoleArn sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", respCredentials.AccessKeyId, respCredentials.AccessKeySecret, respCredentials.SecurityToken, respCredentials.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", respCredentials.Expiration) + r.lastUpdateTimestamp = time.Now().Unix() + r.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + r.sessionCredential = &sessionCredential{ + AccessKeyId: respCredentials.AccessKeyId, + AccessKeySecret: respCredentials.AccessKeySecret, + SecurityToken: respCredentials.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go new file 
mode 100644
index 0000000000..e8f303fc76
--- /dev/null
+++ b/vendor/github.com/aliyun/credentials-go/credentials/uri_credential.go
@@ -0,0 +1,125 @@
+package credentials
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/alibabacloud-go/tea/tea"
+	"github.com/aliyun/credentials-go/credentials/request"
+	"github.com/aliyun/credentials-go/credentials/utils"
+)
+
+// URLCredential is a kind of credential
+type URLCredential struct {
+	URL string
+	*credentialUpdater
+	*sessionCredential
+	runtime *utils.Runtime
+}
+
+type URLResponse struct {
+	AccessKeyId     string `json:"AccessKeyId" xml:"AccessKeyId"`
+	AccessKeySecret string `json:"AccessKeySecret" xml:"AccessKeySecret"`
+	SecurityToken   string `json:"SecurityToken" xml:"SecurityToken"`
+	Expiration      string `json:"Expiration" xml:"Expiration"`
+}
+
+func newURLCredential(URL string) *URLCredential {
+	credentialUpdater := new(credentialUpdater)
+	if URL == "" {
+		URL = os.Getenv("ALIBABA_CLOUD_CREDENTIALS_URI")
+	}
+	return &URLCredential{
+		URL:               URL,
+		credentialUpdater: credentialUpdater,
+	}
+}
+
+// GetAccessKeyId returns URLCredential's AccessKeyId.
+// If the AccessKeyId does not exist or is out of date, the function updates it.
+func (e *URLCredential) GetAccessKeyId() (*string, error) {
+	if e.sessionCredential == nil || e.needUpdateCredential() {
+		err := e.updateCredential()
+		if err != nil {
+			if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) {
+				return &e.sessionCredential.AccessKeyId, nil
+			}
+			return tea.String(""), err
+		}
+	}
+	return tea.String(e.sessionCredential.AccessKeyId), nil
+}
+
+// GetAccessKeySecret returns URLCredential's AccessKeySecret.
+// If the AccessKeySecret does not exist or is out of date, the function updates it.
+func (e *URLCredential) GetAccessKeySecret() (*string, error) {
+	if e.sessionCredential == nil || e.needUpdateCredential() {
+		err := e.updateCredential()
+		if err != nil {
+			if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) {
+				return &e.sessionCredential.AccessKeySecret, nil
+			}
+			return tea.String(""), err
+		}
+	}
+	return tea.String(e.sessionCredential.AccessKeySecret), nil
+}
+
+// GetSecurityToken returns URLCredential's SecurityToken.
+// If the SecurityToken does not exist or is out of date, the function updates it.
+func (e *URLCredential) GetSecurityToken() (*string, error) { + if e.sessionCredential == nil || e.needUpdateCredential() { + err := e.updateCredential() + if err != nil { + if e.credentialExpiration > (int(time.Now().Unix()) - int(e.lastUpdateTimestamp)) { + return &e.sessionCredential.SecurityToken, nil + } + return tea.String(""), err + } + } + return tea.String(e.sessionCredential.SecurityToken), nil +} + +// GetBearerToken is useless for URLCredential +func (e *URLCredential) GetBearerToken() *string { + return tea.String("") +} + +// GetType reutrns URLCredential's type +func (e *URLCredential) GetType() *string { + return tea.String("credential_uri") +} + +func (e *URLCredential) updateCredential() (err error) { + if e.runtime == nil { + e.runtime = new(utils.Runtime) + } + request := request.NewCommonRequest() + request.URL = e.URL + request.Method = "GET" + content, err := doAction(request, e.runtime) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + } + var resp *URLResponse + err = json.Unmarshal(content, &resp) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err: Json Unmarshal fail: %s", err.Error()) + } + if resp.AccessKeyId == "" || resp.AccessKeySecret == "" || resp.SecurityToken == "" || resp.Expiration == "" { + return fmt.Errorf("refresh Ecs sts token err: AccessKeyId: %s, AccessKeySecret: %s, SecurityToken: %s, Expiration: %s", resp.AccessKeyId, resp.AccessKeySecret, resp.SecurityToken, resp.Expiration) + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", resp.Expiration) + e.lastUpdateTimestamp = time.Now().Unix() + e.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + e.sessionCredential = &sessionCredential{ + AccessKeyId: resp.AccessKeyId, + AccessKeySecret: resp.AccessKeySecret, + SecurityToken: resp.SecurityToken, + } + + return +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go new file mode 100644 index 0000000000..d4a27c9cd9 --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/runtime.go @@ -0,0 +1,35 @@ +package utils + +import ( + "context" + "net" + "time" +) + +// Runtime is for setting timeout, proxy and host +type Runtime struct { + ReadTimeout int + ConnectTimeout int + Proxy string + Host string +} + +// NewRuntime returns a Runtime +func NewRuntime(readTimeout, connectTimeout int, proxy string, host string) *Runtime { + return &Runtime{ + ReadTimeout: readTimeout, + ConnectTimeout: connectTimeout, + Proxy: proxy, + Host: host, + } +} + +// Timeout is for connect Timeout +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +} diff --git a/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go b/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go new file mode 100644 index 0000000000..7468407fbc --- /dev/null +++ b/vendor/github.com/aliyun/credentials-go/credentials/utils/utils.go @@ -0,0 +1,146 @@ +package utils + +import ( + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "hash" + "io" + rand2 "math/rand" + "net/url" + "time" +) + +type uuid [16]byte + +const letterBytes = 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +var hookRead = func(fn func(p []byte) (n int, err error)) func(p []byte) (n int, err error) { + return fn +} + +var hookRSA = func(fn func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error)) func(rand io.Reader, priv *rsa.PrivateKey, hash crypto.Hash, hashed []byte) ([]byte, error) { + return fn +} + +// GetUUID returns a uuid +func GetUUID() (uuidHex string) { + uuid := newUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +// RandStringBytes returns a rand string +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand2.Intn(len(letterBytes))] + } + return string(b) +} + +// ShaHmac1 return a string which has been hashed +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +// Sha256WithRsa return a string which has been hashed with Rsa +func Sha256WithRsa(source, secret string) string { + decodeString, err := base64.StdEncoding.DecodeString(secret) + if err != nil { + panic(err) + } + private, err := x509.ParsePKCS8PrivateKey(decodeString) + if err != nil { + panic(err) + } + + h := crypto.Hash.New(crypto.SHA256) + h.Write([]byte(source)) + hashed := h.Sum(nil) + signature, err := hookRSA(rsa.SignPKCS1v15)(rand.Reader, private.(*rsa.PrivateKey), + crypto.SHA256, hashed) + if err != nil { + panic(err) + } + + return base64.StdEncoding.EncodeToString(signature) +} + +// GetMD5Base64 returns a string which has been base64 +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +// GetTimeInFormatISO8601 returns a time string +func GetTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +// GetURLFormedMap returns a url encoded string +func GetURLFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func newUUID() uuid { + ns := uuid{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, RandStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns uuid, name string) uuid { + u := uuid{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := hookRead(rand.Read)(dest); err != nil { + panic(err) + } +} + +func (u uuid) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index b6b30537b1..652d6aa727 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,1497 @@ +# Release (2022-09-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions 
+ +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.19.0](service/cognitoidentityprovider/CHANGELOG.md#v1190-2022-09-02) + * **Feature**: This release adds a new "AuthSessionValidity" field to the UserPoolClient in Cognito. Application admins can configure this value for their users' authentication duration, which is currently fixed at 3 minutes, up to 15 minutes. Setting this field will also apply to the SMS MFA authentication flow. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.29.0](service/connect/CHANGELOG.md#v1290-2022-09-02) + * **Feature**: This release adds search APIs for Routing Profiles and Queues, which can be used to search for those resources within a Connect Instance. +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.19.0](service/mediapackage/CHANGELOG.md#v1190-2022-09-02) + * **Feature**: Added support for AES_CTR encryption to CMAF origin endpoints +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.41.0](service/sagemaker/CHANGELOG.md#v1410-2022-09-02) + * **Feature**: This release enables administrators to attribute user activity and API calls from Studio notebooks, Data Wrangler and Canvas to specific users even when users share the same execution IAM role. ExecutionRoleIdentityConfig at Sagemaker domain level enables this feature. + +# Release (2022-09-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.11](service/codegurureviewer/CHANGELOG.md#v11611-2022-09-01) + * **Documentation**: Documentation updates to fix formatting issues in CLI and SDK documentation. +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.0.0](service/controltower/CHANGELOG.md#v100-2022-09-01) + * **Release**: New AWS service client module + * **Feature**: This release contains the first SDK for AWS Control Tower. It introduces a new set of APIs: EnableControl, DisableControl, GetControlOperation, and ListEnabledControls. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.10](service/route53/CHANGELOG.md#v12110-2022-09-01) + * **Documentation**: Documentation updates for Amazon Route 53. + +# Release (2022-08-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.2](service/cloudfront/CHANGELOG.md#v1202-2022-08-31) + * **Documentation**: Update API documentation for CloudFront origin access control (OAC) +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.0](service/identitystore/CHANGELOG.md#v1150-2022-08-31) + * **Feature**: Expand IdentityStore API to support Create, Read, Update, Delete and Get operations for User, Group and GroupMembership resources. +* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.13.0](service/iotthingsgraph/CHANGELOG.md#v1130-2022-08-31) + * **Feature**: This release deprecates all APIs of the ThingsGraph service +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.18.0](service/ivs/CHANGELOG.md#v1180-2022-08-31) + * **Feature**: IVS Merge Fragmented Streams. This release adds support for recordingReconnectWindow field in IVS recordingConfigurations. 
For more information see https://docs.aws.amazon.com/ivs/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.12](service/rdsdata/CHANGELOG.md#v11212-2022-08-31)
+  * **Documentation**: Documentation updates for RDS Data API
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.40.0](service/sagemaker/CHANGELOG.md#v1400-2022-08-31)
+  * **Feature**: SageMaker Inference Recommender now accepts Inference Recommender fields: Domain, Task, Framework, SamplePayloadUrl, SupportedContentTypes, SupportedInstanceTypes, directly in our CreateInferenceRecommendationsJob API through ContainerConfig
+
+# Release (2022-08-30)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.17.0](service/greengrassv2/CHANGELOG.md#v1170-2022-08-30)
+  * **Feature**: Adds topologyFilter to ListInstalledComponentsRequest which allows filtration of components by ROOT or ALL (including root and dependency components). Adds lastStatusChangeTimestamp to ListInstalledComponents response to show the last time a component changed state on a device.
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.15](service/identitystore/CHANGELOG.md#v11415-2022-08-30)
+  * **Documentation**: Documentation updates for the Identity Store CLI Reference.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.15.0](service/lookoutequipment/CHANGELOG.md#v1150-2022-08-30)
+  * **Feature**: This release adds new APIs for providing labels.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.23.0](service/macie2/CHANGELOG.md#v1230-2022-08-30)
+  * **Feature**: This release of the Amazon Macie API adds support for using allow lists to define specific text and text patterns to ignore when inspecting data sources for sensitive data.
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.19](service/sso/CHANGELOG.md#v11119-2022-08-30)
+  * **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.7](service/ssoadmin/CHANGELOG.md#v1157-2022-08-30)
+  * **Documentation**: Documentation updates for the AWS IAM Identity Center CLI Reference.
+
+# Release (2022-08-29)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.9](service/fsx/CHANGELOG.md#v1249-2022-08-29)
+  * **Documentation**: Documentation updates for Amazon FSx for NetApp ONTAP.
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.11.0](service/voiceid/CHANGELOG.md#v1110-2022-08-29)
+  * **Feature**: Amazon Connect Voice ID now detects voice spoofing. When a prospective fraudster tries to spoof caller audio using audio playback or synthesized speech, Voice ID will return a risk score and outcome to indicate how likely it is that the voice is spoofed.
+
+# Release (2022-08-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.18.0](service/mediapackage/CHANGELOG.md#v1180-2022-08-26)
+  * **Feature**: This release adds Ads AdTriggers and AdsOnDeliveryRestrictions to describe calls for CMAF endpoints on MediaPackage.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.1](service/rds/CHANGELOG.md#v1251-2022-08-26) + * **Documentation**: Removes support for RDS Custom from DBInstanceClass in ModifyDBInstance + +# Release (2022-08-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.13](service/elasticloadbalancingv2/CHANGELOG.md#v11813-2022-08-25) + * **Documentation**: Documentation updates for ELBv2. Gateway Load Balancer now supports Configurable Flow Stickiness, enabling you to configure the hashing used to maintain stickiness of flows to a specific target appliance. +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.15.0](service/gamelift/CHANGELOG.md#v1150-2022-08-25) + * **Feature**: This release adds support for eight EC2 local zones as fleet locations; Atlanta, Chicago, Dallas, Denver, Houston, Kansas City (us-east-1-mci-1a), Los Angeles, and Phoenix. It also adds support for C5d, C6a, C6i, and R5d EC2 instance families. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.22.0](service/iotwireless/CHANGELOG.md#v1220-2022-08-25) + * **Feature**: This release includes a new feature for the customers to enable the LoRa gateways to send out beacons for Class B devices and an option to select one or more gateways for Class C devices when sending the LoRaWAN downlink messages. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.13](service/ivschat/CHANGELOG.md#v1013-2022-08-25) + * **Documentation**: Documentation change for IVS Chat API Reference. Doc-only update to add a paragraph on ARNs to the Welcome section. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.8.0](service/panorama/CHANGELOG.md#v180-2022-08-25) + * **Feature**: Support sorting and filtering in ListDevices API, and add more fields to device listings and single device detail +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.0](service/ssooidc/CHANGELOG.md#v1130-2022-08-25) + * **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. + +# Release (2022-08-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.0](service/cloudfront/CHANGELOG.md#v1200-2022-08-24) + * **Feature**: Adds support for CloudFront origin access control (OAC), making it possible to restrict public access to S3 bucket origins in all AWS Regions, those with SSE-KMS, and more. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.25.0](service/configservice/CHANGELOG.md#v1250-2022-08-24) + * **Feature**: AWS Config now supports ConformancePackTemplate documents in SSM Docs for the deployment and update of conformance packs. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.14](service/iam/CHANGELOG.md#v11814-2022-08-24) + * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM). +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.1](service/ivs/CHANGELOG.md#v1171-2022-08-24) + * **Documentation**: Documentation Change for IVS API Reference - Doc-only update to type field description for CreateChannel and UpdateChannel actions and for Channel data type. Also added Amazon Resource Names (ARNs) paragraph to Welcome section. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.24.0](service/quicksight/CHANGELOG.md#v1240-2022-08-24) + * **Feature**: Added a new optional property DashboardVisual under ExperienceConfiguration parameter of GenerateEmbedUrlForAnonymousUser and GenerateEmbedUrlForRegisteredUser API operations. 
This supports embedding of specific visuals in QuickSight dashboards. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.5](service/transfer/CHANGELOG.md#v1215-2022-08-24) + * **Documentation**: Documentation updates for AWS Transfer Family + +# Release (2022-08-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.0](service/rds/CHANGELOG.md#v1250-2022-08-23) + * **Feature**: RDS for Oracle supports Oracle Data Guard switchover and read replica backups. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.5](service/ssoadmin/CHANGELOG.md#v1155-2022-08-23) + * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) + +# Release (2022-08-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.5](service/docdb/CHANGELOG.md#v1195-2022-08-22) + * **Documentation**: Update document for volume clone +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.54.0](service/ec2/CHANGELOG.md#v1540-2022-08-22) + * **Feature**: R6a instances are powered by 3rd generation AMD EPYC (Milan) processors delivering all-core turbo frequency of 3.6 GHz. C6id, M6id, and R6id instances are powered by 3rd generation Intel Xeon Scalable processor (Ice Lake) delivering all-core turbo frequency of 3.5 GHz. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.23.0](service/forecast/CHANGELOG.md#v1230-2022-08-22) + * **Feature**: releasing What-If Analysis APIs and update ARN regex pattern to be more strict in accordance with security recommendation +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.12.0](service/forecastquery/CHANGELOG.md#v1120-2022-08-22) + * **Feature**: releasing What-If Analysis APIs +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.24.0](service/iotsitewise/CHANGELOG.md#v1240-2022-08-22) + * **Feature**: Enable non-unique asset names under different hierarchies +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.23.0](service/lexmodelsv2/CHANGELOG.md#v1230-2022-08-22) + * **Feature**: This release introduces a new feature to stop a running BotRecommendation Job for Automated Chatbot Designer. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.0](service/securityhub/CHANGELOG.md#v1230-2022-08-22) + * **Feature**: Added new resource details objects to ASFF, including resources for AwsBackupBackupVault, AwsBackupBackupPlan and AwsBackupRecoveryPoint. Added FixAvailable, FixedInVersion and Remediation to Vulnerability. +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.0.0](service/supportapp/CHANGELOG.md#v100-2022-08-22) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for the AWS Support App in Slack. + +# Release (2022-08-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.28.0](service/connect/CHANGELOG.md#v1280-2022-08-19) + * **Feature**: This release adds SearchSecurityProfiles API which can be used to search for Security Profile resources within a Connect Instance. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.12](service/ivschat/CHANGELOG.md#v1012-2022-08-19) + * **Documentation**: Documentation Change for IVS Chat API Reference - Doc-only update to change text/description for tags field. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.33.0](service/kendra/CHANGELOG.md#v1330-2022-08-19) + * **Feature**: This release adds support for a new authentication type - Personal Access Token (PAT) for confluence server. 
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.17.0](service/lookoutmetrics/CHANGELOG.md#v1170-2022-08-19)
+  * **Feature**: This release makes the GetDataQualityMetrics API publicly available.
+
+# Release (2022-08-18)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.1.0](service/chimesdkmediapipelines/CHANGELOG.md#v110-2022-08-18)
+  * **Feature**: The Amazon Chime SDK now supports live streaming of real-time video from the Amazon Chime SDK sessions to streaming platforms such as Amazon IVS and Amazon Elemental MediaLive. We have also added support for concatenation to create a single media capture file.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.21.0](service/cloudwatch/CHANGELOG.md#v1210-2022-08-18)
+  * **Feature**: Add support for managed Contributor Insights Rules
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.4](service/cognitoidentityprovider/CHANGELOG.md#v1184-2022-08-18)
+  * **Documentation**: This change is being made simply to fix the public documentation based on the models. We have included the PasswordChange and ResendCode events, along with the Pass, Fail and InProgress status. We have removed the Success and Failure status which are never returned by our APIs.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.16.0](service/dynamodb/CHANGELOG.md#v1160-2022-08-18)
+  * **Feature**: This release adds support for importing data from S3 into a new DynamoDB table
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.53.0](service/ec2/CHANGELOG.md#v1530-2022-08-18)
+  * **Feature**: This release adds support for VPN log options, a new feature allowing S2S VPN connections to send IKE activity logs to CloudWatch Logs
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.15.0](service/networkmanager/CHANGELOG.md#v1150-2022-08-18)
+  * **Feature**: Add TransitGatewayPeeringAttachmentId property to TransitGatewayPeering Model
+
+# Release (2022-08-17)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.15.0](service/appmesh/CHANGELOG.md#v1150-2022-08-17)
+  * **Feature**: AWS App Mesh release to support Multiple Listener and Access Log Format feature
+* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.1.0](service/connectcampaigns/CHANGELOG.md#v110-2022-08-17)
+  * **Feature**: Updated exceptions for Amazon Connect Outbound Campaign APIs.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.32.0](service/kendra/CHANGELOG.md#v1320-2022-08-17)
+  * **Feature**: This release adds the Zendesk connector (which allows you to specify the Zendesk SaaS platform as a data source) and proxy support for SharePoint and Confluence Server (which allows you to specify the proxy configuration if a proxy is required to connect to your SharePoint/Confluence Server as a data source).
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.17.0](service/lakeformation/CHANGELOG.md#v1170-2022-08-17)
+  * **Feature**: This release adds support for a new API, "AssumeDecoratedRoleWithSAML", and updates the corresponding documentation.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.24.0](service/lambda/CHANGELOG.md#v1240-2022-08-17)
+  * **Feature**: Added support for customization of Consumer Group ID for MSK and Kafka Event Source Mappings.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.22.0](service/lexmodelsv2/CHANGELOG.md#v1220-2022-08-17)
+  * **Feature**: This release introduces support for enhanced conversation design with the ability to define custom conversation flows with conditional branching and new bot responses.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.24.0](service/rds/CHANGELOG.md#v1240-2022-08-17)
+  * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) for RDS Aurora database clusters.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.18](service/secretsmanager/CHANGELOG.md#v11518-2022-08-17)
+  * **Documentation**: Documentation updates for Secrets Manager.
+
+# Release (2022-08-16)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.20.0](service/rekognition/CHANGELOG.md#v1200-2022-08-16)
+  * **Feature**: This release adds APIs which support copying an Amazon Rekognition Custom Labels model and managing project policies across AWS accounts.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.12](service/servicecatalog/CHANGELOG.md#v11412-2022-08-16)
+  * **Documentation**: Documentation updates for Service Catalog
+
+# Release (2022-08-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.19.0](service/cloudfront/CHANGELOG.md#v1190-2022-08-15)
+  * **Feature**: Adds HTTP/3 support to distributions
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.13](service/identitystore/CHANGELOG.md#v11413-2022-08-15)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.17](service/sso/CHANGELOG.md#v11117-2022-08-15)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.9.0](service/wisdom/CHANGELOG.md#v190-2022-08-15)
+  * **Feature**: This release introduces a new API PutFeedback that allows submitting feedback to Wisdom on content relevance.
+
+# Release (2022-08-14)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.17.0](config/CHANGELOG.md#v1170-2022-08-14)
+  * **Feature**: Add an alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.15.0](service/amp/CHANGELOG.md#v1150-2022-08-14)
+  * **Feature**: This release adds log APIs that allow customers to manage logging for their Amazon Managed Service for Prometheus workspaces.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.0](service/chimesdkmessaging/CHANGELOG.md#v1110-2022-08-14)
+  * **Feature**: The Amazon Chime SDK now supports channels with up to one million participants with elastic channels.
+* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.0](service/ivs/CHANGELOG.md#v1170-2022-08-14)
+  * **Feature**: Updates various list API MaxResults ranges
+* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.12.0](service/personalizeruntime/CHANGELOG.md#v1120-2022-08-14)
+  * **Feature**: This release provides support for promotions in AWS Personalize runtime.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.6](service/rds/CHANGELOG.md#v1236-2022-08-14) + * **Documentation**: Adds support for RDS Custom to DBInstanceClass in ModifyDBInstance + +# Release (2022-08-11) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.0.0](service/backupstorage/CHANGELOG.md#v100-2022-08-11) + * **Release**: New AWS service client module + * **Feature**: This is the first public release of AWS Backup Storage. We are exposing some previously-internal APIs for use by external services. These APIs are not meant to be used directly by customers. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.30.0](service/glue/CHANGELOG.md#v1300-2022-08-11) + * **Feature**: Add support for Python 3.9 AWS Glue Python Shell jobs +* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.0.0](service/privatenetworks/CHANGELOG.md#v100-2022-08-11) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Private 5G. AWS Private 5G is a managed service that makes it easy to deploy, operate, and scale your own private mobile network at your on-premises location. + +# Release (2022-08-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.16.0](config/CHANGELOG.md#v1160-2022-08-10) + * **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`. +* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.12.0](service/dlm/CHANGELOG.md#v1120-2022-08-10) + * **Feature**: This release adds support for excluding specific data (non-boot) volumes from multi-volume snapshot sets created by snapshot lifecycle policies +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.52.0](service/ec2/CHANGELOG.md#v1520-2022-08-10) + * **Feature**: This release adds support for excluding specific data (non-root) volumes from multi-volume snapshot sets created from instances. + +# Release (2022-08-09) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.20.0](service/cloudwatch/CHANGELOG.md#v1200-2022-08-09) + * **Feature**: Various quota increases related to dimensions and custom metrics +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.18.0](service/location/CHANGELOG.md#v1180-2022-08-09) + * **Feature**: Amazon Location Service now allows circular geofences in BatchPutGeofence, PutGeofence, and GetGeofence APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.39.0](service/sagemaker/CHANGELOG.md#v1390-2022-08-09) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying multiple alternate EC2 instance types to make tuning jobs more robust when the preferred instance type is not available due to insufficient capacity. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.13.0](service/sagemakera2iruntime/CHANGELOG.md#v1130-2022-08-09)
+  * **Feature**: Fix bug with parsing ISO-8601 CreationTime in Java SDK in DescribeHumanLoop
+
+# Release (2022-08-08)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.9
+  * **Bug Fix**: aws/signer/v4: Fixes a panic in SDK's handling of endpoint URLs with ports by correcting how URL path is parsed from opaque URLs. Fixes [#1294](https://github.com/aws/aws-sdk-go-v2/issues/1294).
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.29.0](service/glue/CHANGELOG.md#v1290-2022-08-08)
+  * **Feature**: Add an option to run non-urgent or non-time sensitive Glue Jobs on spare capacity
+* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.10](service/identitystore/CHANGELOG.md#v11410-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.21.0](service/iotwireless/CHANGELOG.md#v1210-2022-08-08)
+  * **Feature**: AWS IoT Wireless releases support for Sidewalk data reliability.
+* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.17.0](service/pinpoint/CHANGELOG.md#v1170-2022-08-08)
+  * **Feature**: Adds support for Advance Quiet Time in Journeys. Adds RefreshOnSegmentUpdate and WaitForQuietTime to JourneyResponse.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.2](service/quicksight/CHANGELOG.md#v1232-2022-08-08)
+  * **Documentation**: A series of documentation updates to the QuickSight API reference.
+* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.14](service/sso/CHANGELOG.md#v11114-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.2](service/ssoadmin/CHANGELOG.md#v1152-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.12.12](service/ssooidc/CHANGELOG.md#v11212-2022-08-08)
+  * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
+
+# Release (2022-08-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.13.0](service/chimesdkmeetings/CHANGELOG.md#v1130-2022-08-04)
+  * **Feature**: Adds support for Tags on Amazon Chime SDK WebRTC sessions
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.24.0](service/configservice/CHANGELOG.md#v1240-2022-08-04)
+  * **Feature**: Add resourceType enums for Athena, GlobalAccelerator, Detective and EC2 types
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.3](service/databasemigrationservice/CHANGELOG.md#v1213-2022-08-04)
+  * **Documentation**: Documentation updates for Database Migration Service (DMS).
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.28.0](service/iot/CHANGELOG.md#v1280-2022-08-04)
+  * **Feature**: This release adds support for attaching a provisioning template to a CA certificate for the JITP function, so customers no longer have to hardcode a roleArn and templateBody when registering a CA certificate to enable JITP.
+ +# Release (2022-08-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.0](service/cognitoidentityprovider/CHANGELOG.md#v1180-2022-08-03) + * **Feature**: Add a new exception type, ForbiddenException, that is returned when request is not allowed +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.0](service/wafv2/CHANGELOG.md#v1220-2022-08-03) + * **Feature**: You can now associate an AWS WAF web ACL with an Amazon Cognito user pool. + +# Release (2022-08-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.0.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v100-2022-08-02) + * **Release**: New AWS service client module + * **Feature**: This release supports user based subscription for Microsoft Visual Studio Professional and Enterprise on EC2. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.21.0](service/personalize/CHANGELOG.md#v1210-2022-08-02) + * **Feature**: This release adds support for incremental bulk ingestion for the Personalize CreateDatasetImportJob API. + +# Release (2022-08-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.1](service/configservice/CHANGELOG.md#v1231-2022-08-01) + * **Documentation**: Documentation update for PutConfigRule and PutOrganizationConfigRule +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.22.0](service/workspaces/CHANGELOG.md#v1220-2022-08-01) + * **Feature**: This release introduces ModifySamlProperties, a new API that allows control of SAML properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return SAML properties in its responses. + +# Release (2022-07-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.51.0](service/ec2/CHANGELOG.md#v1510-2022-07-29) + * **Feature**: Documentation updates for Amazon EC2. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.4](service/fsx/CHANGELOG.md#v1244-2022-07-29) + * **Documentation**: Documentation updates for Amazon FSx +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.17.0](service/shield/CHANGELOG.md#v1170-2022-07-29) + * **Feature**: AWS Shield Advanced now supports filtering for ListProtections and ListProtectionGroups. + +# Release (2022-07-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.1](service/ec2/CHANGELOG.md#v1501-2022-07-28) + * **Documentation**: Documentation updates for VM Import/Export. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.16.0](service/elasticsearchservice/CHANGELOG.md#v1160-2022-07-28) + * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage. +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.0](service/lookoutvision/CHANGELOG.md#v1140-2022-07-28) + * **Feature**: This release introduces support for image segmentation models and updates CPU accelerator options for models hosted on edge devices. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.10.0](service/opensearch/CHANGELOG.md#v1100-2022-07-28) + * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage. 
+ +# Release (2022-07-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.20.0](service/auditmanager/CHANGELOG.md#v1200-2022-07-27) + * **Feature**: This release adds an exceeded quota exception to several APIs. We added a ServiceQuotaExceededException for the following operations: CreateAssessment, CreateControl, CreateAssessmentFramework, and UpdateAssessmentStatus. +* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.21.0](service/chime/CHANGELOG.md#v1210-2022-07-27) + * **Feature**: Chime VoiceConnector will now support ValidateE911Address which will allow customers to prevalidate their addresses included in their SIP invites for emergency calling +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.0](service/configservice/CHANGELOG.md#v1230-2022-07-27) + * **Feature**: This release adds ListConformancePackComplianceScores API to support the new compliance score feature, which provides a percentage of the number of compliant rule-resource combinations in a conformance pack compared to the number of total possible rule-resource combinations in the conformance pack. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.14.0](service/globalaccelerator/CHANGELOG.md#v1140-2022-07-27) + * **Feature**: Global Accelerator now supports dual-stack accelerators, enabling support for IPv4 and IPv6 traffic. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.13.0](service/marketplacecatalog/CHANGELOG.md#v1130-2022-07-27) + * **Feature**: The SDK for the StartChangeSet API will now automatically set and use an idempotency token in the ClientRequestToken request parameter if the customer does not provide it. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.17.0](service/polly/CHANGELOG.md#v1170-2022-07-27) + * **Feature**: Amazon Polly adds new English and Hindi voice - Kajal. Kajal is available as Neural voice only. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.5](service/ssm/CHANGELOG.md#v1275-2022-07-27) + * **Documentation**: Adding doc updates for OpsCenter support in Service Setting actions. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.21.0](service/workspaces/CHANGELOG.md#v1210-2022-07-27) + * **Feature**: Added CreateWorkspaceImage API to create a new WorkSpace image from an existing WorkSpace. + +# Release (2022-07-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.15.0](service/appsync/CHANGELOG.md#v1150-2022-07-26) + * **Feature**: Adds support for a new API to evaluate mapping templates with mock data, allowing you to remotely unit test your AppSync resolvers and functions. +* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.16.0](service/detective/CHANGELOG.md#v1160-2022-07-26) + * **Feature**: Added the ability to get data source package information for the behavior graph. Graph administrators can now start (or stop) optional datasources on the behavior graph. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.15.0](service/guardduty/CHANGELOG.md#v1150-2022-07-26) + * **Feature**: Amazon GuardDuty introduces a new Malware Protection feature that triggers malware scan on selected EC2 instance resources, after the service detects a potentially malicious activity. +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.13.0](service/lookoutvision/CHANGELOG.md#v1130-2022-07-26) + * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Lookout for Vision models. 
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.22.0](service/macie2/CHANGELOG.md#v1220-2022-07-26) + * **Feature**: This release adds support for retrieving (revealing) sample occurrences of sensitive data that Amazon Macie detects and reports in findings. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.1](service/rds/CHANGELOG.md#v1231-2022-07-26) + * **Documentation**: Adds support for using RDS Proxies with RDS for MariaDB databases. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.19.0](service/rekognition/CHANGELOG.md#v1190-2022-07-26) + * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Rekognition Custom Labels models. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.3](service/securityhub/CHANGELOG.md#v1223-2022-07-26) + * **Documentation**: Documentation updates for AWS Security Hub +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.0](service/transfer/CHANGELOG.md#v1210-2022-07-26) + * **Feature**: AWS Transfer Family now supports Applicability Statement 2 (AS2), a network protocol used for the secure and reliable transfer of critical Business-to-Business (B2B) data over the public internet using HTTP/HTTPS as the transport mechanism. + +# Release (2022-07-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.6](service/autoscaling/CHANGELOG.md#v1236-2022-07-25) + * **Documentation**: Documentation update for Amazon EC2 Auto Scaling. + +# Release (2022-07-22) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.7.0](service/account/CHANGELOG.md#v170-2022-07-22) + * **Feature**: This release enables customers to manage the primary contact information for their AWS accounts. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/API_Operations.html +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.0](service/ec2/CHANGELOG.md#v1500-2022-07-22) + * **Feature**: Added support for EC2 M1 Mac instances. For more information, please visit aws.amazon.com/mac. +* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.15.0](service/iotdeviceadvisor/CHANGELOG.md#v1150-2022-07-22) + * **Feature**: Added new service feature (Early access only) - Long Duration Test, where customers can test the IoT device to observe how it behaves when the device is in operation for longer period. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.22.0](service/medialive/CHANGELOG.md#v1220-2022-07-22) + * **Feature**: Link devices now support remote rebooting. Link devices now support maintenance windows. Maintenance windows allow a Link device to install software updates without stopping the MediaLive channel. The channel will experience a brief loss of input from the device while updates are installed. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.0](service/rds/CHANGELOG.md#v1230-2022-07-22) + * **Feature**: This release adds the "ModifyActivityStream" API with support for audit policy state locking and unlocking. 
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.21.0](service/transcribe/CHANGELOG.md#v1210-2022-07-22) + * **Feature**: Remove unsupported language codes for StartTranscriptionJob and update VocabularyFileUri for UpdateMedicalVocabulary + +# Release (2022-07-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.18.0](service/athena/CHANGELOG.md#v1180-2022-07-21) + * **Feature**: This feature allows customers to retrieve runtime statistics for completed queries +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.19.0](service/cloudwatch/CHANGELOG.md#v1190-2022-07-21) + * **Feature**: Adding support for the suppression of Composite Alarm actions +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.1](service/databasemigrationservice/CHANGELOG.md#v1211-2022-07-21) + * **Documentation**: Documentation updates for Database Migration Service (DMS). +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.0](service/docdb/CHANGELOG.md#v1190-2022-07-21) + * **Feature**: Enable copy-on-write restore type +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.14.0](service/ec2instanceconnect/CHANGELOG.md#v1140-2022-07-21) + * **Feature**: This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs. +* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.0](service/frauddetector/CHANGELOG.md#v1200-2022-07-21) + * **Feature**: The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs. +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.23.0](service/iotsitewise/CHANGELOG.md#v1230-2022-07-21) + * **Feature**: Added asynchronous API to ingest bulk historical and current data into IoT SiteWise. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.31.0](service/kendra/CHANGELOG.md#v1310-2022-07-21) + * **Feature**: Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.18.0](service/networkfirewall/CHANGELOG.md#v1180-2022-07-21) + * **Feature**: Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.1](service/rds/CHANGELOG.md#v1221-2022-07-21) + * **Documentation**: Adds support for creating an RDS Proxy for an RDS for MariaDB database. + +# Release (2022-07-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.11](service/acmpca/CHANGELOG.md#v11711-2022-07-20) + * **Documentation**: AWS Certificate Manager (ACM) Private Certificate Authority (PCA) documentation updates +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.27.0](service/iot/CHANGELOG.md#v1270-2022-07-20) + * **Feature**: GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API. 
+ +# Release (2022-07-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.18.0](service/devopsguru/CHANGELOG.md#v1180-2022-07-19) + * **Feature**: Added new APIs for log anomaly detection feature. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.1](service/glue/CHANGELOG.md#v1281-2022-07-19) + * **Documentation**: Documentation updates for AWS Glue Job Timeout and Autoscaling +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.38.0](service/sagemaker/CHANGELOG.md#v1380-2022-07-19) + * **Feature**: Fixed an issue with cross account QueryLineage +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.12.0](service/sagemakeredge/CHANGELOG.md#v1120-2022-07-19) + * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.20.0](service/workspaces/CHANGELOG.md#v1200-2022-07-19) + * **Feature**: Increased the character limit of the login message from 850 to 2000 characters. + +# Release (2022-07-18) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.14.0](service/applicationdiscoveryservice/CHANGELOG.md#v1140-2022-07-18) + * **Feature**: Add AWS Agentless Collector details to the GetDiscoverySummary API response +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.1](service/ec2/CHANGELOG.md#v1491-2022-07-18) + * **Documentation**: Documentation updates for Amazon EC2. +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.22.0](service/elasticache/CHANGELOG.md#v1220-2022-07-18) + * **Feature**: Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.18.0](service/kms/CHANGELOG.md#v1180-2022-07-18) + * **Feature**: Added support for the SM2 KeySpec in China Partition Regions +* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.17.0](service/mediapackage/CHANGELOG.md#v1170-2022-07-18) + * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2 +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.37.0](service/sagemaker/CHANGELOG.md#v1370-2022-07-18) + * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.0](service/ssoadmin/CHANGELOG.md#v1150-2022-07-18) + * **Feature**: AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set. + +# Release (2022-07-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.3](service/datasync/CHANGELOG.md#v1183-2022-07-15) + * **Documentation**: Documentation updates for AWS DataSync regarding configuring Amazon FSx for ONTAP location security groups and SMB user permissions. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.7.0](service/drs/CHANGELOG.md#v170-2022-07-15) + * **Feature**: Changed existing APIs to allow choosing a dynamic volume type for replicating volumes, to reduce costs for customers. 
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.8.0](service/evidently/CHANGELOG.md#v180-2022-07-15)
+  * **Feature**: This release adds support for the new segmentation feature.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.21.0](service/wafv2/CHANGELOG.md#v1210-2022-07-15)
+  * **Feature**: This SDK release provides customers the ability to add a sensitivity level for WAF SQLi Match Statements.
+
+# Release (2022-07-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.17.0](service/athena/CHANGELOG.md#v1170-2022-07-14)
+  * **Feature**: This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be a 12-digit string.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.13.0](service/codeartifact/CHANGELOG.md#v1130-2022-07-14)
+  * **Feature**: This release introduces Package Origin Controls, a mechanism used to counteract Dependency Confusion attacks. Adds two new APIs, PutPackageOriginConfiguration and DescribePackage, and updates the ListPackage, DescribePackageVersion and ListPackageVersion APIs in support of the feature.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.22.0](service/configservice/CHANGELOG.md#v1220-2022-07-14)
+  * **Feature**: Update ResourceType enum with values for Route53Resolver, Batch, DMS, Workspaces, Stepfunctions, SageMaker, ElasticLoadBalancingV2, MSK types
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.0](service/ec2/CHANGELOG.md#v1490-2022-07-14)
+  * **Feature**: This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.18.0](service/fms/CHANGELOG.md#v1180-2022-07-14)
+  * **Feature**: Adds support for strict ordering in stateful rule groups in Network Firewall policies.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.0](service/glue/CHANGELOG.md#v1280-2022-07-14)
+  * **Feature**: This release adds an additional worker type for Glue Streaming jobs.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.7.0](service/inspector2/CHANGELOG.md#v170-2022-07-14)
+  * **Feature**: This release adds support for Inspector V2 scan configurations through the get and update configuration APIs. Currently this allows configuring ECR automated re-scan duration to lifetime or 180 days or 30 days.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.30.0](service/kendra/CHANGELOG.md#v1300-2022-07-14)
+  * **Feature**: This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing.
+* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.13.0](service/nimble/CHANGELOG.md#v1130-2022-07-14)
+  * **Feature**: Amazon Nimble Studio adds support for IAM-based access to AWS resources for Nimble Studio components and custom studio components. Studio Component scripts use these roles on the Nimble Studio workstation to mount filesystems, access S3 buckets, or access other configured resources in the Studio's AWS account.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.22.0](service/outposts/CHANGELOG.md#v1220-2022-07-14)
+  * **Feature**: This release adds the ShipmentInformation and AssetInformationList fields to the GetOrder API response.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.36.0](service/sagemaker/CHANGELOG.md#v1360-2022-07-14)
+  * **Feature**: This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning
+
+# Release (2022-07-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.13.0](service/appconfig/CHANGELOG.md#v1130-2022-07-13)
+  * **Feature**: Adding Create, Get, Update, Delete, and List APIs for two new resources: Extensions and ExtensionAssociations.
+
+# Release (2022-07-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.14.0](service/networkmanager/CHANGELOG.md#v1140-2022-07-12)
+  * **Feature**: This release adds general availability API support for AWS Cloud WAN.
+
+# Release (2022-07-11)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.48.0](service/ec2/CHANGELOG.md#v1480-2022-07-11)
+  * **Feature**: Build, manage, and monitor a unified global network that connects resources running across your cloud and on-premises environments using the AWS Cloud WAN APIs.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.0](service/redshift/CHANGELOG.md#v1260-2022-07-11)
+  * **Feature**: This release adds a new --snapshot-arn field for describe-cluster-snapshots, describe-node-configuration-options, restore-from-cluster-snapshot, authorize-snapshot-access, and revoke-snapshot-access APIs. It allows customers to give a Redshift snapshot ARN or a Redshift Serverless ARN as input.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.2](service/redshiftserverless/CHANGELOG.md#v122-2022-07-11)
+  * **Documentation**: Removed prerelease language for GA launch.
+
+# Release (2022-07-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.17.0](service/backup/CHANGELOG.md#v1170-2022-07-08)
+  * **Feature**: This release adds support for authentication using IAM user identity instead of passed IAM role, identified by excluding the IamRoleArn field in the StartRestoreJob API. This feature applies only to resource clients with a destructive restore nature (e.g. SAP HANA).
+
+# Release (2022-07-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.12.0](service/chimesdkmeetings/CHANGELOG.md#v1120-2022-07-07)
+  * **Feature**: Adds support for AppKeys and TenantIds in Amazon Chime SDK WebRTC sessions
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.0](service/databasemigrationservice/CHANGELOG.md#v1210-2022-07-07)
+  * **Feature**: New API to migrate event subscriptions to EventBridge rules
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.26.0](service/iot/CHANGELOG.md#v1260-2022-07-07)
+  * **Feature**: This release adds support to register a CA certificate without having to provide a verification certificate. This also allows multiple AWS accounts to register the same CA in the same region.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.20.0](service/iotwireless/CHANGELOG.md#v1200-2022-07-07)
+  * **Feature**: Adds 5 APIs: PutPositionConfiguration, GetPositionConfiguration, ListPositionConfigurations, UpdatePosition, GetPosition for the new Positioning Service feature which enables customers to configure solvers to calculate position of LoRaWAN devices, or specify position of LoRaWAN devices & gateways.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.35.0](service/sagemaker/CHANGELOG.md#v1350-2022-07-07)
+  * **Feature**: Heterogeneous clusters: the ability to launch training jobs with multiple instance types. This enables running each component of the training job on the instance type that is most suitable for it, e.g. doing data processing and augmentation on CPU instances and neural network training on GPU instances
+
+# Release (2022-07-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.22.0](service/cloudformation/CHANGELOG.md#v1220-2022-07-06)
+  * **Feature**: Adds a new feature, Account-level Targeting, for StackSet operations
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.16.0](service/synthetics/CHANGELOG.md#v1160-2022-07-06)
+  * **Feature**: This release introduces the Group feature, which enables users to group cross-region canaries.
+
+# Release (2022-07-05)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.5](service/configservice/CHANGELOG.md#v1215-2022-07-05)
+  * **Documentation**: Updating documentation for service limits
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.21.0](service/lexmodelsv2/CHANGELOG.md#v1210-2022-07-05)
+  * **Feature**: This release introduces an additional optional parameter, "messageSelectionStrategy", to PromptSpecification, which enables users to configure the bot to play messages in an orderly manner.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.0](service/quicksight/CHANGELOG.md#v1230-2022-07-05)
+  * **Feature**: This release allows customers to programmatically create QuickSight accounts with Enterprise and Enterprise + Q editions. It also adds support for allowlisting domains for embedding QuickSight dashboards at runtime through the embedding APIs.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.0](service/rds/CHANGELOG.md#v1220-2022-07-05)
+  * **Feature**: Adds waiters support for DBCluster.
+* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.0.0](service/rolesanywhere/CHANGELOG.md#v100-2022-07-05)
+  * **Release**: New AWS service client module
+  * **Feature**: IAM Roles Anywhere allows your workloads such as servers, containers, and applications to obtain temporary AWS credentials and use the same IAM roles and policies that you have configured for your AWS workloads to access AWS resources.
+* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.19.0](service/sqs/CHANGELOG.md#v1190-2022-07-05)
+  * **Feature**: Adds support for the SQS client to automatically validate message checksums for SendMessage, SendMessageBatch, and ReceiveMessage. A DisableMessageChecksumValidation parameter has been added to the Options struct for the SQS package. Setting this to true will disable the checksum validation. This can be set when creating a client, or per operation call.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.15.0](service/ssmincidents/CHANGELOG.md#v1150-2022-07-05) + * **Feature**: Adds support for tagging incident-record on creation by providing incident tags in the template within a response-plan. + +# Release (2022-07-01) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.20.0](service/databasemigrationservice/CHANGELOG.md#v1200-2022-07-01) + * **Feature**: Added new features for AWS DMS version 3.4.7 that includes new endpoint settings for S3, OpenSearch, Postgres, SQLServer and Oracle. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.5](service/rds/CHANGELOG.md#v1215-2022-07-01) + * **Documentation**: Adds support for additional retention periods to Performance Insights. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.27.0](service/s3/CHANGELOG.md#v1270-2022-07-01) + * **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076). + +# Release (2022-06-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.16.0](service/athena/CHANGELOG.md#v1160-2022-06-30) + * **Feature**: This feature introduces the API support for Athena's parameterized query and BatchGetPreparedStatement API. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.18.0](service/customerprofiles/CHANGELOG.md#v1180-2022-06-30) + * **Feature**: This release adds the optional MinAllowedConfidenceScoreForMerging parameter to the CreateDomain, UpdateDomain, and GetAutoMergingPreview APIs in Customer Profiles. This parameter is used as a threshold to influence the profile auto-merging step of the Identity Resolution process. +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.20.0](service/emr/CHANGELOG.md#v1200-2022-06-30) + * **Feature**: This release adds support for the ExecutionRoleArn parameter in the AddJobFlowSteps and DescribeStep APIs. Customers can use ExecutionRoleArn to specify the IAM role used for each job they submit using the AddJobFlowSteps API. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.27.0](service/glue/CHANGELOG.md#v1270-2022-06-30) + * **Feature**: This release adds tag as an input of CreateDatabase +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.29.0](service/kendra/CHANGELOG.md#v1290-2022-06-30) + * **Feature**: Amazon Kendra now provides a data source connector for alfresco +* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.13.0](service/mwaa/CHANGELOG.md#v1130-2022-06-30) + * **Feature**: Documentation updates for Amazon Managed Workflows for Apache Airflow. +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.16.0](service/pricing/CHANGELOG.md#v1160-2022-06-30) + * **Feature**: Documentation update for GetProducts Response. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.16.0](service/wellarchitected/CHANGELOG.md#v1160-2022-06-30) + * **Feature**: Added support for UpdateGlobalSettings API. Added status filter to ListWorkloadShares and ListLensShares. +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.16.0](service/workmail/CHANGELOG.md#v1160-2022-06-30) + * **Feature**: This release adds support for managing user availability configurations in Amazon WorkMail. 
+
+# Release (2022-06-29)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.6
+  * **Bug Fix**: Fixes aws/signer/v4 so it does not double-sign the Content-Length header. Fixes [#1728](https://github.com/aws/aws-sdk-go-v2/issues/1728). Thanks to @matelang for creating the issue and PR.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.17.0](service/appstream/CHANGELOG.md#v1170-2022-06-29)
+  * **Feature**: Includes support for StreamingExperienceSettings in the CreateStack and UpdateStack APIs.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.7](service/elasticloadbalancingv2/CHANGELOG.md#v1187-2022-06-29)
+  * **Documentation**: This release adds two attributes for ALB: one helps to preserve the host header, and the other helps to modify, preserve, or remove the X-Forwarded-For header in the HTTP request.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.19.0](service/emr/CHANGELOG.md#v1190-2022-06-29)
+  * **Feature**: This release introduces an additional optional parameter, "Throughput", to VolumeSpecification, enabling users to configure throughput for gp3 EBS volumes.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.21.0](service/medialive/CHANGELOG.md#v1210-2022-06-29)
+  * **Feature**: This release adds support for automatic renewal of MediaLive reservations at the end of each reservation term. Automatic renewal is optional. This release also adds support for labeling accessibility-focused audio and caption tracks in HLS outputs.
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.0](service/redshiftserverless/CHANGELOG.md#v120-2022-06-29)
+  * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.34.0](service/sagemaker/CHANGELOG.md#v1340-2022-06-29)
+  * **Feature**: This release adds: UpdateFeatureGroup, UpdateFeatureMetadata, DescribeFeatureMetadata APIs; FeatureMetadata type in Search API; LastModifiedTime, LastUpdateStatus, OnlineStoreTotalSizeBytes in DescribeFeatureGroup API.
+* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.14.0](service/translate/CHANGELOG.md#v1140-2022-06-29)
+  * **Feature**: Added the ListLanguages API, which can be used to list the languages supported by Translate.
+
+# Release (2022-06-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.0](service/datasync/CHANGELOG.md#v1180-2022-06-28)
+  * **Feature**: AWS DataSync now supports Amazon FSx for NetApp ONTAP locations.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.47.0](service/ec2/CHANGELOG.md#v1470-2022-06-28)
+  * **Feature**: This release adds a new spread placement group to EC2 Placement Groups: host-level spread, which spreads instances across physical hosts and is available to Outpost customers only. The CreatePlacementGroup and DescribePlacementGroups APIs were updated with a new parameter, SpreadLevel, to support this feature.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.12.0](service/finspacedata/CHANGELOG.md#v1120-2022-06-28)
+  * **Feature**: Releases a new API, GetExternalDataViewAccessDetails.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.16.0](service/polly/CHANGELOG.md#v1160-2022-06-28)
+  * **Feature**: Adds 4 new neural voices: Pedro (es-US), Liam (fr-CA), Daniel (de-DE) and Arthur (en-GB).
+
+# Release (2022-06-24.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.13.7](service/emrcontainers/CHANGELOG.md#v1137-2022-06-242)
+  * **Bug Fix**: Fixes a bug with an incorrectly modeled timestamp format.
+
+# Release (2022-06-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.14.0](service/lookoutequipment/CHANGELOG.md#v1140-2022-06-23)
+  * **Feature**: This release adds visualizations to the scheduled inference results. Users will be able to see inference results, including diagnostic results, from their running inference schedulers.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.1](service/mediaconvert/CHANGELOG.md#v1251-2022-06-23)
+  * **Documentation**: AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.15.0](service/mgn/CHANGELOG.md#v1150-2022-06-23)
+  * **Feature**: New and modified APIs for the Post-Migration Framework.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.6.0](service/migrationhubrefactorspaces/CHANGELOG.md#v160-2022-06-23)
+  * **Feature**: This release adds a new API, UpdateRoute, that allows a route to be updated to the ACTIVE/INACTIVE state. In addition, the CreateRoute API will now allow users to create a route in the ACTIVE/INACTIVE state.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.33.0](service/sagemaker/CHANGELOG.md#v1330-2022-06-23)
+  * **Feature**: SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access their private workforce in VPC mode.
+
+# Release (2022-06-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.8](service/apigateway/CHANGELOG.md#v1158-2022-06-22)
+  * **Documentation**: Documentation updates for Amazon API Gateway.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.15.0](service/pricing/CHANGELOG.md#v1150-2022-06-22)
+  * **Feature**: This release introduces one update to the GetProducts API. The serviceCode attribute is now required when you use GetProductsRequest.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.20.0](service/transfer/CHANGELOG.md#v1200-2022-06-22)
+  * **Feature**: Previously, the service supported only RSA host keys and user keys. With this launch, Transfer Family has expanded support to ECDSA and ED25519 host keys and user keys, enabling customers to support a broader set of clients by choosing RSA, ECDSA, or ED25519 host and user keys.
+
+# Release (2022-06-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.46.0](service/ec2/CHANGELOG.md#v1460-2022-06-21)
+  * **Feature**: This release adds support for Private IP VPNs, a new feature allowing S2S VPN connections to use private IP addresses as the tunnel outside IP address, with Direct Connect as transport.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.9](service/ecs/CHANGELOG.md#v1189-2022-06-21)
+  * **Documentation**: Amazon ECS UpdateService now supports the following parameters: PlacementStrategies, PlacementConstraints and CapacityProviderStrategy.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.15.0](service/wellarchitected/CHANGELOG.md#v1150-2022-06-21)
+  * **Feature**: Adds support for lens tagging, multiple helpful-resource URLs, and multiple improvement-plan URLs.
+
+# Release (2022-06-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.14.0](service/directoryservice/CHANGELOG.md#v1140-2022-06-20)
+  * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD settings.
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.17.7](service/kafka/CHANGELOG.md#v1177-2022-06-20)
+  * **Documentation**: Documentation updates on using the AZ ID during cluster creation.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.21.0](service/outposts/CHANGELOG.md#v1210-2022-06-20)
+  * **Feature**: This release adds the AssetLocation structure to the ListAssets response. AssetLocation includes the RackElevation for an Asset.
+
+# Release (2022-06-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.27.0](service/connect/CHANGELOG.md#v1270-2022-06-17)
+  * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use them to programmatically enable/disable high-volume outbound communications using the attribute type HIGH_VOLUME_OUTBOUND on the specified Amazon Connect instance.
+* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.0.0](service/connectcampaigns/CHANGELOG.md#v100-2022-06-17)
+  * **Release**: New AWS service client module
+  * **Feature**: Added the Amazon Connect high-volume outbound communications SDK.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.15.7](service/dynamodb/CHANGELOG.md#v1157-2022-06-17)
+  * **Documentation**: Doc-only update for the DynamoDB service.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.7](service/dynamodbstreams/CHANGELOG.md#v1137-2022-06-17)
+  * **Documentation**: Doc-only update for the DynamoDB service.
+
+# Release (2022-06-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.16.0](service/redshiftdata/CHANGELOG.md#v1160-2022-06-16)
+  * **Feature**: This release adds a new --workgroup-name field to operations that connect to an endpoint. Customers can now execute queries against their serverless workgroups (see the sketch following the 2022-06-09 section below).
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.1.0](service/redshiftserverless/CHANGELOG.md#v110-2022-06-16)
+  * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.11](service/secretsmanager/CHANGELOG.md#v11511-2022-06-16)
+  * **Documentation**: Documentation updates for Secrets Manager.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.0](service/securityhub/CHANGELOG.md#v1220-2022-06-16)
+  * **Feature**: Added a Threats field for security findings. Added new resource details for ECS Container, ECS Task, RDS SecurityGroup, Kinesis Stream, EC2 TransitGateway, EFS AccessPoint, CloudFormation Stack, CloudWatch Alarm, VPC Peering Connection and WAF Rules.
+
+# Release (2022-06-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.11.0](service/finspacedata/CHANGELOG.md#v1110-2022-06-15)
+  * **Feature**: This release adds a new set of APIs: GetPermissionGroup, DisassociateUserFromPermissionGroup, AssociateUserToPermissionGroup, ListPermissionGroupsByUser, and ListUsersByPermissionGroup.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.14.0](service/guardduty/CHANGELOG.md#v1140-2022-06-15)
+  * **Feature**: Adds finding fields available from the GuardDuty console. Adds FreeTrial-related operations. Deprecates the use of various APIs related to Master Accounts and replaces them with Administrator Accounts.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.13.0](service/servicecatalogappregistry/CHANGELOG.md#v1130-2022-06-15)
+  * **Feature**: This release adds a new API, ListAttributeGroupsForApplication, that returns the associated attribute groups of an application. In addition, the UpdateApplication and UpdateAttributeGroup APIs will not allow users to update the 'Name' attribute.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.19.0](service/workspaces/CHANGELOG.md#v1190-2022-06-15)
+  * **Feature**: Added a new field, "reason", to OperationNotSupportedException. Receiving this exception in the DeregisterWorkspaceDirectory API will now return a reason giving more context on the failure.
+
+# Release (2022-06-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.13.0](service/budgets/CHANGELOG.md#v1130-2022-06-14)
+  * **Feature**: Adds a budgets ThrottlingException. Updates the CostFilters value pattern.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.16.0](service/lookoutmetrics/CHANGELOG.md#v1160-2022-06-14)
+  * **Feature**: Adds filters to Alert and adds a new UpdateAlert API.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.0](service/mediaconvert/CHANGELOG.md#v1250-2022-06-14)
+  * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
+
+# Release (2022-06-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.20.0](service/outposts/CHANGELOG.md#v1200-2022-06-13)
+  * **Feature**: This release adds API operations AWS uses to install Outpost servers.
+
+# Release (2022-06-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.19.7](service/frauddetector/CHANGELOG.md#v1197-2022-06-10)
+  * **Documentation**: Documentation updates for Amazon Fraud Detector (AWSHawksNest).
+
+# Release (2022-06-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.11.0](service/chimesdkmeetings/CHANGELOG.md#v1110-2022-06-09)
+  * **Feature**: Adds support for live transcription in AWS GovCloud (US) Regions.
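+
+As a usage illustration for the `service/redshiftdata` v1.16.0 entry in the 2022-06-16 release above, here is a minimal sketch of executing a statement against a serverless workgroup. The Go field name `WorkgroupName` is an assumption inferred from the `--workgroup-name` wording in the note; the workgroup and database names are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/redshiftdata"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := redshiftdata.NewFromConfig(cfg)
+
+	// For a serverless workgroup, the workgroup name takes the place of
+	// the ClusterIdentifier/DbUser pair used for provisioned clusters.
+	out, err := client.ExecuteStatement(context.TODO(), &redshiftdata.ExecuteStatementInput{
+		Database:      aws.String("dev"), // placeholder
+		Sql:           aws.String("SELECT 1"),
+		WorkgroupName: aws.String("my-serverless-workgroup"), // placeholder; assumed field name
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("statement id:", aws.ToString(out.Id))
+}
+```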
+
+# Release (2022-06-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.19.0](service/databasemigrationservice/CHANGELOG.md#v1190-2022-06-08)
+  * **Feature**: This release adds DMS Fleet Advisor APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to create and modify fleet advisor instances, and to collect and analyze information about the local data infrastructure.
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.7](service/iam/CHANGELOG.md#v1187-2022-06-08)
+  * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
+* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.0.0](service/m2/CHANGELOG.md#v100-2022-06-08)
+  * **Release**: New AWS service client module
+  * **Feature**: AWS Mainframe Modernization service is a managed mainframe service and set of tools for planning, migrating, modernizing, and running mainframe workloads on AWS.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.17.0](service/neptune/CHANGELOG.md#v1170-2022-06-08)
+  * **Feature**: This release adds support for Neptune to be configured as a global database, with a primary DB cluster in one region, and up to five secondary DB clusters in other regions.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.25.0](service/redshift/CHANGELOG.md#v1250-2022-06-08)
+  * **Feature**: Adds a new API, GetClusterCredentialsWithIAM, to return temporary credentials (see the sketch following the 2022-06-06 section below).
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.0.0](service/redshiftserverless/CHANGELOG.md#v100-2022-06-08)
+  * **Release**: New AWS service client module
+  * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
+
+# Release (2022-06-07)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.19.0](service/auditmanager/CHANGELOG.md#v1190-2022-06-07)
+  * **Feature**: This release introduces 2 updates to the Audit Manager API. The roleType and roleArn attributes are now required when you use the CreateAssessment or UpdateAssessment operation. We also added a throttling exception to the RegisterAccount API operation.
+* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.19.0](service/costexplorer/CHANGELOG.md#v1190-2022-06-07)
+  * **Feature**: Added two new APIs to support cost allocation tag operations: ListCostAllocationTags and UpdateCostAllocationTagsStatus.
+
+# Release (2022-06-06)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.10.0](service/chimesdkmessaging/CHANGELOG.md#v1100-2022-06-06)
+  * **Feature**: This release adds support for searching channels by members via the SearchChannels API, removes required restrictions for Name and Mode in the UpdateChannel API, and enhances the CreateChannel API by exposing the member and moderator lists as well as the channel ID as optional parameters.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.26.0](service/connect/CHANGELOG.md#v1260-2022-06-06)
+  * **Feature**: This release adds a new API, GetCurrentUserData, which returns real-time details about users' current activity.
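+
+As a usage illustration for the `service/redshift` v1.25.0 entry in the 2022-06-08 release above, a minimal sketch of fetching temporary database credentials with the new API. The input fields shown are assumptions modeled on the pre-existing GetClusterCredentials call; identifiers are placeholders:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/redshift"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := redshift.NewFromConfig(cfg)
+
+	// The returned user name and password are derived from the caller's
+	// IAM identity rather than from a named database user.
+	creds, err := client.GetClusterCredentialsWithIAM(context.TODO(), &redshift.GetClusterCredentialsWithIAMInput{
+		ClusterIdentifier: aws.String("my-cluster"), // placeholder
+		DbName:            aws.String("dev"),        // placeholder; assumed optional
+		DurationSeconds:   aws.Int32(900),           // assumed optional override
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Println("db user:", aws.ToString(creds.DbUser))
+}
+```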
+
+# Release (2022-06-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.16.0](service/applicationinsights/CHANGELOG.md#v1160-2022-06-02)
+  * **Feature**: Provides account-level onboarding support through CFN/CLI.
+* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.12.6](service/codeartifact/CHANGELOG.md#v1126-2022-06-02)
+  * **Documentation**: Documentation updates for CodeArtifact.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.25.0](service/connect/CHANGELOG.md#v1250-2022-06-02)
+  * **Feature**: This release adds the following features: 1) new APIs to manage (create, list, update) task template resources, 2) updates to the startTaskContact API to support task templates, and 3) a new TransferContact API to programmatically transfer in-progress tasks via a contact flow.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.28.0](service/kendra/CHANGELOG.md#v1280-2022-06-02)
+  * **Feature**: Amazon Kendra now provides a data source connector for GitHub. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-github.html
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.14.0](service/proton/CHANGELOG.md#v1140-2022-06-02)
+  * **Feature**: Adds a new "Components" API to enable users to create, delete, and update AWS Proton components.
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.10.0](service/voiceid/CHANGELOG.md#v1100-2022-06-02)
+  * **Feature**: Added a new attribute, ServerSideEncryptionUpdateDetails, to Domain and DomainSummary.
+
+# Release (2022-06-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.6.0](service/backupgateway/CHANGELOG.md#v160-2022-06-01)
+  * **Feature**: Adds the GetGateway and UpdateGatewaySoftwareNow APIs and adds the hypervisor name to the UpdateHypervisor API.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.10.0](service/chimesdkmeetings/CHANGELOG.md#v1100-2022-06-01)
+  * **Feature**: Adds support for centrally controlling each participant's ability to send and receive audio, video and screen share within a WebRTC session. Attendee capabilities can be specified when the attendee is created and updated during the session with the new BatchUpdateAttendeeCapabilitiesExcept API.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.22.0](service/forecast/CHANGELOG.md#v1220-2022-06-01)
+  * **Feature**: Added a Format field to the Import and Export APIs in Amazon Forecast. Added TimeSeriesSelector to the CreateForecast API.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.0](service/route53/CHANGELOG.md#v1210-2022-06-01)
+  * **Feature**: Adds new APIs to support Route 53 IP-based routing.
+
+# Release (2022-05-31)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.17.0](service/cognitoidentityprovider/CHANGELOG.md#v1170-2022-05-31)
+  * **Feature**: Amazon Cognito now supports IP address propagation for all unauthenticated APIs (e.g. SignUp, ForgotPassword).
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.6.0](service/drs/CHANGELOG.md#v160-2022-05-31)
+  * **Feature**: Changed existing APIs and added new APIs to accommodate using multiple AWS accounts with AWS Elastic Disaster Recovery.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.22.0](service/iotsitewise/CHANGELOG.md#v1220-2022-05-31)
+  * **Feature**: This release adds the following new optional field to the IoT SiteWise asset resource: assetDescription.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.15.0](service/lookoutmetrics/CHANGELOG.md#v1150-2022-05-31)
+  * **Feature**: Adds backtest mode to detectors using the CloudWatch data source.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.20.0](service/transcribe/CHANGELOG.md#v1200-2022-05-31)
+  * **Feature**: Amazon Transcribe now supports automatic language identification for multi-lingual audio in batch mode.
+
+# Release (2022-05-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.16.0](service/appflow/CHANGELOG.md#v1160-2022-05-27)
+  * **Feature**: Adds the following features/changes: Parquet output that preserves typing from the source connector, a failed-executions threshold before deactivation for scheduled flows, and an increase in the max size of access and refresh tokens from 2048 to 4096.
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.17.0](service/datasync/CHANGELOG.md#v1170-2022-05-27)
+  * **Feature**: AWS DataSync now supports TLS encryption in transit, file system policies, and access points for EFS locations.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.1.0](service/emrserverless/CHANGELOG.md#v110-2022-05-27)
+  * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.32.0](service/sagemaker/CHANGELOG.md#v1320-2022-05-27)
+  * **Feature**: Amazon SageMaker Notebook Instances now allows configuration of the Instance Metadata Service version, and Amazon SageMaker Studio now supports G5 instance types.
+
+# Release (2022-05-26)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.45.0](service/ec2/CHANGELOG.md#v1450-2022-05-26)
+  * **Feature**: C7g instances, powered by the latest generation AWS Graviton3 processors, provide the best price performance in Amazon EC2 for compute-intensive workloads.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.0.0](service/emrserverless/CHANGELOG.md#v100-2022-05-26)
+  * **Release**: New AWS service client module
+  * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.21.0](service/forecast/CHANGELOG.md#v1210-2022-05-26)
+  * **Feature**: Introduces a new field, Time Alignment Boundary, in Auto Predictor. It helps align the timestamps generated during Forecast exports.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.22.0](service/lightsail/CHANGELOG.md#v1220-2022-05-26)
+  * **Feature**: Amazon Lightsail now supports the ability to configure a Lightsail Container Service to pull images from Amazon ECR private repositories in your account.
+
+# Release (2022-05-25)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.6](service/apigateway/CHANGELOG.md#v1156-2022-05-25)
+  * **Documentation**: Documentation updates for Amazon API Gateway.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.3](service/apprunner/CHANGELOG.md#v1123-2022-05-25)
+  * **Documentation**: Documentation-only update added for CodeConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.21.0](service/cloudformation/CHANGELOG.md#v1210-2022-05-25)
+  * **Feature**: Adds a new parameter, statusReason, to the DescribeStackSetOperation output for additional details.
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.0](service/fsx/CHANGELOG.md#v1240-2022-05-25)
+  * **Feature**: This release adds root squash support to FSx for Lustre to restrict root-level access from clients by mapping root users to a less-privileged user/group with limited permissions.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.14.0](service/lookoutmetrics/CHANGELOG.md#v1140-2022-05-25)
+  * **Feature**: Adds AthenaSourceConfig for MetricSet APIs to support Athena as a data source.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.31.0](service/sagemaker/CHANGELOG.md#v1310-2022-05-25)
+  * **Feature**: Amazon SageMaker Autopilot adds support for manually selecting features from the input dataset using the CreateAutoMLJob API.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.9](service/secretsmanager/CHANGELOG.md#v1159-2022-05-25)
+  * **Documentation**: Documentation updates for Secrets Manager.
+* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.9.0](service/voiceid/CHANGELOG.md#v190-2022-05-25)
+  * **Feature**: VoiceID will now automatically expire Speakers if they haven't been accessed for Enrollment, Re-enrollment or Successful Auth for three years. The Speaker APIs now return a "LastAccessedAt" time for Speakers, and the EvaluateSession API returns the "SPEAKER_EXPIRED" Auth Decision for EXPIRED Speakers.
+
+# Release (2022-05-24)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.16.0](service/cognitoidentityprovider/CHANGELOG.md#v1160-2022-05-24)
+  * **Feature**: Amazon Cognito now supports requiring attribute verification (e.g. email and phone number) before update.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.44.0](service/ec2/CHANGELOG.md#v1440-2022-05-24)
+  * **Feature**: The Stop Protection feature enables customers to protect their instances from accidental stop actions (see the sketch following this section below).
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.4](service/ivschat/CHANGELOG.md#v104-2022-05-24)
+  * **Documentation**: Doc-only update. For the MessageReviewHandler structure, added the timeout period in the description of the fallbackResult field.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.24.0](service/mediaconvert/CHANGELOG.md#v1240-2022-05-24)
+  * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
+* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.13.0](service/networkmanager/CHANGELOG.md#v1130-2022-05-24)
+  * **Feature**: This release adds Multi Account API support for a TGW Global Network, to enable and disable AWSServiceAccess with AwsOrganizations for the Network Manager service and the dependent CloudFormation StackSets service.
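+
+As a usage illustration for the `service/ec2` v1.44.0 entry in the 2022-05-24 release above, a minimal sketch of turning stop protection on for an existing instance. The `DisableApiStop` attribute name is an assumption modeled on the long-standing `DisableApiTermination` attribute; the instance ID is a placeholder:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := ec2.NewFromConfig(cfg)
+
+	// Setting DisableApiStop to true rejects API-initiated stop calls,
+	// i.e. it enables stop protection. (Attribute name assumed.)
+	_, err = client.ModifyInstanceAttribute(context.TODO(), &ec2.ModifyInstanceAttributeInput{
+		InstanceId:     aws.String("i-0123456789abcdef0"), // placeholder
+		DisableApiStop: &types.AttributeBooleanValue{Value: aws.Bool(true)},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```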
+
+# Release (2022-05-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.21.0](service/elasticache/CHANGELOG.md#v1210-2022-05-23)
+  * **Feature**: Added support for encryption in transit for Memcached clusters. Customers can now launch a Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later.
+* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.20.0](service/forecast/CHANGELOG.md#v1200-2022-05-23)
+  * **Feature**: New APIs for Monitor that help you understand how your predictors perform over time.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.20.0](service/personalize/CHANGELOG.md#v1200-2022-05-23)
+  * **Feature**: Adds modelMetrics as part of the DescribeRecommender API response for Personalize.
+
+# Release (2022-05-20)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.15.7](service/cloudwatchlogs/CHANGELOG.md#v1157-2022-05-20)
+  * **Documentation**: Doc-only update to publish the new valid values for log retention.
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.18.0](service/comprehend/CHANGELOG.md#v1180-2022-05-20)
+  * **Feature**: Comprehend releases 14 new entity types for the DetectPiiEntities and ContainsPiiEntities APIs.
+
+# Release (2022-05-19)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.1.0](service/gamesparks/CHANGELOG.md#v110-2022-05-19)
+  * **Feature**: This release adds an optional DeploymentResult field in the responses of the GetStageDeploymentIntegrationTests and ListStageDeploymentIntegrationTests APIs.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.13.0](service/lookoutmetrics/CHANGELOG.md#v1130-2022-05-19)
+  * **Feature**: This release adds SnsFormat to SNSConfiguration to support human-readable alerts.
+
+# Release (2022-05-18)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.14.0](service/appmesh/CHANGELOG.md#v1140-2022-05-18)
+  * **Feature**: This release updates the existing Create and Update APIs for meshes and virtual nodes by adding a new IP preference field. This new IP preference field can be used to control the IP versions being used with the mesh and allows for IPv6 support within App Mesh.
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.3](service/batch/CHANGELOG.md#v1183-2022-05-18)
+  * **Documentation**: Documentation updates for AWS Batch.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.16.0](service/greengrassv2/CHANGELOG.md#v1160-2022-05-18)
+  * **Feature**: This release adds the new DeleteDeployment API operation that you can use to delete deployment resources. This release also adds support for discontinued AWS-provided components, so AWS can communicate when a component has any issues that you should consider before you deploy it.
+* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.12.0](service/ioteventsdata/CHANGELOG.md#v1120-2022-05-18)
+  * **Feature**: Introduces a new API for deleting detectors: BatchDeleteDetector.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.22.0](service/quicksight/CHANGELOG.md#v1220-2022-05-18)
+  * **Feature**: The UpdatePublicSharingSettings API enables IAM admins to enable/disable the account-level setting for public access to dashboards. When enabled, owners/co-owners of dashboards can enable public access on their dashboards. These dashboards can only be accessed through a share link or embedding.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.19.0](service/transfer/CHANGELOG.md#v1190-2022-05-18)
+  * **Feature**: AWS Transfer Family now supports the SetStat server configuration option, which provides the ability to ignore the SetStat command issued by file transfer clients, enabling customers to upload files without errors.
+
+# Release (2022-05-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.12](internal/ini/CHANGELOG.md#v1312-2022-05-17)
+  * **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.25.0](service/glue/CHANGELOG.md#v1250-2022-05-17)
+  * **Feature**: This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.1](service/iotsecuretunneling/CHANGELOG.md#v1131-2022-05-17)
+  * **Bug Fix**: Fixes the iotsecuretunneling and mobile API clients to use the correct name for signing requests. Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.2](service/kms/CHANGELOG.md#v1172-2022-05-17)
+  * **Documentation**: Adds an HMAC best-practice tip and a note on the annual rotation of AWS managed keys.
+* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.11.5](service/mobile/CHANGELOG.md#v1115-2022-05-17)
+  * **Bug Fix**: Fixes the iotsecuretunneling and mobile API clients to use the correct name for signing requests. Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
+
+# Release (2022-05-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.13.0](service/applicationdiscoveryservice/CHANGELOG.md#v1130-2022-05-16)
+  * **Feature**: Adds Migration Evaluator Collector details to the GetDiscoverySummary API response.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.18.0](service/cloudfront/CHANGELOG.md#v1180-2022-05-16)
+  * **Feature**: Introduced a new error (TooLongCSPInResponseHeadersPolicy) that is returned when the value of the Content-Security-Policy header in a response headers policy exceeds the maximum allowed length.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.1](service/rekognition/CHANGELOG.md#v1181-2022-05-16)
+  * **Documentation**: Documentation updates for Amazon Rekognition.
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.0](service/resiliencehub/CHANGELOG.md#v160-2022-05-16)
+  * **Feature**: In this release, we are introducing support for Amazon Elastic Container Service, Amazon Route 53, AWS Elastic Disaster Recovery, and AWS Backup in addition to the existing supported services. This release also supports Terraform file input from S3 and scheduling daily assessments.
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.2](service/servicecatalog/CHANGELOG.md#v1142-2022-05-16)
+  * **Documentation**: Updated the descriptions for the ListAcceptedPortfolioShares API and the PortfolioShareType parameters.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.5](service/sts/CHANGELOG.md#v1165-2022-05-16)
+  * **Documentation**: Documentation updates for AWS Security Token Service.
+* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.6.0](service/workspacesweb/CHANGELOG.md#v160-2022-05-16)
+  * **Feature**: Amazon WorkSpaces Web now supports administrator timeout control.
+
+# Release (2022-05-13)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.9.0](service/grafana/CHANGELOG.md#v190-2022-05-13)
+  * **Feature**: This release adds APIs for creating and deleting API keys in an Amazon Managed Grafana workspace.
+
+# Release (2022-05-12)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.43.0](service/ec2/CHANGELOG.md#v1430-2022-05-12)
+  * **Feature**: This release introduces a target type, Gateway Load Balancer Endpoint, for mirrored traffic. Customers can now specify the GatewayLoadBalancerEndpoint option during the creation of a traffic mirror target.
+* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.5](service/finspacedata/CHANGELOG.md#v1105-2022-05-12)
+  * **Documentation**: The CreateSnapshot permission for creating a data view is now deprecated; use the CreateDataView permission instead.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.1](service/iot/CHANGELOG.md#v1251-2022-05-12)
+  * **Documentation**: Documentation update for China region ListMetricValues for IoT.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.2](service/ivschat/CHANGELOG.md#v102-2022-05-12)
+  * **Documentation**: Documentation-only updates for the IVS Chat API Reference.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.27.0](service/kendra/CHANGELOG.md#v1270-2022-05-12)
+  * **Feature**: Amazon Kendra now provides a data source connector for Jira. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-jira.html
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.23.0](service/lambda/CHANGELOG.md#v1230-2022-05-12)
+  * **Feature**: Lambda releases the Node.js 16 managed runtime, available in all commercial regions.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.21.0](service/lightsail/CHANGELOG.md#v1210-2022-05-12)
+  * **Feature**: This release adds support to include inactive database bundles in the response of the GetRelationalDatabaseBundles request.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.1](service/outposts/CHANGELOG.md#v1191-2022-05-12)
+  * **Documentation**: Documentation updates for AWS Outposts.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.14.0](service/ssmincidents/CHANGELOG.md#v1140-2022-05-12)
+  * **Feature**: Adds support for dynamic SSM Runbook parameter values. Updates the validation pattern for engagements. Adds ConflictException to the UpdateReplicationSet API contract.
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.6](service/transfer/CHANGELOG.md#v1186-2022-05-12)
+  * **Documentation**: AWS Transfer Family now accepts ECDSA keys for server host keys.
+
+# Release (2022-05-11)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.42.0](service/ec2/CHANGELOG.md#v1420-2022-05-11)
+  * **Feature**: This release updates AWS PrivateLink APIs to support IPv6 for PrivateLink Services and Endpoints of type 'Interface'.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.7](service/secretsmanager/CHANGELOG.md#v1157-2022-05-11)
+  * **Documentation**: Doc-only update for Secrets Manager that fixes several customer-reported issues.
+
+# Release (2022-05-10)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.17.5](service/computeoptimizer/CHANGELOG.md#v1175-2022-05-10)
+  * **Documentation**: Documentation updates for Compute Optimizer.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.41.0](service/ec2/CHANGELOG.md#v1410-2022-05-10)
+  * **Feature**: Added support for using NitroTPM and UEFI Secure Boot on EC2 instances.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.21.0](service/eks/CHANGELOG.md#v1210-2022-05-10)
+  * **Feature**: Adds the BOTTLEROCKET_ARM_64_NVIDIA and BOTTLEROCKET_x86_64_NVIDIA AMI types to EKS managed nodegroups.
+* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.18.0](service/emr/CHANGELOG.md#v1180-2022-05-10)
+  * **Feature**: This release updates the Amazon EMR ModifyInstanceGroups API to support "MERGE"-type cluster reconfiguration. It also adds the ability to specify a particular Amazon Linux release for all nodes in a cluster launch request.
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.5](service/migrationhubrefactorspaces/CHANGELOG.md#v155-2022-05-10)
+  * **Documentation**: AWS Migration Hub Refactor Spaces documentation-only update to fix a formatting issue.
+
+# Release (2022-05-09)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.15.5](config/CHANGELOG.md#v1155-2022-05-09)
+  * **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.10.0](service/cloudcontrol/CHANGELOG.md#v1100-2022-05-09)
+  * **Feature**: SDK release for Cloud Control API to include paginators for Python SDK.
+* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.7.0](service/evidently/CHANGELOG.md#v170-2022-05-09)
+  * **Feature**: Adds a detail message to the GetExperimentResults API response to indicate experiment result availability.
+* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.13.5](service/ssmcontacts/CHANGELOG.md#v1135-2022-05-09)
+  * **Documentation**: Fixed an error in the DescribeEngagement example for AWS Incident Manager.
+
+# Release (2022-05-06)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.40.0](service/ec2/CHANGELOG.md#v1400-2022-05-06)
+  * **Feature**: Adds new state values for IPAMs, IPAM Scopes, and IPAM Pools.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.17.0](service/location/CHANGELOG.md#v1170-2022-05-06)
+  * **Feature**: Amazon Location Service now includes a MaxResults parameter for ListGeofences requests.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.16.0](service/mediapackage/CHANGELOG.md#v1160-2022-05-06)
+  * **Feature**: This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.1](service/rds/CHANGELOG.md#v1211-2022-05-06)
+  * **Documentation**: Various documentation improvements.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.24.0](service/redshift/CHANGELOG.md#v1240-2022-05-06)
+  * **Feature**: Introduces a new field, 'LoadSampleData', in the CreateCluster operation. Customers can now specify the 'LoadSampleData' option during cluster creation, which loads sample data into the newly created cluster.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.1](service/securityhub/CHANGELOG.md#v1211-2022-05-06)
+  * **Documentation**: Documentation updates for the Security Hub API reference.
+
+# Release (2022-05-05)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.16.0](service/datasync/CHANGELOG.md#v1160-2022-05-05)
+  * **Feature**: AWS DataSync now supports a new ObjectTags Task API option that can be used to control whether Object Tags are transferred.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.39.0](service/ec2/CHANGELOG.md#v1390-2022-05-05)
+  * **Feature**: Amazon EC2 I4i instances are powered by 3rd generation Intel Xeon Scalable processors and feature up to 30 TB of local AWS Nitro SSD storage.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.0](service/iot/CHANGELOG.md#v1250-2022-05-05)
+  * **Feature**: AWS IoT Jobs now allows you to create up to 100,000 active continuous and snapshot jobs by using concurrency control.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.26.0](service/kendra/CHANGELOG.md#v1260-2022-05-05)
+  * **Feature**: AWS Kendra now supports hierarchical facets for a query. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/filtering.html
+
+# Release (2022-05-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.16.0](service/backup/CHANGELOG.md#v1160-2022-05-04)
+  * **Feature**: Adds support for 2 new filters on job completion time for 3 list-jobs APIs in AWS Backup.
+* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.0](service/iotsecuretunneling/CHANGELOG.md#v1130-2022-05-04)
+  * **Feature**: This release introduces a new API, RotateTunnelAccessToken, that allows revoking existing tokens and generating new ones.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.1](service/lightsail/CHANGELOG.md#v1201-2022-05-04)
+  * **Documentation**: Documentation updates for Lightsail.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.0](service/ssm/CHANGELOG.md#v1270-2022-05-04)
+  * **Feature**: This release adds the TargetMaps parameter in the SSM State Manager API.
+
+# Release (2022-05-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.38.0](service/ec2/CHANGELOG.md#v1380-2022-05-03)
+  * **Feature**: Adds support for allocating Dedicated Hosts on AWS Outposts. The AllocateHosts API now accepts an OutpostArn request parameter, and the DescribeHosts API now includes an OutpostArn response parameter.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.12.0](service/kinesisvideo/CHANGELOG.md#v1120-2022-05-03)
+  * **Feature**: Adds support for multiple image-feature-related APIs for configuring image generation and notification of a video stream. Adds "GET_IMAGES" to the list of supported API names for the GetDataEndpoint API.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.13.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1130-2022-05-03)
+  * **Feature**: Adds support for the GetImages API for retrieving images from a video stream.
+* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.8](service/s3/CHANGELOG.md#v1268-2022-05-03)
+  * **Documentation**: Documentation-only update with doc bug fixes for the S3 API docs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.30.0](service/sagemaker/CHANGELOG.md#v1300-2022-05-03)
+  * **Feature**: SageMaker Autopilot adds new metrics for all candidate models generated by Autopilot experiments; RStudio on SageMaker now allows users to bring their own development environment in a custom image.
+
+# Release (2022-05-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.16.0](service/organizations/CHANGELOG.md#v1160-2022-05-02)
+  * **Feature**: This release adds INVALID_PAYMENT_INSTRUMENT as a fail reason and an error message.
+* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.0](service/outposts/CHANGELOG.md#v1190-2022-05-02)
+  * **Feature**: This release adds a new API called ListAssets to the Outposts SDK, which lists the hardware assets in an Outpost.
+* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.15.0](service/synthetics/CHANGELOG.md#v1150-2022-05-02)
+  * **Feature**: CloudWatch Synthetics has introduced a new feature to provide customers with an option to delete the underlying resources that a Synthetics canary creates when the user chooses to delete the canary.
+
+# Release (2022-04-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.0](service/codegurureviewer/CHANGELOG.md#v1160-2022-04-29)
+  * **Feature**: Amazon CodeGuru Reviewer now supports suppressing recommendations from being generated on specific files and directories.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.23.0](service/mediaconvert/CHANGELOG.md#v1230-2022-04-29)
+  * **Feature**: AWS Elemental MediaConvert SDK now supports creation of Dolby Vision profile 8.1, the ability to generate black frames of video, and introduces audio-only DASH and CMAF support.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.0](service/rds/CHANGELOG.md#v1210-2022-04-29)
+  * **Feature**: Adds support for Internet Protocol version 6 (IPv6) on RDS database instances.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.26.0](service/ssm/CHANGELOG.md#v1260-2022-04-29)
+  * **Feature**: Updates StartChangeRequestExecution, adding TargetMaps to the Runbook parameter.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.20.0](service/wafv2/CHANGELOG.md#v1200-2022-04-29)
+  * **Feature**: You can now inspect all request headers and all cookies. You can now specify how to handle oversize body contents in your rules that inspect the body.
+
+# Release (2022-04-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.5](service/auditmanager/CHANGELOG.md#v1185-2022-04-28)
+  * **Documentation**: This release adds documentation updates for Audit Manager. We provided examples of how to use the Custom_ prefix for the keywordValue attribute. We also provided more details about the DeleteAssessmentReport operation.
+* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.16.0](service/braket/CHANGELOG.md#v1160-2022-04-28)
+  * **Feature**: This release enables Braket Hybrid Jobs with Embedded Simulators to have multiple instances.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.24.0](service/connect/CHANGELOG.md#v1240-2022-04-28)
+  * **Feature**: This release introduces an API for changing the current agent status of a user in Connect.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.37.0](service/ec2/CHANGELOG.md#v1370-2022-04-28)
+  * **Feature**: This release adds support to query the public key and creation date of EC2 key pairs. Additionally, the format (pem or ppk) of a key pair can be specified when creating a new key pair.
+* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.13.5](service/guardduty/CHANGELOG.md#v1135-2022-04-28)
+  * **Documentation**: Documentation update for the API description.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.17.0](service/networkfirewall/CHANGELOG.md#v1170-2022-04-28)
+  * **Feature**: AWS Network Firewall adds support for stateful threat signature AWS managed rule groups.
+
+# Release (2022-04-27)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.5](service/amplify/CHANGELOG.md#v1115-2022-04-27)
+  * **Documentation**: Documentation-only update to support the Amplify GitHub App feature launch.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.0.0](service/chimesdkmediapipelines/CHANGELOG.md#v100-2022-04-27)
+  * **Release**: New AWS service client module
+  * **Feature**: For Amazon Chime SDK meetings, the Amazon Chime Media Pipelines SDK allows builders to capture audio, video, and content share streams. You can also capture meeting events, live transcripts, and data messages. The pipelines save the artifacts to an Amazon S3 bucket that you designate.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.16.0](service/cloudtrail/CHANGELOG.md#v1160-2022-04-27)
+  * **Feature**: Increases the retention period maximum to 2557 days. Deprecates unused fields of the ListEventDataStores API response. Updates documentation.
+* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.5](service/internal/checksum/CHANGELOG.md#v115-2022-04-27)
+  * **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.19.0](service/iotwireless/CHANGELOG.md#v1190-2022-04-27)
+  * **Feature**: Adds list support for event configurations, allows getting and updating event configurations by resource type, and supports LoRaWAN events; makes NetworkAnalyzerConfiguration a resource, with List, Create, and Delete API support; adds FCntStart attribute support for ABP WirelessDevice.
+* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.13.0](service/lookoutequipment/CHANGELOG.md#v1130-2022-04-27)
+  * **Feature**: This release adds the following new features: 1) introduces an option for automatic schema creation, 2) allows ingestion of data containing the most common errors, with automatic data cleaning, and 3) introduces a new API, ListSensorStatistics, that gives further information about the ingested data.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.0](service/rekognition/CHANGELOG.md#v1180-2022-04-27)
+  * **Feature**: This release adds support to configure stream-processor resources for label detection on streaming videos. The UpdateStreamProcessor API is also launched with this release, which can be used to update an existing stream processor.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.29.0](service/sagemaker/CHANGELOG.md#v1290-2022-04-27)
+  * **Feature**: Amazon SageMaker Autopilot adds support for a custom validation dataset and validation ratio through the CreateAutoMLJob and DescribeAutoMLJob APIs.
+
+# Release (2022-04-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.17.0](service/cloudfront/CHANGELOG.md#v1170-2022-04-26)
+  * **Feature**: CloudFront now supports the Server-Timing header in HTTP responses sent from CloudFront. You can use this header to view metrics that help you gain insights about the behavior and performance of CloudFront. To use this header, enable it in a response headers policy.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.2](service/glue/CHANGELOG.md#v1242-2022-04-26)
+  * **Documentation**: This release adds documentation for the APIs to create, read, delete, list, and batch-read AWS Glue custom patterns, and for Lake Formation configuration settings in the AWS Glue crawler.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.0](service/ivschat/CHANGELOG.md#v100-2022-04-26)
+  * **Release**: New AWS service client module
+  * **Feature**: Adds new APIs for IVS Chat, a feature for building interactive chat experiences alongside an IVS broadcast.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.0](service/lightsail/CHANGELOG.md#v1200-2022-04-26)
+  * **Feature**: This release adds support for Lightsail load balancer HTTP-to-HTTPS redirect and TLS policy configuration.
+* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.16.0](service/networkfirewall/CHANGELOG.md#v1160-2022-04-26)
+  * **Feature**: AWS Network Firewall now enables customers to use a customer-managed AWS KMS key for the encryption of their firewall resources.
+* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.14.5](service/pricing/CHANGELOG.md#v1145-2022-04-26)
+  * **Documentation**: Documentation updates for the Price List API.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.28.0](service/sagemaker/CHANGELOG.md#v1280-2022-04-26)
+  * **Feature**: SageMaker Inference Recommender now accepts a customer KMS key ID for encryption of endpoints and compilation outputs created during inference recommendation.
+
+# Release (2022-04-25)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2`: v1.16.3
+  * **Dependency Update**: Updates the SDK's internal copy of golang.org/x/sync/singleflight to address an issue with a test failing due to timing issues.
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.12.0](credentials/CHANGELOG.md#v1120-2022-04-25)
+  * **Feature**: Adds Duration and Policy options that can be used when creating the stscreds.WebIdentityRoleProvider credentials provider (see the sketch following the 2022-04-21 section below).
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.23.0](service/connect/CHANGELOG.md#v1230-2022-04-25)
+  * **Feature**: This release adds the SearchUsers API, which can be used to search for users within a Connect instance.
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.4](service/gamelift/CHANGELOG.md#v1144-2022-04-25)
+  * **Documentation**: Documentation updates for Amazon GameLift.
+* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.13.0](service/mq/CHANGELOG.md#v1130-2022-04-25)
+  * **Feature**: This release adds the CRITICAL_ACTION_REQUIRED broker state and the ActionRequired API property. CRITICAL_ACTION_REQUIRED informs you when your broker is degraded. ActionRequired provides you with a code which you can use to find instructions in the Developer Guide on how to resolve the issue.
+* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.0](service/rdsdata/CHANGELOG.md#v1120-2022-04-25)
+  * **Feature**: Adds support for receiving SQL query results as a simplified JSON string, enabling developers to more easily convert it to an object using popular JSON string parsing libraries.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.0](service/securityhub/CHANGELOG.md#v1210-2022-04-25)
+  * **Feature**: Security Hub now lets you opt out of auto-enabling the default standards (CIS and FSBP) in accounts that are auto-enabled with Security Hub via Security Hub's integration with AWS Organizations.
+
+# Release (2022-04-22)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.9.0](service/chimesdkmeetings/CHANGELOG.md#v190-2022-04-22)
+  * **Feature**: Includes additional exception types.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.36.0](service/ec2/CHANGELOG.md#v1360-2022-04-22)
+  * **Feature**: Adds support for waiters that automatically poll for a deleted NAT Gateway until it reaches the deleted state.
+
+# Release (2022-04-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.5](service/elasticache/CHANGELOG.md#v1205-2022-04-21)
+  * **Documentation**: Doc-only update for ElastiCache.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.0](service/glue/CHANGELOG.md#v1240-2022-04-21)
+  * **Feature**: This release adds APIs to create, read, delete, list, and batch-read Glue custom entity types.
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.21.0](service/iotsitewise/CHANGELOG.md#v1210-2022-04-21)
+  * **Feature**: This release adds 3 new batch data query APIs: BatchGetAssetPropertyValue, BatchGetAssetPropertyValueHistory and BatchGetAssetPropertyAggregates.
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.7.0](service/iottwinmaker/CHANGELOG.md#v170-2022-04-21)
+  * **Feature**: General availability (GA) for AWS IoT TwinMaker. For more information, see https://docs.aws.amazon.com/iot-twinmaker/latest/apireference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.12.0](service/lookoutmetrics/CHANGELOG.md#v1120-2022-04-21)
+  * **Feature**: Adds the DetectMetricSetConfig API for detecting the configuration required to create a metric set from a provided S3 data source.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.17.0](service/mediatailor/CHANGELOG.md#v1170-2022-04-21)
+  * **Feature**: This release introduces tiered channels and adds support for live sources. Customers using a STANDARD channel can now create programs using live sources.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.5](service/secretsmanager/CHANGELOG.md#v1155-2022-04-21)
+  * **Documentation**: Documentation updates for Secrets Manager.
+* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.17.0](service/storagegateway/CHANGELOG.md#v1170-2022-04-21)
+  * **Feature**: This release adds support for virtual tape barcodes with a minimum length of 5 characters.
+* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.8.0](service/wisdom/CHANGELOG.md#v180-2022-04-21)
+  * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations.
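+
+As a usage illustration for the `credentials` v1.12.0 entry in the 2022-04-25 release above, a minimal sketch of supplying the new options when building the provider. `NewWebIdentityRoleProvider` and `IdentityTokenFile` are existing constructors; the `Duration` and `Policy` field names on `WebIdentityRoleOptions`, plus the role ARN, token path, and policy document, are assumptions for illustration:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
+	"github.com/aws/aws-sdk-go-v2/service/sts"
+)
+
+func main() {
+	cfg, err := config.LoadDefaultConfig(context.TODO())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	provider := stscreds.NewWebIdentityRoleProvider(
+		sts.NewFromConfig(cfg),
+		"arn:aws:iam::123456789012:role/my-role",            // placeholder role ARN
+		stscreds.IdentityTokenFile("/var/run/secrets/token"), // placeholder token path
+		func(o *stscreds.WebIdentityRoleOptions) {
+			o.Duration = 30 * time.Minute // assumed: session duration option
+			// assumed: an inline session policy that further scopes the role
+			o.Policy = aws.String(`{"Version":"2012-10-17","Statement":[]}`)
+		},
+	)
+
+	// Subsequent clients built from cfg will assume the role via the
+	// cached web-identity provider.
+	cfg.Credentials = aws.NewCredentialsCache(provider)
+}
+```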
+ +# Release (2022-04-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.22.0](service/connect/CHANGELOG.md#v1220-2022-04-20) + * **Feature**: This release adds APIs to search, claim, release, list, update, and describe phone numbers. You can also use them to associate and disassociate contact flows with phone numbers. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.21.0](service/macie2/CHANGELOG.md#v1210-2022-04-20) + * **Feature**: Sensitive data findings in Amazon Macie now indicate how Macie found the sensitive data that produced a finding (originType). +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.14.0](service/mgn/CHANGELOG.md#v1140-2022-04-20) + * **Feature**: Removed required annotation from input fields in Describe operation requests. Added quotaValue to ServiceQuotaExceededException. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.20.0](service/rds/CHANGELOG.md#v1200-2022-04-20) + * **Feature**: Added a new cluster-level attribute to set the capacity range for Aurora Serverless v2 instances. + +# Release (2022-04-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.0](service/autoscaling/CHANGELOG.md#v1230-2022-04-19) + * **Feature**: EC2 Auto Scaling now adds default instance warm-up times for all scaling activities, health check replacements, and other replacement events in the Auto Scaling instance lifecycle. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.25.0](service/kendra/CHANGELOG.md#v1250-2022-04-19) + * **Feature**: Amazon Kendra now provides a data source connector for Quip. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-quip.html +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.0](service/kms/CHANGELOG.md#v1170-2022-04-19) + * **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.19.0](service/personalize/CHANGELOG.md#v1190-2022-04-19) + * **Feature**: Adding StartRecommender and StopRecommender APIs for Personalize. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.15.0](service/polly/CHANGELOG.md#v1150-2022-04-19) + * **Feature**: Amazon Polly adds new Austrian German voice - Hannah. Hannah is available as a Neural voice only. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.23.0](service/redshift/CHANGELOG.md#v1230-2022-04-19) + * **Feature**: Introduces new fields for LogDestinationType and LogExports on EnableLogging requests and Enable/Disable/DescribeLogging responses. Customers can now select CloudWatch Logs as a destination for their Audit Logs. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.25.0](service/ssm/CHANGELOG.md#v1250-2022-04-19) + * **Feature**: Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression when creating an SSM association. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.15.0](service/textract/CHANGELOG.md#v1150-2022-04-19) + * **Feature**: This release adds support for specifying and extracting information from documents using the Queries feature within Analyze Document API +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.4](service/transfer/CHANGELOG.md#v1184-2022-04-19) + * **Documentation**: This release contains corrected HomeDirectoryMappings examples for several API functions: CreateAccess, UpdateAccess, CreateUser, and UpdateUser.
+* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.12.0](service/worklink/CHANGELOG.md#v1120-2022-04-19) + * **Feature**: Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK. + +# Release (2022-04-15) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.9.0](feature/dynamodb/attributevalue/CHANGELOG.md#v190-2022-04-15) + * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.9.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v190-2022-04-15) + * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.15.0](service/athena/CHANGELOG.md#v1150-2022-04-15) + * **Feature**: This release adds subfields, ErrorMessage, Retryable, to the AthenaError response object in the GetQueryExecution API when a query fails. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.19.0](service/lightsail/CHANGELOG.md#v1190-2022-04-15) + * **Feature**: This release adds support to describe the synchronization status of the account-level block public access feature for your Amazon Lightsail buckets. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.19.0](service/rds/CHANGELOG.md#v1190-2022-04-15) + * **Feature**: Removes Amazon RDS on VMware with the deletion of APIs related to Custom Availability Zones and Media installation + +# Release (2022-04-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.15.0](service/appflow/CHANGELOG.md#v1150-2022-04-14) + * **Feature**: Enables users to pass custom token URL parameters for Oauth2 authentication during create connector profile +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.16.0](service/appstream/CHANGELOG.md#v1160-2022-04-14) + * **Feature**: Includes updates for create and update fleet APIs to manage the session scripts locations for Elastic fleets. +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.0](service/batch/CHANGELOG.md#v1180-2022-04-14) + * **Feature**: Enables configuration updates for compute environments with BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.1](service/cloudwatch/CHANGELOG.md#v1181-2022-04-14) + * **Documentation**: Updates documentation for additional statistics in CloudWatch Metric Streams. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.1](service/ec2/CHANGELOG.md#v1351-2022-04-14) + * **Documentation**: Documentation updates for Amazon EC2. 
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.23.0](service/glue/CHANGELOG.md#v1230-2022-04-14) + * **Feature**: Auto Scaling for Glue version 3.0 and later jobs to dynamically scale compute resources. This SDK change provides customers with the auto-scaled DPU usage. + +# Release (2022-04-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.0](service/cloudwatch/CHANGELOG.md#v1180-2022-04-13) + * **Feature**: Adds support for additional statistics in CloudWatch Metric Streams. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.23.0](service/fsx/CHANGELOG.md#v1230-2022-04-13) + * **Feature**: This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone. + +# Release (2022-04-12) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.17.0](service/devopsguru/CHANGELOG.md#v1170-2022-04-12) + * **Feature**: This release adds a new API, DeleteInsight, to delete the insight along with the associated anomalies, events and recommendations. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.0](service/ec2/CHANGELOG.md#v1350-2022-04-12) + * **Feature**: X2idn and X2iedn instances are powered by 3rd generation Intel Xeon Scalable processors with an all-core turbo frequency up to 3.5 GHz. Amazon EC2 C6a instances are powered by 3rd generation AMD EPYC processors. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.17.0](service/efs/CHANGELOG.md#v1170-2022-04-12) + * **Feature**: Amazon EFS adds support for a ThrottlingException when using the CreateAccessPoint API if the account is nearing the AccessPoint limit (120). +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.6.0](service/iottwinmaker/CHANGELOG.md#v160-2022-04-12) + * **Feature**: This release adds the following new features: 1) ListEntities API now supports search using ExternalId. 2) BatchPutPropertyValue and GetPropertyValueHistory APIs now allow users to represent time at sub-second precision. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.15.4](service/kinesis/CHANGELOG.md#v1154-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.14.4](service/lexruntimev2/CHANGELOG.md#v1144-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.5](service/s3/CHANGELOG.md#v1265-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.4](service/transcribestreaming/CHANGELOG.md#v164-2022-04-12) + * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively. + +# Release (2022-04-11) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.6.0](service/amplifyuibuilder/CHANGELOG.md#v160-2022-04-11) + * **Feature**: In this release, we have added the ability to bind events to component-level actions.
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.0](service/apprunner/CHANGELOG.md#v1120-2022-04-11) + * **Feature**: This release adds tracing for App Runner services with X-Ray using AWS Distro for OpenTelemetry. New APIs: CreateObservabilityConfiguration, DescribeObservabilityConfiguration, ListObservabilityConfigurations, and DeleteObservabilityConfiguration. Updated APIs: CreateService and UpdateService. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.18.0](service/workspaces/CHANGELOG.md#v1180-2022-04-11) + * **Feature**: Added API support that allows customers to create GPU-enabled WorkSpaces using EC2 G4dn instances. + +# Release (2022-04-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.22.0](service/mediaconvert/CHANGELOG.md#v1220-2022-04-08) + * **Feature**: AWS Elemental MediaConvert SDK has added support for the pass-through of WebVTT styling to WebVTT outputs, pass-through of KLV metadata to supported formats, and improved filter support for processing 444/RGB content. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.17.0](service/mediapackagevod/CHANGELOG.md#v1170-2022-04-08) + * **Feature**: This release adds ScteMarkersSource as an available field for Dash Packaging Configurations. When set to MANIFEST, MediaPackage will source the SCTE-35 markers from the manifest. When set to SEGMENTS, MediaPackage will source the SCTE-35 markers from the segments. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.19.0](service/wafv2/CHANGELOG.md#v1190-2022-04-08) + * **Feature**: Add a new CurrentDefaultVersion field to ListAvailableManagedRuleGroupVersions API response; add a new VersioningSupported boolean to each ManagedRuleGroup returned from ListAvailableManagedRuleGroups API response. + +# Release (2022-04-07) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.0](internal/v4a/CHANGELOG.md#v100-2022-04-07) + * **Release**: New internal v4a signing module location. +* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.18.0](service/docdb/CHANGELOG.md#v1180-2022-04-07) + * **Feature**: Added support to enable/disable performance insights when creating or modifying db instances +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.0](service/eventbridge/CHANGELOG.md#v1160-2022-04-07) + * **Feature**: Adds new EventBridge Endpoint resources for disaster recovery, multi-region failover, and cross-region replication capabilities to help you build resilient event-driven applications. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.18.0](service/personalize/CHANGELOG.md#v1180-2022-04-07) + * **Feature**: This release provides tagging support in AWS Personalize. +* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.14.4](service/pi/CHANGELOG.md#v1144-2022-04-07) + * **Documentation**: Adds support for DocumentDB to the Performance Insights API. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.27.0](service/sagemaker/CHANGELOG.md#v1270-2022-04-07) + * **Feature**: Amazon Sagemaker Notebook Instances now supports G5 instance types + +# Release (2022-04-06) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.0](service/configservice/CHANGELOG.md#v1210-2022-04-06) + * **Feature**: Add resourceType enums for AWS::EMR::SecurityConfiguration and AWS::SageMaker::CodeRepository +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.24.0](service/kendra/CHANGELOG.md#v1240-2022-04-06) + * **Feature**: Amazon Kendra now provides a data source connector for Box. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-box.html +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.22.0](service/lambda/CHANGELOG.md#v1220-2022-04-06) + * **Feature**: This release adds new APIs for creating and managing Lambda Function URLs and adds a new FunctionUrlAuthType parameter to the AddPermission API. Customers can use Function URLs to create built-in HTTPS endpoints on their functions. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.7.0](service/panorama/CHANGELOG.md#v170-2022-04-06) + * **Feature**: Added Brand field to device listings. + +# Release (2022-04-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.15.0](service/datasync/CHANGELOG.md#v1150-2022-04-05) + * **Feature**: AWS DataSync now supports Amazon FSx for OpenZFS locations. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.22.0](service/fsx/CHANGELOG.md#v1220-2022-04-05) + * **Feature**: Provide customers more visibility into file system status by adding new "Misconfigured Unavailable" status for Amazon FSx for Windows File Server. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.21.4](service/s3control/CHANGELOG.md#v1214-2022-04-05) + * **Documentation**: Documentation-only update for doc bug fixes for the S3 Control API docs. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.20.0](service/securityhub/CHANGELOG.md#v1200-2022-04-05) + * **Feature**: Added additional ASFF details for RdsSecurityGroup AutoScalingGroup, ElbLoadBalancer, CodeBuildProject and RedshiftCluster. + +# Release (2022-04-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.24.0](service/iot/CHANGELOG.md#v1240-2022-04-04) + * **Feature**: AWS IoT - AWS IoT Device Defender adds support to list metric datapoints collected for IoT devices through the ListMetricValues API +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.13.0](service/proton/CHANGELOG.md#v1130-2022-04-04) + * **Feature**: SDK release to support tagging for AWS Proton Repository resource +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.0](service/servicecatalog/CHANGELOG.md#v1140-2022-04-04) + * **Feature**: This release adds ProvisioningArtifictOutputKeys to DescribeProvisioningParameters to reference the outputs of a Provisioned Product and deprecates ProvisioningArtifactOutputs. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.12.4](service/sms/CHANGELOG.md#v1124-2022-04-04) + * **Documentation**: Revised product update notice for SMS console deprecation. + +# Release (2022-04-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.21.0](service/connect/CHANGELOG.md#v1210-2022-04-01) + * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. 
You can use it to programmatically enable/disable multi-party conferencing using attribute type MULTI_PARTY_CONFERENCING on the specified Amazon Connect instance. + +# Release (2022-03-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.8.4](feature/dynamodb/attributevalue/CHANGELOG.md#v184-2022-03-31) + * **Documentation**: Fixes documentation typos in Number type's helper methods +* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.8.4](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v184-2022-03-31) + * **Documentation**: Fixes documentation typos in Number type's helper methods +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.3](service/auditmanager/CHANGELOG.md#v1183-2022-03-31) + * **Documentation**: This release adds documentation updates for Audit Manager. The updates provide data deletion guidance when a customer deregisters Audit Manager or deregisters a delegated administrator. +* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.9.0](service/cloudcontrol/CHANGELOG.md#v190-2022-03-31) + * **Feature**: SDK release for Cloud Control API in Amazon Web Services China (Beijing) Region, operated by Sinnet, and Amazon Web Services China (Ningxia) Region, operated by NWCD +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.20.0](service/databrew/CHANGELOG.md#v1200-2022-03-31) + * **Feature**: This AWS Glue Databrew release adds feature to support ORC as an input format. +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.8.0](service/grafana/CHANGELOG.md#v180-2022-03-31) + * **Feature**: This release adds tagging support to the Managed Grafana service. New APIs: TagResource, UntagResource and ListTagsForResource. Updates: add optional field tags to support tagging while calling CreateWorkspace. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.0.0](service/pinpointsmsvoicev2/CHANGELOG.md#v100-2022-03-31) + * **Release**: New AWS service client module + * **Feature**: Amazon Pinpoint now offers a version 2.0 suite of SMS and voice APIs, providing increased control over sending and configuration. This release is a new SDK for sending SMS and voice messages called PinpointSMSVoiceV2. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.9.0](service/route53recoverycluster/CHANGELOG.md#v190-2022-03-31) + * **Feature**: This release adds a new API "ListRoutingControls" to list routing control states using the highly reliable Route 53 ARC data plane endpoints. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.17.0](service/workspaces/CHANGELOG.md#v1170-2022-03-31) + * **Feature**: Added APIs that allow you to customize the logo, login message, and help links in the WorkSpaces client login page. 
To learn more, visit https://docs.aws.amazon.com/workspaces/latest/adminguide/customize-branding.html + # Release (2022-03-30) ## General Highlights diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index b139a33460..4b761e771a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -72,22 +72,22 @@ all: generate unit # Code Generation # ################### .PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \ -gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy gen-aws-ptrs tidy-modules-% \ +gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \ add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \ sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \ update-module-metadata download-modules-% generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \ -gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy min-go-version-. \ +gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \ tidy-modules-. add-module-license-files gen-aws-ptrs format smithy-generate: cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean -smithy-build: gen-repo-mod-replace +smithy-build: cd codegen && ./gradlew clean build -Plog-tests -smithy-build-%: gen-repo-mod-replace +smithy-build-%: @# smithy-build- command that uses the pattern to define a build filter that @# the smithy API model service id starts with. Strips off the @# "smithy-build-". @@ -126,13 +126,25 @@ gen-repo-mod-replace: @echo "Generating go.mod replace for repo modules" go run ${REPOTOOLS_CMD_MAKE_RELATIVE} -gen-mod-replace-smithy: +gen-mod-replace-smithy-%: + @# gen-mod-replace-smithy- command that uses the pattern to define a build filter + @# for the modules to add the replace to. Strips off the "gen-mod-replace-smithy-". + @# + @# SMITHY_GO_SRC environment variable is the path the replace directive points to + @# + @# e.g. gen-mod-replace-smithy-service_ssooidc cd ./internal/repotools/cmd/eachmodule \ - && go run . "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}" + && go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}" -gen-mod-dropreplace-smithy: +gen-mod-dropreplace-smithy-%: + @# gen-mod-dropreplace-smithy- command that uses the pattern to define a build filter + @# for the modules to drop the replace from. Strips off the "gen-mod-dropreplace-smithy-". + @# + @# e.g. gen-mod-dropreplace-smithy-service_ssooidc cd ./internal/repotools/cmd/eachmodule \ - && go run . "go mod edit -dropreplace github.com/aws/smithy-go" + && go run . -p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \ + "go mod edit -dropreplace github.com/aws/smithy-go" gen-aws-ptrs: cd aws && go generate @@ -466,6 +478,21 @@ sdkv1check: echo "$$sdkv1usage"; \ if [ "$$sdkv1usage" != "" ]; then exit 1; fi +list-deps: list-deps-. + +list-deps-%: + @# command that uses the pattern to define the root path that the + @# module testing will start from. Strips off the "list-deps-" and + @# replaces all "_" with "/".
+ @# + @# Trim output to include only the list of dependencies from stdout. + @# make list-deps 2>&- + @# + @# e.g. list-deps-internal_protocoltest + @cd ./internal/repotools/cmd/eachmodule \ + && go run . -p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \ + "go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u + ################### # Sandbox Testing # ################### diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md index bb4349fea2..cda17f77d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/README.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md @@ -129,6 +129,8 @@ The v2 SDK will use GitHub [Issues] to track feature requests and issues with th [SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and use the AWS SDK for Go V2. +[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go. + [SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index df2abb58cd..ac8bfd0c61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -3,13 +3,14 @@ package aws import ( "net/http" + smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" ) // HTTPClient provides the interface to provide custom HTTPClients. Generally // *http.Client is sufficient for most use cases. The HTTPClient should not -// follow redirects. +// follow 301 or 302 redirects. type HTTPClient interface { Do(*http.Request) (*http.Response, error) } @@ -30,6 +31,18 @@ type Config struct { // variables, shared credential file, and EC2 Instance Roles. Credentials CredentialsProvider + // The Bearer Authentication token provider to use for authenticating API + // operation calls with a Bearer Authentication token. The API clients and + // operations must support the Bearer Authentication scheme in order for the + // token provider to be used. API clients created with NewFromConfig will + // automatically be configured with this option, if the API client supports + // Bearer Authentication. + // + // The SDK's config.LoadDefaultConfig can automatically populate this + // option for external configuration options such as SSO session. + // https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html + BearerAuthTokenProvider smithybearer.TokenProvider + // The HTTP Client the SDK's API clients will use to invoke HTTP requests. // The SDK defaults to a BuildableClient allowing API clients to create // copies of the HTTP Client for service specific customizations. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go index dfd2b1ddbf..9e9525231c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -46,14 +46,14 @@ type CredentialsCacheOptions struct { // CredentialsCache will look for optional interfaces on the Provider to adjust // how the credential cache handles credentials caching.
// -// * HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle -// credential refresh failures. This could return an updated Credentials -// value, or attempt another means of retrieving credentials. +// - HandleFailRefreshCredentialsCacheStrategy - Allows provider to handle +// credential refresh failures. This could return an updated Credentials +// value, or attempt another means of retrieving credentials. // -// * AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how -// credentials Expires is modified. This could modify how the Credentials -// Expires is adjusted based on the CredentialsCache ExpiryWindow option. -// Such as providing a floor not to reduce the Expires below. +// - AdjustExpiresByCredentialsCacheStrategy - Allows provider to adjust how +// credentials Expires is modified. This could modify how the Credentials +// Expires is adjusted based on the CredentialsCache ExpiryWindow option. +// Such as providing a floor not to reduce the Expires below. type CredentialsCache struct { provider CredentialsProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go index 0fffc53e67..24c8ce4a73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go @@ -23,41 +23,41 @@ import ( // The following example demonstrates using the AnonymousCredentials to prevent // SDK's external config loading attempt to resolve credentials. // -// cfg, err := config.LoadDefaultConfig(context.TODO(), -// config.WithCredentialsProvider(aws.AnonymousCredentials{}), -// ) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } +// cfg, err := config.LoadDefaultConfig(context.TODO(), +// config.WithCredentialsProvider(aws.AnonymousCredentials{}), +// ) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } // -// client := s3.NewFromConfig(cfg) +// client := s3.NewFromConfig(cfg) // // Alternatively you can leave the API client Option's `Credential` member to // nil. If using the `NewFromConfig` constructor you'll need to explicitly set // the `Credentials` member to nil, if the external config resolved a // credential provider. // -// client := s3.New(s3.Options{ -// // Credentials defaults to a nil value. -// }) +// client := s3.New(s3.Options{ +// // Credentials defaults to a nil value. +// }) // // This can also be configured for specific operations calls too. 
// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// log.Fatalf("failed to load config, %v", err) -// } +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// log.Fatalf("failed to load config, %v", err) +// } // -// client := s3.NewFromConfig(config) +// client := s3.NewFromConfig(config) // -// result, err := client.GetObject(context.TODO(), s3.GetObject{ -// Bucket: aws.String("example-bucket"), -// Key: aws.String("example-key"), -// }, func(o *s3.Options) { -// o.Credentials = nil -// // Or -// o.Credentials = aws.AnonymousCredentials{} -// }) +// result, err := client.GetObject(context.TODO(), s3.GetObject{ +// Bucket: aws.String("example-bucket"), +// Key: aws.String("example-key"), +// }, func(o *s3.Options) { +// o.Credentials = nil +// // Or +// o.Credentials = aws.AnonymousCredentials{} +// }) type AnonymousCredentials struct{} // Retrieve implements the CredentialsProvider interface, but will always diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go index befc3bee1a..d8b6e09e59 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/doc.go @@ -1,7 +1,7 @@ // Package aws provides the core SDK's utilities and shared types. Use this package's // utilities to simplify setting and reading API operations parameters. // -// Value and Pointer Conversion Utilities +// # Value and Pointer Conversion Utilities // // This package includes a helper conversion utility for each scalar type the SDK's // APIs use. These utilities make getting a pointer of the scalar, and dereferencing @@ -16,33 +16,33 @@ // to get a pointer of a literal string value, because getting the address of a // literal requires assigning the value to a variable first. // -// var strPtr *string // -// // Without the SDK's conversion functions -// str := "my string" -// strPtr = &str +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str // -// // With the SDK's conversion functions -// strPtr = aws.String("my string") +// // With the SDK's conversion functions +// strPtr = aws.String("my string") // -// // Convert *string to string value -// str = aws.ToString(strPtr) +// // Convert *string to string value +// str = aws.ToString(strPtr) // // In addition to scalars the aws package also includes conversion utilities for // map and slice for commonly used types in API parameters. The map and slice // conversion functions use a similar naming pattern as the scalar conversion // functions. // -// var strPtrs []*string -// var strs []string = []string{"Go", "Gophers", "Go"} +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} // -// // Convert []string to []*string -// strPtrs = aws.StringSlice(strs) +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) // -// // Convert []*string to []string -// strs = aws.ToStringSlice(strPtrs) +// // Convert []*string to []string +// strs = aws.ToStringSlice(strPtrs) // -// SDK Default HTTP Client +// # SDK Default HTTP Client // // The SDK will use the http.DefaultClient if an HTTP client is not provided to // the SDK's Session, or service client constructor.
This means that if the diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index f12e3356f4..22afeea901 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.2" +const goModuleVersion = "1.16.14" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go index 9e34d26f21..91c94d987b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/logging.go @@ -7,10 +7,12 @@ package aws // The entire 64-bit group is reserved for later expansion by the SDK. // // Example: Setting ClientLogMode to enable logging of retries and requests -// clientLogMode := aws.LogRetries | aws.LogRequest +// +// clientLogMode := aws.LogRetries | aws.LogRequest // // Example: Adding an additional log mode to an existing ClientLogMode value -// clientLogMode |= aws.LogResponse +// +// clientLogMode |= aws.LogResponse type ClientLogMode uint64 // Supported ClientLogMode bits that can be configured to toggle logging of specific SDK events. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index d5adfec90b..285b2bba89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -68,10 +68,12 @@ type requestUserAgent struct { // request. // // User-Agent example: -// aws-sdk-go-v2/1.2.3 +// +// aws-sdk-go-v2/1.2.3 // // X-Amz-User-Agent example: -// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 +// +// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15 func newRequestUserAgent() *requestUserAgent { userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder() addProductName(userAgent) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go index 77dd4d8db8..9d7d3a0cb5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go @@ -9,9 +9,9 @@ import ( // representation of a list of values of a fixed type. A serialized array might // look like the following: // -// ListName.member.1=foo -// &ListName.member.2=bar -// &Listname.member.3=baz +// ListName.member.1=foo +// &ListName.member.2=bar +// &Listname.member.3=baz type Array struct { // The query values to add the array to. values url.Values diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go index ab91e357bc..dea242b8b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/map.go @@ -11,10 +11,10 @@ import ( // the values must all be of the same type, and that map entries are ordered. 
// A serialized map might look like the following: // -// MapName.entry.1.key=Foo -// &MapName.entry.1.value=spam -// &MapName.entry.2.key=Bar -// &MapName.entry.2.value=eggs +// MapName.entry.1.key=Foo +// &MapName.entry.1.value=spam +// &MapName.entry.2.key=Bar +// &MapName.entry.2.value=eggs type Map struct { // The query values to add the map to. values url.Values diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go index debb413dec..6a99d4ea8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go @@ -10,8 +10,8 @@ import ( // values where there is a fixed set of keys whose values each have their // own known type. A serialized object might look like the following: // -// ObjectName.Foo=value -// &ObjectName.Bar=5 +// ObjectName.Foo=value +// &ObjectName.Bar=5 type Object struct { // The query values to add the object to. values url.Values diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go index 42ced06e24..3a08ebe0a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/doc.go @@ -1,12 +1,12 @@ // Package retry provides interfaces and implementations for SDK request retry behavior. // -// Retryer Interface and Implementations +// # Retryer Interface and Implementations // -// This packages defines Retryer interface that is used to either implement custom retry behavior -// or to extend the existing retry implementations provided by the SDK. This packages provides a single -// retry implementations: Standard. +// This package defines Retryer interface that is used to either implement custom retry behavior +// or to extend the existing retry implementations provided by the SDK. This package provides a single +// retry implementation: Standard. // -// Standard +// # Standard // // Standard is the default retryer implementation used by service clients. The standard retryer is a rate limited // retryer that has a configurable max attempts to limit the number of retry attempts when a retryable error occurs. @@ -15,66 +15,66 @@ // // By default the standard retryer uses the DefaultRetryables slice of IsErrorRetryable types to determine whether // a given error is retryable. By default this list of retryables includes the following: -// - Retrying errors that implement the RetryableError method, and return true. -// - Connection Errors -// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true. -// - Connection Reset Errors. -// - net.OpErr types that are dialing errors or are temporary. -// - HTTP Status Codes: 500, 502, 503, and 504. -// - API Error Codes -// - RequestTimeout, RequestTimeoutException -// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException, -// RequestThrottled, SlowDown, EC2ThrottledException -// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException -// - TransactionInProgressException, PriorRequestNotComplete +// - Retrying errors that implement the RetryableError method, and return true. +// - Connection Errors +// - Errors that implement a ConnectionError, Temporary, or Timeout method that return true. +// - Connection Reset Errors. +// - net.OpErr types that are dialing errors or are temporary. 
+// - HTTP Status Codes: 500, 502, 503, and 504. +// - API Error Codes +// - RequestTimeout, RequestTimeoutException +// - Throttling, ThrottlingException, ThrottledException, RequestThrottledException, TooManyRequestsException, +// RequestThrottled, SlowDown, EC2ThrottledException +// - ProvisionedThroughputExceededException, RequestLimitExceeded, BandwidthLimitExceeded, LimitExceededException +// - TransactionInProgressException, PriorRequestNotComplete // // The standard retryer will not retry a request if the context associated with the request // has been cancelled. Applications must handle this case explicitly if they wish to retry with a different context // value. // // You can configure the standard retryer implementation to fit your applications by constructing a standard retryer -// using the NewStandard function, and providing one more functional arguments that mutate the StandardOptions +// using the NewStandard function, and providing one or more functional arguments that mutate the StandardOptions // structure. StandardOptions provides the ability to modify the token bucket rate limiter, retryable error conditions, // and the retry delay policy. // // For example, to modify the default retry attempts for the standard retryer: // -// // configure the custom retryer -// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { -// o.MaxAttempts = 5 -// }) +// // configure the custom retryer +// customRetry := retry.NewStandard(func(o *retry.StandardOptions) { +// o.MaxAttempts = 5 +// }) // -// // create a service client with the retryer -// s3.NewFromConfig(cfg, func(o *s3.Options) { -// o.Retryer = customRetry -// }) +// // create a service client with the retryer +// s3.NewFromConfig(cfg, func(o *s3.Options) { +// o.Retryer = customRetry +// }) // -// Utilities +// # Utilities // // A number of package functions have been provided to easily wrap retryer implementations in an implementation agnostic // way. These are: // -// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable -// in addition to those considered retryable by the provided retryer. +// AddWithErrorCodes - Provides the ability to add additional API error codes that should be considered retryable +// in addition to those considered retryable by the provided retryer. // -// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping -// a retryer implementation. +// AddWithMaxAttempts - Provides the ability to set the max number of attempts for retrying a request by wrapping +// a retryer implementation. // -// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a -// request by wrapping a retryer implementation. +// AddWithMaxBackoffDelay - Provides the ability to set the max back off delay that can occur before retrying a +// request by wrapping a retryer implementation. // // The following package functions have been provided to easily satisfy different retry interfaces to further customize // a given retryer's behavior: // -// BackoffDelayerFunc - Can be used to wrap a function to satisfy the BackoffDelayer interface.
For example, +// you can use this method to easily create custom back off policies to be used with the +// standard retryer. // -// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, -// this can be used to extend the standard retryer to add additional logic ot determine if a -// error should be retried. +// IsErrorRetryableFunc - Can be used to wrap a function to satisfy the IsErrorRetryable interface. For example, +// this can be used to extend the standard retryer to add additional logic to determine if an +// error should be retried. // -// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, -// this can be used to extend the standard retryer to add additional logic to determine if an -// error should be considered a timeout. +// IsErrorTimeoutFunc - Can be used to wrap a function to satisfy IsErrorTimeout interface. For example, +// this can be used to extend the standard retryer to add additional logic to determine if an +// error should be considered a timeout. package retry diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go index 926f5f5e1e..3326289a15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go @@ -90,7 +90,7 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next) attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata) - // AttempResult Retried states that the attempt was not successful, and + // AttemptResult Retried states that the attempt was not successful, and // should be retried. shouldRetry := attemptResult.Retried diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go index 0cb9cffaf5..d025dbaa06 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/util.go @@ -46,19 +46,35 @@ func StripExcessSpaces(str string) string { return string(buf[:m]) } -// GetURIPath returns the escaped URI component from the provided URL +// GetURIPath returns the escaped URI component from the provided URL. func GetURIPath(u *url.URL) string { - var uri string + var uriPath string if len(u.Opaque) > 0 { - uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + const schemeSep, pathSep, queryStart = "//", "/", "?" + + opaque := u.Opaque + // Cut off the query string if present. + if idx := strings.Index(opaque, queryStart); idx >= 0 { + opaque = opaque[:idx] + } + + // Cut out the scheme separator if present. + if strings.Index(opaque, schemeSep) == 0 { + opaque = opaque[len(schemeSep):] + } + + // Capture the URI path starting with the first path separator.
+ if idx := strings.Index(opaque, pathSep); idx >= 0 { + uriPath = opaque[idx:] + } } else { - uri = u.EscapedPath() + uriPath = u.EscapedPath() } - if len(uri) == 0 { - uri = "/" + if len(uriPath) == 0 { + uriPath = "/" } - return uri + return uriPath } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index 3f3bcf456a..db8377ae50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -82,7 +82,7 @@ func (m *dynamicPayloadSigningMiddleware) HandleBuild( } // if TLS is enabled, use unsigned payload when supported - if strings.EqualFold(req.URL.Scheme, "https") { + if req.IsHTTPS() { return (&unsignedPayload{}).HandleBuild(ctx, in, next) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index 06ba7773ab..afd069c1f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -3,20 +3,22 @@ // Provides request signing for requests that need to be signed with // AWS V4 Signatures. // -// Standalone Signer +// # Standalone Signer // // Generally using the signer outside of the SDK should not require any additional -// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// +// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires +// // additional escaping you may need to use the URL.Opaque to define what the raw URI should be sent // to the service as. // // The signer will first check the URL.Opaque field, and use its value if set. // The signer does require the URL.Opaque field to be set in the form of: // -// "///" +// "///" // -// // e.g. -// "//example.com/some/path" +// // e.g. +// "//example.com/some/path" // // The leading "//" and hostname are required or the URL.Opaque escaping will // not work correctly. @@ -252,7 +254,7 @@ func buildAuthorizationHeader(credentialStr, signedHeadersStr, signingSignature // request has no payload you should use the hex encoded SHA-256 of an empty // string as the payloadHash value. // -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // // Some services such as Amazon S3 accept alternative values for the payload // hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be @@ -311,7 +313,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht // request has no payload you should use the hex encoded SHA-256 of an empty // string as the payloadHash value. // -// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +// "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // // Some services such as Amazon S3 accept alternative values for the payload // hash, such as "UNSIGNED-PAYLOAD" for requests where the body will not be @@ -331,10 +333,10 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht // parameter is not used by all AWS services, and is most notably used by // Amazon S3 APIs.
// -// expires := 20 * time.Minute -// query := req.URL.Query() -// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10) -// req.URL.RawQuery = query.Encode() +// expires := 20 * time.Minute +// query := req.URL.Query() +// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) +// req.URL.RawQuery = query.Encode() // // This method does not modify the provided request. func (s *Signer) PresignHTTP( @@ -407,8 +409,8 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he headers = append(headers, hostHeader) signed[hostHeader] = append(signed[hostHeader], host) + const contentLengthHeader = "content-length" if length > 0 { - const contentLengthHeader = "content-length" headers = append(headers, contentLengthHeader) signed[contentLengthHeader] = append(signed[contentLengthHeader], strconv.FormatInt(length, 10)) } @@ -417,6 +419,10 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he if !rule.IsValid(k) { continue // ignored header } + if strings.EqualFold(k, contentLengthHeader) { + // prevent signing already handled content-length header. + continue + } lowerCaseKey := strings.ToLower(k) if _, ok := signed[lowerCaseKey]; ok { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index b8a779e1a3..c0341ea171 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,91 @@ +# v1.17.5 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-08-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-08-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-08-14) + +* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present. + +# v1.16.1 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-08-10) + +* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
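Editor's note on the v1.16.0 entry above: with those keys present in `~/.aws/credentials`, loading the profile is the usual `config.LoadDefaultConfig` call already documented by this module. A minimal sketch; the profile name is a placeholder, and the `context` and `log` imports are assumed.

	// Resolve a profile whose credentials file supplies sso_account_id,
	// sso_region, sso_role_name, sso_start_url, and ca_bundle.
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithSharedConfigProfile("my-sso-profile"), // placeholder profile
	)
	if err != nil {
		log.Fatalf("failed to load config, %v", err)
	}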
+ +# v1.15.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.14 (2022-07-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.13 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.12 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.11 (2022-06-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.10 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.9 (2022-05-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.8 (2022-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.7 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2022-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2022-05-09) + +* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682) + +# v1.15.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 79f067017e..5940f8e7ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -72,6 +72,10 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // implementations depend on or can be configured with earlier resolved // configuration options. resolveCredentials, + + // Sets the resolved bearer authentication token API clients will use for + // httpBearerAuth authentication scheme. + resolveBearerAuthToken, } // A Config represents a generic configuration value or set of values. This type @@ -137,17 +141,10 @@ func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigRes for _, fn := range resolvers { if err := fn(ctx, &cfg, cs); err != nil { - // TODO provide better error? return aws.Config{}, err } } - var sources []interface{} - for _, s := range cs { - sources = append(sources, s) - } - cfg.ConfigSources = sources - return cfg, nil } @@ -169,13 +166,12 @@ func (cs configs) ResolveConfig(f func(configs []interface{}) error) error { // The custom configurations must satisfy the respective providers for their data // or the custom data will be ignored by the resolvers and config loaders. 
// -// cfg, err := config.LoadDefaultConfig( context.TODO(), -// WithSharedConfigProfile("test-profile"), -// ) -// if err != nil { -// panic(fmt.Sprintf("failed loading config, %v", err)) -// } -// +// cfg, err := config.LoadDefaultConfig( context.TODO(), +// WithSharedConfigProfile("test-profile"), +// ) +// if err != nil { +// panic(fmt.Sprintf("failed loading config, %v", err)) +// } // // The default configuration sources are: // * Environment Variables diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go index 31648ffb57..aab7164e28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go @@ -15,6 +15,6 @@ // take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources // implement the same provider interface, priority will be handled by the order in which the sources were passed in. // -// A number of helpers (prefixed by ``With``) are provided in this package that implement their respective provider +// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider // interface. These helpers should be used for overriding configuration programmatically at runtime. package config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index bbfd492442..8c3e5c63ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.3" +const goModuleVersion = "1.17.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go index 22e6019fbd..625147e970 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" ) @@ -28,6 +29,9 @@ type LoadOptions struct { // Credentials object to use when signing requests. Credentials aws.CredentialsProvider + // Token provider for authentication operations with bearer authentication. + BearerAuthTokenProvider smithybearer.TokenProvider + // HTTPClient the SDK's API clients will use to invoke HTTP requests. 
HTTPClient HTTPClient @@ -128,6 +132,14 @@ type LoadOptions struct { // aws.CredentialsCacheOptions CredentialsCacheOptions func(*aws.CredentialsCacheOptions) + // BearerAuthTokenCacheOptions is a function for setting the smithy-go + // auth/bearer#TokenCacheOptions + BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions) + + // SSOTokenProviderOptions is a function for setting the + // credentials/ssocreds.SSOTokenProviderOptions + SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions) + // ProcessCredentialOptions is a function for setting // the processcreds.Options ProcessCredentialOptions func(*processcreds.Options) @@ -451,6 +463,73 @@ func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptio } } +// getBearerAuthTokenProvider returns the credentials value +func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) { + if o.BearerAuthTokenProvider == nil { + return nil, false, nil + } + + return o.BearerAuthTokenProvider, true, nil +} + +// WithBearerAuthTokenProvider is a helper function to construct functional options +// that sets Credential provider value on config's LoadOptions. If credentials +// provider is set to nil, the credentials provider value will be ignored. +// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides +// the previous call values. +func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BearerAuthTokenProvider = v + return nil + } +} + +// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions +func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) { + if o.BearerAuthTokenCacheOptions == nil { + return nil, false, nil + } + + return o.BearerAuthTokenCacheOptions, true, nil +} + +// WithBearerAuthTokenCacheOptions is a helper function to construct functional options +// that sets a function to modify the TokenCacheOptions the smithy-go +// auth/bearer#TokenCache will be configured with, if the TokenCache is used by +// the configuration loader. +// +// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides +// the previous call values. +func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.BearerAuthTokenCacheOptions = v + return nil + } +} + +// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions +func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) { + if o.SSOTokenProviderOptions == nil { + return nil, false, nil + } + + return o.SSOTokenProviderOptions, true, nil +} + +// WithSSOTokenProviderOptions is a helper function to construct functional +// options that sets a function to modify the SSOtokenProviderOptions the SDK's +// credentials/ssocreds#SSOProvider will be configured with, if the +// SSOTokenProvider is used by the configuration loader. +// +// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides +// the previous call values. 
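Taken together, the bearer-auth load options above let a caller inject a token provider when loading configuration. A minimal sketch of the intended usage, using a static token purely for illustration (real callers would more likely rely on the SSO session resolution added later in this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	smithybearer "github.com/aws/smithy-go/auth/bearer"
)

func main() {
	// Illustrative static token; not part of this change.
	provider := smithybearer.StaticTokenProvider{
		Token: smithybearer.Token{Value: "example-token"},
	}

	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithBearerAuthTokenProvider(provider),
	)
	if err != nil {
		log.Fatalf("failed loading config, %v", err)
	}

	// API clients built from cfg that support the httpBearerAuth scheme
	// pick up cfg.BearerAuthTokenProvider set by resolveBearerAuthToken.
	_ = cfg
}
```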
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc { + return func(o *LoadOptions) error { + o.SSOTokenProviderOptions = v + return nil + } +} + // getProcessCredentialOptions returns the wrapped function to set processcreds.Options func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) { if o.ProcessCredentialOptions == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 3f12df1bfe..6f1ab8cd14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" + smithybearer "github.com/aws/smithy-go/auth/bearer" "github.com/aws/smithy-go/logging" "github.com/aws/smithy-go/middleware" ) @@ -185,6 +186,73 @@ func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) ( return } +// bearerAuthTokenProviderProvider provides access to the bearer authentication +// token external configuration value. +type bearerAuthTokenProviderProvider interface { + getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error) +} + +// getBearerAuthTokenProvider searches the config sources for a +// bearerAuthTokenProviderProvider and returns the value if found. Returns an +// error if a provider fails before a value is found. +func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) { + for _, cfg := range configs { + if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok { + p, found, err = provider.getBearerAuthTokenProvider(ctx) + if err != nil || found { + break + } + } + } + return +} + +// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for +// setting the smithy-go auth/bearer#TokenCacheOptions. +type bearerAuthTokenCacheOptionsProvider interface { + getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) +} + +// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for +// setting the smithy-go auth/bearer#TokenCacheOptions. +func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) ( + f func(*smithybearer.TokenCacheOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok { + f, found, err = p.getBearerAuthTokenCacheOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ssoTokenProviderOptionsProvider is an interface for retrieving a function for +// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. +type ssoTokenProviderOptionsProvider interface { + getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) +} + +// getSSOTokenProviderOptions is an interface for retrieving a function for +// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions. 
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) ( + f func(*ssocreds.SSOTokenProviderOptions), found bool, err error, +) { + for _, config := range configs { + if p, ok := config.(ssoTokenProviderOptionsProvider); ok { + f, found, err = p.getSSOTokenProviderOptions(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ssoTokenProviderOptionsProvider + // processCredentialOptions is an interface for retrieving a function for setting // the processcreds.Options. type processCredentialOptions interface { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index 4a80247694..4428ba49c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -21,9 +21,15 @@ import ( // This should be used as the first resolver in the slice of resolvers when // resolving external configuration. func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error { + var sources []interface{} + for _, s := range cfgs { + sources = append(sources, s) + } + *cfg = aws.Config{ - Credentials: aws.AnonymousCredentials{}, - Logger: logging.NewStandardLogger(os.Stderr), + Credentials: aws.AnonymousCredentials{}, + Logger: logging.NewStandardLogger(os.Stderr), + ConfigSources: sources, } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go new file mode 100644 index 0000000000..ae5fb27bd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go @@ -0,0 +1,133 @@ +package config + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials/ssocreds" + "github.com/aws/aws-sdk-go-v2/service/ssooidc" + smithybearer "github.com/aws/smithy-go/auth/bearer" +) + +// resolveBearerAuthToken extracts a token provider from the config sources. +// +// If an explicit bearer authentication token provider is not found the +// resolver will fallback to resolving token provider via other config sources +// such as SharedConfig. +func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error { + found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs) + if found || err != nil { + return err + } + + return resolveBearerAuthTokenProviderChain(ctx, cfg, configs) +} + +// resolveBearerAuthTokenProvider extracts the first instance of +// BearerAuthTokenProvider from the config sources. +// +// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure +// the Token is only refreshed when needed. This also protects the +// TokenProvider so it can be used concurrently. 
+// +// Config providers used: +// * bearerAuthTokenProviderProvider +func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { + tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs) + if !found || err != nil { + return false, err + } + + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, tokenProvider) + if err != nil { + return false, err + } + + return true, nil +} + +func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) { + _, sharedConfig, _ := getAWSConfigSources(configs) + + var provider smithybearer.TokenProvider + + if sharedConfig.SSOSession != nil || (sharedConfig.SSORegion != "" && sharedConfig.SSOStartURL != "") { + ssoSession := sharedConfig.SSOSession + if ssoSession == nil { + // Fallback to legacy SSO session config parameters, if the + // sso-session section wasn't used. + ssoSession = &SSOSession{ + Name: sharedConfig.SSOStartURL, + SSORegion: sharedConfig.SSORegion, + SSOStartURL: sharedConfig.SSOStartURL, + } + } + + provider, err = resolveBearerAuthSSOTokenProvider( + ctx, cfg, ssoSession, configs) + } + + if err == nil && provider != nil { + cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache( + ctx, configs, provider) + } + + return err +} + +func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) { + ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err) + } + + var optFns []func(*ssocreds.SSOTokenProviderOptions) + if found { + optFns = append(optFns, ssoTokenProviderOptionsFn) + } + + cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name) + if err != nil { + return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err) + } + + client := ssooidc.NewFromConfig(*cfg) + provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...) + + return provider, nil +} + +// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go +// bearer/auth#TokenCache with the provided options if the provider is not +// already a TokenCache. +func wrapWithBearerAuthTokenCache( + ctx context.Context, + cfgs configs, + provider smithybearer.TokenProvider, + optFns ...func(*smithybearer.TokenCacheOptions), +) (smithybearer.TokenProvider, error) { + _, ok := provider.(*smithybearer.TokenCache) + if ok { + return provider, nil + } + + tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs) + if err != nil { + return nil, err + } + + opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns)) + opts = append(opts, func(o *smithybearer.TokenCacheOptions) { + o.RefreshBeforeExpires = 5 * time.Minute + o.RetrieveBearerTokenTimeout = 30 * time.Second + }) + opts = append(opts, optFns...) 
+ if optionsFound { + opts = append(opts, tokenCacheConfigOptions) + } + + return smithybearer.NewTokenCache(provider, opts...), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 42904ed740..28705f47fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -29,25 +29,19 @@ var ( ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing ) -// resolveCredentials extracts a credential provider from slice of config sources. +// resolveCredentials extracts a credential provider from slice of config +// sources. // -// If an explict credential provider is not found the resolver will fallback to resolving -// credentials by extracting a credential provider from EnvConfig and SharedConfig. +// If an explicit credential provider is not found the resolver will fallback +// to resolving credentials by extracting a credential provider from EnvConfig +// and SharedConfig. func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { found, err := resolveCredentialProvider(ctx, cfg, configs) - if err != nil { + if found || err != nil { return err } - if found { - return nil - } - err = resolveCredentialChain(ctx, cfg, configs) - if err != nil { - return err - } - - return nil + return resolveCredentialChain(ctx, cfg, configs) } // resolveCredentialProvider extracts the first instance of Credentials from the @@ -61,12 +55,9 @@ func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) e // * credentialsProviderProvider func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) { credProvider, found, err := getCredentialsProvider(ctx, configs) - if err != nil { + if !found || err != nil { return false, err } - if !found { - return false, nil - } cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider) if err != nil { @@ -454,7 +445,7 @@ func wrapWithCredentialsCache( return provider, nil } - credCacheOptions, found, err := getCredentialsCacheOptionsProvider(ctx, cfgs) + credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs) if err != nil { return nil, err } @@ -462,7 +453,7 @@ func wrapWithCredentialsCache( // force allocation of a new slice if the additional options are // needed, to prevent overwriting the passed in slice of options. optFns = optFns[:len(optFns):len(optFns)] - if found { + if optionsFound { optFns = append(optFns, credCacheOptions) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index 4c43a165d4..48aa7a8cf0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -8,6 +8,7 @@ import ( "io" "io/ioutil" "os" + "os/user" "path/filepath" "strings" "time" @@ -19,9 +20,14 @@ import ( ) const ( - // Prefix to use for filtering profiles + // Prefix to use for filtering profiles. The profile prefix should only + // exist in the shared config file, not the credentials file. profilePrefix = `profile ` + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. 
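The cache defaults applied in wrapWithBearerAuthTokenCache above (refresh five minutes before expiry, a 30-second retrieve timeout) can be overridden through the load options; a hedged sketch with illustrative replacement values:

```go
import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	smithybearer "github.com/aws/smithy-go/auth/bearer"
)

// loadWithTunedTokenCache overrides the resolver's token cache defaults.
// The replacement values are examples, not recommendations.
func loadWithTunedTokenCache(ctx context.Context) (aws.Config, error) {
	return config.LoadDefaultConfig(ctx,
		config.WithBearerAuthTokenCacheOptions(func(o *smithybearer.TokenCacheOptions) {
			o.RefreshBeforeExpires = 10 * time.Minute  // resolver default: 5 minutes
			o.RetrieveBearerTokenTimeout = time.Minute // resolver default: 30 seconds
		}),
	)
}
```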
+ ssoSectionPrefix = `sso-session ` + // string equivalent for boolean endpointDiscoveryDisabled = `false` endpointDiscoveryEnabled = `true` @@ -42,10 +48,13 @@ const ( roleDurationSecondsKey = "duration_seconds" // optional // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + + ssoRegionKey = "sso_region" + ssoStartURLKey = "sso_start_url" + ssoAccountIDKey = "sso_account_id" - ssoRegionKey = "sso_region" ssoRoleNameKey = "sso_role_name" - ssoStartURL = "sso_start_url" // Additional Config fields regionKey = `region` @@ -119,12 +128,34 @@ var DefaultSharedConfigFiles = []string{ DefaultSharedConfigFilename(), } -// DefaultSharedCredentialsFiles is a slice of the default shared credentials files that -// the will be used in order to load the SharedConfig. +// DefaultSharedCredentialsFiles is a slice of the default shared credentials +// files that the will be used in order to load the SharedConfig. var DefaultSharedCredentialsFiles = []string{ DefaultSharedCredentialsFilename(), } +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type SSOSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *SSOSession) setFromIniSection(section ini.Section) error { + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURLKey) + + if s.SSORegion == "" || s.SSOStartURL == "" { + return fmt.Errorf( + "%v and %v are required parameters in sso-session section", + ssoRegionKey, ssoStartURLKey, + ) + } + + return nil +} + // SharedConfig represents the configuration fields of the SDK config files. type SharedConfig struct { Profile string @@ -144,10 +175,17 @@ type SharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *SSOSession + + // Legacy SSO session options + SSORegion string + SSOStartURL string + + // SSO fields not used SSOAccountID string - SSORegion string SSORoleName string - SSOStartURL string RoleARN string ExternalID string @@ -463,7 +501,6 @@ type LoadSharedConfigOptions struct { // // You can read more about shared config and credentials file location at // https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location -// func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) { var option LoadSharedConfigOptions for _, fn := range optFns { @@ -485,7 +522,7 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func } // check for profile prefix and drop duplicates or invalid profiles - err = processConfigSections(ctx, configSections, option.Logger) + err = processConfigSections(ctx, &configSections, option.Logger) if err != nil { return SharedConfig{}, err } @@ -497,12 +534,12 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func } // check for profile prefix and drop duplicates or invalid profiles - err = processCredentialsSections(ctx, credentialsSections, option.Logger) + err = processCredentialsSections(ctx, &credentialsSections, option.Logger) if err != nil { return SharedConfig{}, err } - err = mergeSections(configSections, credentialsSections) + err = mergeSections(&configSections, credentialsSections) if err != nil { return SharedConfig{}, err } @@ -516,53 +553,73 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func return cfg, nil } -func processConfigSections(ctx context.Context, 
sections ini.Sections, logger logging.Logger) error { +func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { + skipSections := map[string]struct{}{} + for _, section := range sections.List() { - // drop profiles without prefix for config files - if !strings.HasPrefix(section, profilePrefix) && !strings.EqualFold(section, "default") { + if _, ok := skipSections[section]; ok { + continue + } + + // drop sections from config file that do not have expected prefixes. + switch { + case strings.HasPrefix(section, profilePrefix): + // Rename sections to remove "profile " prefixing to match with + // credentials file. If default is already present, it will be + // dropped. + newName, err := renameProfileSection(section, sections, logger) + if err != nil { + return fmt.Errorf("failed to rename profile section, %w", err) + } + skipSections[newName] = struct{}{} + + case strings.HasPrefix(section, ssoSectionPrefix): + case strings.EqualFold(section, "default"): + default: // drop this section, as invalid profile name sections.DeleteSection(section) if logger != nil { - logger.Logf(logging.Debug, - "A profile defined with name `%v` is ignored. For use within a shared configuration file, "+ - "a non-default profile must have `profile ` prefixed to the profile name.\n", + logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+ + "For use within a shared configuration file, "+ + "a non-default profile must have `profile ` "+ + "prefixed to the profile name.", section, ) } } } + return nil +} - // rename sections to remove `profile ` prefixing to match with credentials file. - // if default is already present, it will be dropped. - for _, section := range sections.List() { - if strings.HasPrefix(section, profilePrefix) { - v, ok := sections.GetSection(section) - if !ok { - return fmt.Errorf("error processing profiles within the shared configuration files") - } - - // delete section with profile as prefix - sections.DeleteSection(section) +func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) { + v, ok := sections.GetSection(section) + if !ok { + return "", fmt.Errorf("error processing profiles within the shared configuration files") + } - // set the value to non-prefixed name in sections. - section = strings.TrimPrefix(section, profilePrefix) - if sections.HasSection(section) { - oldSection, _ := sections.GetSection(section) - v.Logs = append(v.Logs, - fmt.Sprintf("A default profile prefixed with `profile ` found in %s, "+ - "overrided non-prefixed default profile from %s", v.SourceFile, oldSection.SourceFile)) - } + // delete section with profile as prefix + sections.DeleteSection(section) - // assign non-prefixed name to section - v.Name = section - sections.SetSection(section, v) - } + // set the value to non-prefixed name in sections. 
+ section = strings.TrimPrefix(section, profilePrefix) + if sections.HasSection(section) { + oldSection, _ := sections.GetSection(section) + v.Logs = append(v.Logs, + fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+ + "overriding non-default profile from %s", + v.SourceFile, oldSection.SourceFile)) + sections.DeleteSection(section) } - return nil + + // assign non-prefixed name to section + v.Name = section + sections.SetSection(section, v) + + return section, nil } -func processCredentialsSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error { +func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error { for _, section := range sections.List() { // drop profiles with prefix for credential files if strings.HasPrefix(section, profilePrefix) { @@ -596,7 +653,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) { } // mergeSections into mergedSections - err = mergeSections(mergedSections, sections) + err = mergeSections(&mergedSections, sections) if err != nil { return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err} } @@ -606,7 +663,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) { } // mergeSections merges source section properties into destination section properties -func mergeSections(dst, src ini.Sections) error { +func mergeSections(dst *ini.Sections, src ini.Sections) error { for _, sectionName := range src.List() { srcSection, _ := src.GetSection(sectionName) @@ -680,6 +737,13 @@ func mergeSections(dst, src ini.Sections) error { useFIPSEndpointKey, defaultsModeKey, retryModeKey, + caBundleKey, + + ssoSessionNameKey, + ssoAccountIDKey, + ssoRegionKey, + ssoRoleNameKey, + ssoStartURLKey, } for i := range stringKeys { if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil { @@ -698,7 +762,7 @@ func mergeSections(dst, src ini.Sections) error { } // set srcSection on dst srcSection - dst = dst.SetSection(sectionName, dstSection) + *dst = dst.SetSection(sectionName, dstSection) } return nil @@ -769,7 +833,7 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile } } - // set config from the provided ini section + // set config from the provided INI section err := c.setFromIniSection(profile, section) if err != nil { return fmt.Errorf("error fetching config from profile, %v, %w", profile, err) @@ -832,11 +896,37 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile c.Source = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. 
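For reference, a shared config file that exercises this lookup might pair a profile with an sso-session section as follows; the profile name, session name, and values are illustrative:

```ini
[profile dev]
sso_session = my-sso
sso_account_id = 123456789012
sso_role_name = SSOReadOnlyRole

[sso-session my-sso]
sso_region = us-east-1
sso_start_url = https://my-sso-portal.awsapps.com/start
```

The getSSOSession helper below resolves the `sso-session my-sso` section by name, and setFromIniSection returns an error if sso_region or sso_start_url is missing.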
+ if c.SSOSessionName != "" { + c.SSOSession, err = getSSOSession(c.SSOSessionName, sections, logger) + if err != nil { + return err + } + } + return nil } +func getSSOSession(name string, sections ini.Sections, logger logging.Logger) (*SSOSession, error) { + section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(name)) + if !ok { + return nil, fmt.Errorf("failed to find SSO session section, %v", name) + } + + var ssoSession SSOSession + if err := ssoSession.setFromIniSection(section); err != nil { + return nil, fmt.Errorf("failed to load SSO session %v, %w", name, err) + } + ssoSession.Name = name + + return &ssoSession, nil +} + // setFromIniSection loads the configuration from the profile section defined in -// the provided ini file. A SharedConfig pointer type value is used so that +// the provided INI file. A SharedConfig pointer type value is used so that // multiple config file loadings can be chained. // // Only loads complete logically grouped values, and will not set fields in cfg @@ -871,10 +961,16 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er updateString(&c.Region, section, regionKey) // AWS Single Sign-On (AWS SSO) - updateString(&c.SSOAccountID, section, ssoAccountIDKey) + // SSO session options + updateString(&c.SSOSessionName, section, ssoSessionNameKey) + + // Legacy SSO session options updateString(&c.SSORegion, section, ssoRegionKey) + updateString(&c.SSOStartURL, section, ssoStartURLKey) + + // SSO fields not used + updateString(&c.SSOAccountID, section, ssoAccountIDKey) updateString(&c.SSORoleName, section, ssoRoleNameKey) - updateString(&c.SSOStartURL, section, ssoStartURL) if section.Has(roleDurationSecondsKey) { d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second @@ -1004,20 +1100,13 @@ func (c *SharedConfig) validateSSOConfiguration() error { } var missing []string - if len(c.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } if len(c.SSORegion) == 0 { missing = append(missing, ssoRegionKey) } - if len(c.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - if len(c.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) + missing = append(missing, ssoStartURLKey) } if len(missing) > 0 { @@ -1145,8 +1234,18 @@ func (e CredentialRequiresARNError) Error() string { func userHomeDir() string { // Ignore errors since we only care about Windows and *nix. 
- homedir, _ := os.UserHomeDir() - return homedir + home, _ := os.UserHomeDir() + + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home } func oneOrNone(bs ...bool) bool { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index e98bdbdcc1..2538757fba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,80 @@ +# v1.12.18 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.17 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.16 (2022-08-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.15 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-07-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-05-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-05-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-04-25) + +* **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.2 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go index ae25c3a489..72214bf405 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go @@ -1,14 +1,14 @@ // Package ec2rolecreds provides the credentials provider implementation for // retrieving AWS credentials from Amazon EC2 Instance Roles via Amazon EC2 IMDS. // -// Concurrency and caching +// # Concurrency and caching // // The Provider is not safe to be used concurrently, and does not provide any // caching of credentials retrieved. You should wrap the Provider with a // `aws.CredentialsCache` to provide concurrency safety, and caching of // credentials. 
// -// Loading credentials with the SDK's AWS Config +// # Loading credentials with the SDK's AWS Config // // The EC2 Instance role credentials provider will automatically be the resolved // credential provider int he credential chain if no other credential provider is @@ -18,10 +18,10 @@ // role for credentials, you specify a `credentials_source` property in the config // profile the SDK will load. // -// [default] -// credential_source = Ec2InstanceMetadata +// [default] +// credential_source = Ec2InstanceMetadata // -// Loading credentials with the Provider directly +// # Loading credentials with the Provider directly // // Another way to use the EC2 Instance role credentials provider is to create it // directly and assign it as the credentials provider for an API client. @@ -30,28 +30,28 @@ // it with the CredentialsCache before assigning the provider to the Amazon S3 API // client's Credentials option. // -// provider := imds.New(imds.Options{}) +// provider := imds.New(imds.Options{}) // -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) +// // Create the service client value configured for credentials. +// svc := s3.New(s3.Options{ +// Credentials: aws.NewCredentialsCache(provider), +// }) // // If you need more control, you can set the configuration options on the // credentials provider using the imds.Options type to configure the EC2 IMDS // API Client and ExpiryWindow of the retrieved credentials. // -// provider := imds.New(imds.Options{ -// // See imds.Options type's documentation for more options available. -// Client: imds.New(Options{ -// HTTPClient: customHTTPClient, -// }), +// provider := imds.New(imds.Options{ +// // See imds.Options type's documentation for more options available. +// Client: imds.New(Options{ +// HTTPClient: customHTTPClient, +// }), // -// // Modify how soon credentials expire prior to their original expiry time. -// ExpiryWindow: 5 * time.Minute, -// }) +// // Modify how soon credentials expire prior to their original expiry time. +// ExpiryWindow: 5 * time.Minute, +// }) // -// EC2 IMDS API Client +// # EC2 IMDS API Client // // See the github.com/aws/aws-sdk-go-v2/feature/ec2/imds module for more details on // configuring the client, and options available. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go index aeb79ac3c9..5c699f1665 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go @@ -33,9 +33,9 @@ type GetMetadataAPIClient interface { // // The New function must be used to create the with a custom EC2 IMDS client. 
// -// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{ -// o.Client = imds.New(imds.Options{/* custom options */}) -// }) +// p := &ec2rolecreds.New(func(o *ec2rolecreds.Options{ +// o.Client = imds.New(imds.Options{/* custom options */}) +// }) type Provider struct { options Options } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 40cd7addb3..adc7fc6b00 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -7,26 +7,29 @@ // // Static credentials will never expire once they have been retrieved. The format // of the static credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// } +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } // // Refreshable credentials will expire within the "ExpiryWindow" of the Expiration // value in the response. The format of the refreshable credentials response: -// { -// "AccessKeyId" : "MUA...", -// "SecretAccessKey" : "/7PC5om....", -// "Token" : "AQoDY....=", -// "Expiration" : "2016-02-25T06:03:31Z" -// } +// +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } // // Errors should be returned in the following format and only returned with 400 // or 500 HTTP status codes. -// { -// "code": "ErrorCode", -// "message": "Helpful error message." -// } +// +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } package endpointcreds import ( diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 5705e8653a..1921a7dc16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.2" +const goModuleVersion = "1.12.18" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go index d56dd8260d..a3137b8fa9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/doc.go @@ -7,14 +7,14 @@ // option, you should make sure that the config file is as locked down as possible // using security best practices for your operating system. // -// Concurrency and caching +// # Concurrency and caching // // The Provider is not safe to be used concurrently, and does not provide any // caching of credentials retrieved. You should wrap the Provider with a // `aws.CredentialsCache` to provide concurrency safety, and caching of // credentials. // -// Loading credentials with the SDKs AWS Config +// # Loading credentials with the SDKs AWS Config // // You can use credentials from a AWS shared config `credential_process` in a // variety of ways. @@ -24,20 +24,20 @@ // called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable // (e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. 
// -// [default] -// credential_process = /command/to/call +// [default] +// credential_process = /command/to/call // // Loading configuration using external will use the credential process to // retrieve credentials. NOTE: If there are credentials in the profile you are // using, the credential process will not be used. // -// // Initialize a session to load credentials. -// cfg, _ := config.LoadDefaultConfig(context.TODO()) +// // Initialize a session to load credentials. +// cfg, _ := config.LoadDefaultConfig(context.TODO()) // -// // Create S3 service client to use the credentials. -// svc := s3.NewFromConfig(cfg) +// // Create S3 service client to use the credentials. +// svc := s3.NewFromConfig(cfg) // -// Loading credentials with the Provider directly +// # Loading credentials with the Provider directly // // Another way to use the credentials process provider is by using the // `NewProvider` constructor to create the provider and providing a it with a @@ -47,46 +47,46 @@ // it with the CredentialsCache before assigning the provider to the Amazon S3 API // client's Credentials option. // -// // Create credentials using the Provider. -// provider := processcreds.NewProvider("/path/to/command") +// // Create credentials using the Provider. +// provider := processcreds.NewProvider("/path/to/command") // -// // Create the service client value configured for credentials. -// svc := s3.New(s3.Options{ -// Credentials: aws.NewCredentialsCache(provider), -// }) +// // Create the service client value configured for credentials. +// svc := s3.New(s3.Options{ +// Credentials: aws.NewCredentialsCache(provider), +// }) // // If you need more control, you can set any configurable options in the // credentials using one or more option functions. // -// provider := processcreds.NewProvider("/path/to/command", -// func(o *processcreds.Options) { -// // Override the provider's default timeout -// o.Timeout = 2 * time.Minute -// }) +// provider := processcreds.NewProvider("/path/to/command", +// func(o *processcreds.Options) { +// // Override the provider's default timeout +// o.Timeout = 2 * time.Minute +// }) // // You can also use your own `exec.Cmd` value by satisfying a value that satisfies // the `NewCommandBuilder` interface and use the `NewProviderCommand` constructor. 
// -// // Create an exec.Cmd -// cmdBuilder := processcreds.NewCommandBuilderFunc( -// func(ctx context.Context) (*exec.Cmd, error) { -// cmd := exec.CommandContext(ctx, -// "customCLICommand", -// "-a", "argument", -// ) -// cmd.Env = []string{ -// "ENV_VAR_FOO=value", -// "ENV_VAR_BAR=other_value", -// } -// -// return cmd, nil -// }, -// ) -// -// // Create credentials using your exec.Cmd and custom timeout -// provider := processcreds.NewProviderCommand(cmdBuilder, -// func(opt *processcreds.Provider) { -// // optionally override the provider's default timeout -// opt.Timeout = 1 * time.Second -// }) +// // Create an exec.Cmd +// cmdBuilder := processcreds.NewCommandBuilderFunc( +// func(ctx context.Context) (*exec.Cmd, error) { +// cmd := exec.CommandContext(ctx, +// "customCLICommand", +// "-a", "argument", +// ) +// cmd.Env = []string{ +// "ENV_VAR_FOO=value", +// "ENV_VAR_BAR=other_value", +// } +// +// return cmd, nil +// }, +// ) +// +// // Create credentials using your exec.Cmd and custom timeout +// provider := processcreds.NewProviderCommand(cmdBuilder, +// func(opt *processcreds.Provider) { +// // optionally override the provider's default timeout +// opt.Timeout = 1 * time.Second +// }) package processcreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go index 2f396c0a11..43e5676d34 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go @@ -1,63 +1,71 @@ -// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token. +// Package ssocreds provides a credential provider for retrieving temporary AWS +// credentials using an SSO access token. // -// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider -// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by -// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in -// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned. +// IMPORTANT: The provider in this package does not initiate or perform the AWS +// SSO login flow. The SDK provider expects that you have already performed the +// SSO login flow using AWS CLI using the "aws sso login" command, or by some +// other mechanism. The provider must find a valid non-expired access token for +// the AWS SSO user portal URL in ~/.aws/sso/cache. If a cached token is not +// found, it is expired, or the file is malformed an error will be returned. // -// Loading AWS SSO credentials with the AWS shared configuration file +// # Loading AWS SSO credentials with the AWS shared configuration file // // You can use configure AWS SSO credentials from the AWS shared configuration file by // providing the specifying the required keys in the profile: // -// sso_account_id -// sso_region -// sso_role_name -// sso_start_url +// sso_account_id +// sso_region +// sso_role_name +// sso_start_url // -// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target -// account, role, sign-on portal, and the region where the user portal is located. 
Note: all SSO arguments must be +// For example, the following defines a profile "devsso" and specifies the AWS +// SSO parameters that defines the target account, role, sign-on portal, and +// the region where the user portal is located. Note: all SSO arguments must be // provided, or an error will be returned. // -// [profile devsso] -// sso_start_url = https://my-sso-portal.awsapps.com/start -// sso_role_name = SSOReadOnlyRole -// sso_region = us-east-1 -// sso_account_id = 123456789012 +// [profile devsso] +// sso_start_url = https://my-sso-portal.awsapps.com/start +// sso_role_name = SSOReadOnlyRole +// sso_region = us-east-1 +// sso_account_id = 123456789012 // -// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to -// retrieve credentials. For example: +// Using the config module, you can load the AWS SDK shared configuration, and +// specify that this profile be used to retrieve credentials. For example: // -// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) -// if err != nil { -// return err -// } +// config, err := config.LoadDefaultConfig(context.TODO(), config.WithSharedConfigProfile("devsso")) +// if err != nil { +// return err +// } // -// Programmatically loading AWS SSO credentials directly +// # Programmatically loading AWS SSO credentials directly // -// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information -// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache. +// You can programmatically construct the AWS SSO Provider in your application, +// and provide the necessary information to load and retrieve temporary +// credentials using an access token from ~/.aws/sso/cache. // -// client := sso.NewFromConfig(cfg) +// client := sso.NewFromConfig(cfg) // -// var provider aws.CredentialsProvider -// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start") +// var provider aws.CredentialsProvider +// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start") // -// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time -// provider = aws.NewCredentialsCache(provider) +// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time +// provider = aws.NewCredentialsCache(provider) // -// credentials, err := provider.Retrieve(context.TODO()) -// if err != nil { -// return err -// } +// credentials, err := provider.Retrieve(context.TODO()) +// if err != nil { +// return err +// } // -// It is important that you wrap the Provider with aws.CredentialsCache if you are programmatically constructing the -// provider directly. This prevents your application from accessing the cached access token and requesting new +// It is important that you wrap the Provider with aws.CredentialsCache if you +// are programmatically constructing the provider directly. This prevents your +// application from accessing the cached access token and requesting new // credentials each time the credentials are used. 
// -// Additional Resources +// # Additional Resources // -// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// Configuring the AWS CLI to use AWS Single Sign-On: +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html // -// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +// AWS Single Sign-On User Guide: +// https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go deleted file mode 100644 index d4df39a7a2..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package ssocreds - -import "os" - -func getHomeDirectory() string { - return os.Getenv("HOME") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go deleted file mode 100644 index eb48f61e5b..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/os_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package ssocreds - -import "os" - -func getHomeDirectory() string { - return os.Getenv("USERPROFILE") -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go deleted file mode 100644 index 279df7a131..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/provider.go +++ /dev/null @@ -1,184 +0,0 @@ -package ssocreds - -import ( - "context" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "strings" - "time" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/internal/sdk" - "github.com/aws/aws-sdk-go-v2/service/sso" -) - -// ProviderName is the name of the provider used to specify the source of credentials. -const ProviderName = "SSOProvider" - -var defaultCacheLocation func() string - -func defaultCacheLocationImpl() string { - return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") -} - -func init() { - defaultCacheLocation = defaultCacheLocationImpl -} - -// GetRoleCredentialsAPIClient is a API client that implements the GetRoleCredentials operation. -type GetRoleCredentialsAPIClient interface { - GetRoleCredentials(ctx context.Context, params *sso.GetRoleCredentialsInput, optFns ...func(*sso.Options)) (*sso.GetRoleCredentialsOutput, error) -} - -// Options is the Provider options structure. -type Options struct { - // The Client which is configured for the AWS Region where the AWS SSO user portal is located. - Client GetRoleCredentialsAPIClient - - // The AWS account that is assigned to the user. - AccountID string - - // The role name that is assigned to the user. - RoleName string - - // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. - StartURL string -} - -// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. -type Provider struct { - options Options -} - -// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured -// for the AWS Region where the AWS SSO user portal is located. 
-func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider { - options := Options{ - Client: client, - AccountID: accountID, - RoleName: roleName, - StartURL: startURL, - } - - for _, fn := range optFns { - fn(&options) - } - - return &Provider{ - options: options, - } -} - -// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal -// by exchanging the accessToken present in ~/.aws/sso/cache. -func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { - tokenFile, err := loadTokenFile(p.options.StartURL) - if err != nil { - return aws.Credentials{}, err - } - - output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, - AccountId: &p.options.AccountID, - RoleName: &p.options.RoleName, - }) - if err != nil { - return aws.Credentials{}, err - } - - return aws.Credentials{ - AccessKeyID: aws.ToString(output.RoleCredentials.AccessKeyId), - SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey), - SessionToken: aws.ToString(output.RoleCredentials.SessionToken), - Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(), - CanExpire: true, - Source: ProviderName, - }, nil -} - -func getCacheFileName(url string) (string, error) { - hash := sha1.New() - _, err := hash.Write([]byte(url)) - if err != nil { - return "", err - } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %w", err) - } - - *r = rfc3339(parse) - - return nil -} - -type token struct { - AccessToken string `json:"accessToken"` - ExpiresAt rfc3339 `json:"expiresAt"` - Region string `json:"region,omitempty"` - StartURL string `json:"startUrl,omitempty"` -} - -func (t token) Expired() bool { - return sdk.NowTime().Round(0).After(time.Time(t.ExpiresAt)) -} - -// InvalidTokenError is the error type that is returned if loaded token has expired or is otherwise invalid. -// To refresh the SSO session run aws sso login with the corresponding profile. 
-type InvalidTokenError struct { - Err error -} - -func (i *InvalidTokenError) Unwrap() error { - return i.Err -} - -func (i *InvalidTokenError) Error() string { - const msg = "the SSO session has expired or is invalid" - if i.Err == nil { - return msg - } - return msg + ": " + i.Err.Error() -} - -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, &InvalidTokenError{Err: err} - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) - if err != nil { - return token{}, &InvalidTokenError{Err: err} - } - - if err := json.Unmarshal(fileBytes, &t); err != nil { - return token{}, &InvalidTokenError{Err: err} - } - - if len(t.AccessToken) == 0 { - return token{}, &InvalidTokenError{} - } - - if t.Expired() { - return token{}, &InvalidTokenError{Err: fmt.Errorf("access token is expired")} - } - - return t, nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 0000000000..40743f0d70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,233 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/internal/sdk" +) + +var osUserHomeDur = os.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// error if unable get derive the path. Key that will be used to compute a SHA1 +// value that is hex encoded. +// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir, err := osUserHomeDur() + if err != nil { + return "", fmt.Errorf("unable to get USER's home directory for cached token, %w", err) + } + + hash := sha1.New() + if _, err = hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type token struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +func (t token) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value 
== nil { + return + } + fields[key] = value +} + +func (t *token) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %w", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (token, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return token{}, fmt.Errorf("failed to read cached SSO token file, %w", err) + } + + var t token + if err := json.Unmarshal(fileBytes, &t); err != nil { + return token{}, fmt.Errorf("failed to parse cached SSO token file, %w", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return token{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t token, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(sdk.NowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %w", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t token) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %w", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %w", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %w", err) + } + + return nil +} + +type rfc3339 time.Time + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %w", err) + } + + return rfc3339(parsed), nil +} + +func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) { + var value string + + // Use JSON unmarshal to unescape the quoted value making use of JSON's + // unquoting rules. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
new file mode 100644
index 0000000000..bd7603bbc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
@@ -0,0 +1,136 @@
+package ssocreds
+
+import (
+    "context"
+    "time"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/internal/sdk"
+    "github.com/aws/aws-sdk-go-v2/service/sso"
+)
+
+// ProviderName is the name of the provider used to specify the source of
+// credentials.
+const ProviderName = "SSOProvider"
+
+// GetRoleCredentialsAPIClient is an API client that implements the
+// GetRoleCredentials operation.
+type GetRoleCredentialsAPIClient interface {
+    GetRoleCredentials(context.Context, *sso.GetRoleCredentialsInput, ...func(*sso.Options)) (
+        *sso.GetRoleCredentialsOutput, error,
+    )
+}
+
+// Options is the Provider options structure.
+type Options struct {
+    // The Client which is configured for the AWS Region where the AWS SSO user
+    // portal is located.
+    Client GetRoleCredentialsAPIClient
+
+    // The AWS account that is assigned to the user.
+    AccountID string
+
+    // The role name that is assigned to the user.
+    RoleName string
+
+    // The URL that points to the organization's AWS Single Sign-On (AWS SSO)
+    // user portal.
+    StartURL string
+
+    // The filepath the cached token will be retrieved from. If unset, the
+    // Provider will use the StartURL to determine the filepath at:
+    //
+    //    ~/.aws/sso/cache/<sha1-hex-encoded-startURL>.json
+    //
+    // If a custom cached token filepath is used, the Provider's StartURL
+    // parameter will be ignored.
+    CachedTokenFilepath string
+}
+
+// Provider is an AWS credential provider that retrieves temporary AWS
+// credentials by exchanging an SSO login token.
+type Provider struct {
+    options Options
+
+    cachedTokenFilepath string
+}
+
+// New returns a new AWS Single Sign-On (AWS SSO) credential provider. The
+// provided client is expected to be configured for the AWS Region where the
+// AWS SSO user portal is located.
+func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL string, optFns ...func(options *Options)) *Provider {
+    options := Options{
+        Client:    client,
+        AccountID: accountID,
+        RoleName:  roleName,
+        StartURL:  startURL,
+    }
+
+    for _, fn := range optFns {
+        fn(&options)
+    }
+
+    return &Provider{
+        options:             options,
+        cachedTokenFilepath: options.CachedTokenFilepath,
+    }
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon
+// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
+// in ~/.aws/sso/cache.
+func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
+    if p.cachedTokenFilepath == "" {
+        cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+        if err != nil {
+            return aws.Credentials{}, &InvalidTokenError{Err: err}
+        }
+        p.cachedTokenFilepath = cachedTokenFilepath
+    }
+
+    tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+    if err != nil {
+        return aws.Credentials{}, &InvalidTokenError{Err: err}
+    }
+
+    if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+        return aws.Credentials{}, &InvalidTokenError{}
+    }
+
+    output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
+        AccessToken: &tokenFile.AccessToken,
+        AccountId:   &p.options.AccountID,
+        RoleName:    &p.options.RoleName,
+    })
+    if err != nil {
+        return aws.Credentials{}, err
+    }
+
+    return aws.Credentials{
+        AccessKeyID:     aws.ToString(output.RoleCredentials.AccessKeyId),
+        SecretAccessKey: aws.ToString(output.RoleCredentials.SecretAccessKey),
+        SessionToken:    aws.ToString(output.RoleCredentials.SessionToken),
+        CanExpire:       true,
+        Expires:         time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
+        Source:          ProviderName,
+    }, nil
+}
+
+// InvalidTokenError is the error type that is returned if the loaded token has
+// expired or is otherwise invalid. To refresh the SSO session run AWS SSO
+// login with the corresponding profile.
+type InvalidTokenError struct {
+    Err error
+}
+
+func (i *InvalidTokenError) Unwrap() error {
+    return i.Err
+}
+
+func (i *InvalidTokenError) Error() string {
+    const msg = "the SSO session has expired or is invalid"
+    if i.Err == nil {
+        return msg
+    }
+    return msg + ": " + i.Err.Error()
+}
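A hedged usage sketch for the Provider above (not part of the vendored change): the account ID, role name, and start URL are placeholder values, and cfg is assumed to be loaded for the Region hosting the SSO user portal.

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
        "github.com/aws/aws-sdk-go-v2/service/sso"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            log.Fatal(err)
        }

        // Exchange the cached SSO access token for temporary role credentials.
        provider := ssocreds.New(sso.NewFromConfig(cfg), "123456789012", "MyRole",
            "https://my-sso-portal.awsapps.com/start")

        creds, err := provider.Retrieve(context.TODO())
        if err != nil {
            var invalid *ssocreds.InvalidTokenError
            if errors.As(err, &invalid) {
                log.Fatal("run `aws sso login` to refresh the session: ", err)
            }
            log.Fatal(err)
        }
        fmt.Println("credentials expire:", creds.Expires)
    }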
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
new file mode 100644
index 0000000000..7f4fc54677
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_token_provider.go
@@ -0,0 +1,147 @@
+package ssocreds
+
+import (
+    "context"
+    "fmt"
+    "os"
+    "time"
+
+    "github.com/aws/aws-sdk-go-v2/aws"
+    "github.com/aws/aws-sdk-go-v2/internal/sdk"
+    "github.com/aws/aws-sdk-go-v2/service/ssooidc"
+    "github.com/aws/smithy-go/auth/bearer"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+    CreateToken(context.Context, *ssooidc.CreateTokenInput, ...func(*ssooidc.Options)) (
+        *ssooidc.CreateTokenOutput, error,
+    )
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+    // Client that can be overridden
+    Client CreateTokenAPIClient
+
+    // The set of API Client options to be applied when invoking the
+    // CreateToken operation.
+    ClientOptions []func(*ssooidc.Options)
+
+    // The path of the file containing the cached SSO token that will be read.
+    // Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+    CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The SSOTokenProvider is not safe to use concurrently. It must be wrapped in
+// a utility such as smithy-go's auth/bearer#TokenCache. The SDK's
+// config.LoadDefaultConfig will automatically wrap the SSOTokenProvider with
+// the smithy-go TokenCache, if the external configuration loaded is
+// configured for an SSO session.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+    options SSOTokenProviderOptions
+}
+
+var _ bearer.TokenProvider = (*SSOTokenProvider)(nil)
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the cached SSO token stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+    options := SSOTokenProviderOptions{
+        Client:              client,
+        CachedTokenFilepath: cachedTokenFilepath,
+    }
+    for _, fn := range optFns {
+        fn(&options)
+    }
+
+    provider := &SSOTokenProvider{
+        options: options,
+    }
+
+    return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+func (p SSOTokenProvider) RetrieveBearerToken(ctx context.Context) (bearer.Token, error) {
+    cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
+    if err != nil {
+        return bearer.Token{}, err
+    }
+
+    if cachedToken.ExpiresAt != nil && sdk.NowTime().After(time.Time(*cachedToken.ExpiresAt)) {
+        cachedToken, err = p.refreshToken(ctx, cachedToken)
+        if err != nil {
+            return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %w", err)
+        }
+    }
+
+    expiresAt := aws.ToTime((*time.Time)(cachedToken.ExpiresAt))
+    return bearer.Token{
+        Value:     cachedToken.AccessToken,
+        CanExpire: !expiresAt.IsZero(),
+        Expires:   expiresAt,
+    }, nil
+}
+
+func (p SSOTokenProvider) refreshToken(ctx context.Context, cachedToken token) (token, error) {
+    if cachedToken.ClientSecret == "" || cachedToken.ClientID == "" || cachedToken.RefreshToken == "" {
+        return token{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
+    }
+
+    createResult, err := p.options.Client.CreateToken(ctx, &ssooidc.CreateTokenInput{
+        ClientId:     &cachedToken.ClientID,
+        ClientSecret: &cachedToken.ClientSecret,
+        RefreshToken: &cachedToken.RefreshToken,
+        GrantType:    aws.String("refresh_token"),
+    }, p.options.ClientOptions...)
+ if err != nil { + return token{}, fmt.Errorf("unable to refresh SSO token, %w", err) + } + + expiresAt := sdk.NowTime().Add(time.Duration(createResult.ExpiresIn) * time.Second) + + cachedToken.AccessToken = aws.ToString(createResult.AccessToken) + cachedToken.ExpiresAt = (*rfc3339)(&expiresAt) + cachedToken.RefreshToken = aws.ToString(createResult.RefreshToken) + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return token{}, fmt.Errorf("failed to stat cached SSO token file %w", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, cachedToken, fileInfo.Mode()); err != nil { + return token{}, fmt.Errorf("unable to cache refreshed SSO token, %w", err) + } + + return cachedToken, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index f8c1ae6acb..289707b6de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -8,31 +8,31 @@ // ensure synchronous usage of the AssumeRoleProvider if the value is shared // between multiple Credentials or service clients. // -// Assume Role +// # Assume Role // // To assume an IAM role using STS with the SDK you can create a new Credentials // with the SDKs's stscreds package. // -// // Initial credentials loaded from SDK's default credential chain. Such as -// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance -// // Role. These credentials will be used to to make the STS Assume Role API. -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } +// // Initial credentials loaded from SDK's default credential chain. Such as +// // the environment, shared credentials (~/.aws/credentials), or EC2 Instance +// // Role. These credentials will be used to to make the STS Assume Role API. +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } // -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN. -// stsSvc := sts.NewFromConfig(cfg) -// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN. +// stsSvc := sts.NewFromConfig(cfg) +// creds := stscreds.NewAssumeRoleProvider(stsSvc, "myRoleArn") // -// cfg.Credentials = aws.NewCredentialsCache(creds) +// cfg.Credentials = aws.NewCredentialsCache(creds) // -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) // -// Assume Role with custom MFA Token provider +// # Assume Role with custom MFA Token provider // // To assume an IAM role with a MFA token you can either specify a custom MFA // token provider or use the SDK's built in StdinTokenProvider that will prompt @@ -43,29 +43,29 @@ // With a custom token provider, the provider is responsible for refreshing the // token code when called. 
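A hedged sketch of driving the SSOTokenProvider defined above: the session name is a hypothetical placeholder, and smithy-go's bearer.NewTokenCache is assumed to supply the concurrency-safe wrapper that the provider's documentation calls for.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
        "github.com/aws/aws-sdk-go-v2/service/ssooidc"
        "github.com/aws/smithy-go/auth/bearer"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            log.Fatal(err)
        }

        // Locate the token cache file written by `aws sso login`.
        cachePath, err := ssocreds.StandardCachedTokenFilepath("my-sso-session")
        if err != nil {
            log.Fatal(err)
        }

        provider := ssocreds.NewSSOTokenProvider(ssooidc.NewFromConfig(cfg), cachePath)

        // SSOTokenProvider is not safe for concurrent use on its own; wrap it
        // in smithy-go's TokenCache as recommended above.
        token, err := bearer.NewTokenCache(provider).RetrieveBearerToken(context.TODO())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("bearer token expires:", token.Expires)
    }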
// -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } // -// staticTokenProvider := func() (string, error) { -// return someTokenCode, nil -// } +// staticTokenProvider := func() (string, error) { +// return someTokenCode, nil +// } // -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = staticTokenProvider -// }) +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN using the MFA token code provided. +// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { +// o.SerialNumber = aws.String("myTokenSerialNumber") +// o.TokenProvider = staticTokenProvider +// }) // -// cfg.Credentials = aws.NewCredentialsCache(creds) +// cfg.Credentials = aws.NewCredentialsCache(creds) // -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) // -// Assume Role with MFA Token Provider +// # Assume Role with MFA Token Provider // // To assume an IAM role with MFA for longer running tasks where the credentials // may need to be refreshed setting the TokenProvider field of AssumeRoleProvider @@ -80,23 +80,23 @@ // have undesirable results as the StdinTokenProvider will not be synchronized. A // single Credentials with an AssumeRoleProvider can be shared safely. // -// cfg, err := config.LoadDefaultConfig(context.TODO()) -// if err != nil { -// panic(err) -// } +// cfg, err := config.LoadDefaultConfig(context.TODO()) +// if err != nil { +// panic(err) +// } // -// // Create the credentials from AssumeRoleProvider to assume the role -// // referenced by the "myRoleARN" ARN using the MFA token code provided. -// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { -// o.SerialNumber = aws.String("myTokenSerialNumber") -// o.TokenProvider = stscreds.StdinTokenProvider -// }) +// // Create the credentials from AssumeRoleProvider to assume the role +// // referenced by the "myRoleARN" ARN using the MFA token code provided. +// creds := stscreds.NewAssumeRoleProvider(sts.NewFromConfig(cfg), "myRoleArn", func(o *stscreds.AssumeRoleOptions) { +// o.SerialNumber = aws.String("myTokenSerialNumber") +// o.TokenProvider = stscreds.StdinTokenProvider +// }) // -// cfg.Credentials = aws.NewCredentialsCache(creds) +// cfg.Credentials = aws.NewCredentialsCache(creds) // -// // Create service client value configured for credentials -// // from assumed role. -// svc := s3.NewFromConfig(cfg) +// // Create service client value configured for credentials +// // from assumed role. +// svc := s3.NewFromConfig(cfg) package stscreds import ( @@ -136,8 +136,13 @@ type AssumeRoleAPIClient interface { AssumeRole(ctx context.Context, params *sts.AssumeRoleInput, optFns ...func(*sts.Options)) (*sts.AssumeRoleOutput, error) } -// DefaultDuration is the default amount of time in minutes that the credentials -// will be valid for. 
+// DefaultDuration is the default amount of time in minutes that the +// credentials will be valid for. This value is only used by AssumeRoleProvider +// for specifying the default expiry duration of an assume role. +// +// Other providers such as WebIdentityRoleProvider do not use this value, and +// instead rely on STS API's default parameter handing to assign a default +// value. var DefaultDuration = time.Duration(15) * time.Minute // AssumeRoleProvider retrieves temporary credentials from the STS service, and diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index 7854a3228c..ddaf6df6ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "strconv" + "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/retry" @@ -45,6 +46,19 @@ type WebIdentityRoleOptions struct { // Session name, if you wish to uniquely identify this session. RoleSessionName string + // Expiry duration of the STS credentials. STS will assign a default expiry + // duration if this value is unset. This is different from the Duration + // option of AssumeRoleProvider, which automatically assigns 15 minutes if + // Duration is unset. + // + // See the STS AssumeRoleWithWebIdentity API reference guide for more + // information on defaults. + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + Duration time.Duration + + // An IAM policy in JSON format that you want to use as an inline session policy. + Policy *string + // The Amazon Resource Names (ARNs) of the IAM managed policies that you // want to use as managed session policies. The policies must exist in the // same account as the role. @@ -100,12 +114,21 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials // uses unix time in nanoseconds to uniquely identify sessions. sessionName = strconv.FormatInt(sdk.NowTime().UnixNano(), 10) } - resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, &sts.AssumeRoleWithWebIdentityInput{ + input := &sts.AssumeRoleWithWebIdentityInput{ PolicyArns: p.options.PolicyARNs, RoleArn: &p.options.RoleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), - }, func(options *sts.Options) { + } + if p.options.Duration != 0 { + // If set use the value, otherwise STS will assign a default expiration duration. + input.DurationSeconds = aws.Int32(int32(p.options.Duration / time.Second)) + } + if p.options.Policy != nil { + input.Policy = p.options.Policy + } + + resp, err := p.options.Client.AssumeRoleWithWebIdentity(ctx, input, func(options *sts.Options) { options.Retryer = retry.AddWithErrorCodes(options.Retryer, invalidIdentityTokenExceptionCode) }) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/doc.go index 81644bf8b7..944feac553 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/doc.go @@ -2,16 +2,16 @@ // // aws-sdk-go-v2 is the the v2 of the AWS SDK for the Go programming language. // -// Getting started +// # Getting started // // The best way to get started working with the SDK is to use `go get` to add the // SDK and desired service clients to your Go dependencies explicitly. 
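Since this revision adds the Duration and Policy knobs to WebIdentityRoleOptions, here is a short hedged sketch of exercising them; the role ARN and token file path are placeholder values.

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
        "github.com/aws/aws-sdk-go-v2/service/sts"
    )

    func main() {
        cfg, err := config.LoadDefaultConfig(context.TODO())
        if err != nil {
            log.Fatal(err)
        }

        provider := stscreds.NewWebIdentityRoleProvider(
            sts.NewFromConfig(cfg),
            "arn:aws:iam::123456789012:role/my-role",
            stscreds.IdentityTokenFile("/var/run/secrets/eks.amazonaws.com/serviceaccount/token"),
            func(o *stscreds.WebIdentityRoleOptions) {
                // New in this version: override STS's default session duration
                // instead of inheriting the API's default.
                o.Duration = 30 * time.Minute
            },
        )

        cfg.Credentials = aws.NewCredentialsCache(provider)
        // cfg can now be passed to any service client constructor.
    }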
// -// go get github.com/aws/aws-sdk-go-v2 +// go get github.com/aws/aws-sdk-go-v2 // go get github.com/aws/aws-sdk-go-v2/config // go get github.com/aws/aws-sdk-go-v2/service/dynamodb // -// Hello AWS +// # Hello AWS // // This example shows how you can use the v2 SDK to make an API request using the // SDK's Amazon DynamoDB client. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 1baeba4261..cccf824f5e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,51 @@ +# v1.12.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 076de88c0d..94348f9630 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.3" +const goModuleVersion = "1.12.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 856aec912d..5f49279412 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,51 @@ +# v1.1.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.19 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.18 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.17 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.16 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.15 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.14 (2022-07-05) + +* **Dependency Update**: Updated to the 
latest SDK module versions + +# v1.1.13 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.12 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.11 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.10 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.9 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index d17db42076..cef7ead9d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.9" +const goModuleVersion = "1.1.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 3473a98235..12f043a9cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,51 @@ +# v2.4.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.7 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 88f74540a5..4656096973 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.3" +const goModuleVersion = "2.4.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 81865856ba..7387c6c106 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,52 @@ +# v1.3.22 (2022-09-02) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2022-05-17) + +* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.10 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go index 1e55bbd07b..fdd5321b4c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go @@ -13,30 +13,31 @@ // } // // Below is the BNF that describes this parser -// Grammar: -// stmt -> section | stmt' -// stmt' -> epsilon | expr -// expr -> value (stmt)* | equal_expr (stmt)* -// equal_expr -> value ( ':' | '=' ) equal_expr' -// equal_expr' -> number | string | quoted_string -// quoted_string -> " quoted_string' -// quoted_string' -> string quoted_string_end -// quoted_string_end -> " -// -// section -> [ section' -// section' -> section_value section_close -// section_value -> number | string_subset | boolean | quoted_string_subset -// quoted_string_subset -> " quoted_string_subset' -// quoted_string_subset' -> string_subset quoted_string_end -// quoted_string_subset -> " -// section_close -> ] -// -// value -> number | string_subset | boolean -// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? -// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? -// -// SkipState will skip (NL WS)+ -// -// comment -> # comment' | ; comment' -// comment' -> epsilon | value +// +// Grammar: +// stmt -> section | stmt' +// stmt' -> epsilon | expr +// expr -> value (stmt)* | equal_expr (stmt)* +// equal_expr -> value ( ':' | '=' ) equal_expr' +// equal_expr' -> number | string | quoted_string +// quoted_string -> " quoted_string' +// quoted_string' -> string quoted_string_end +// quoted_string_end -> " +// +// section -> [ section' +// section' -> section_value section_close +// section_value -> number | string_subset | boolean | quoted_string_subset +// quoted_string_subset -> " quoted_string_subset' +// quoted_string_subset' -> string_subset quoted_string_end +// quoted_string_subset -> " +// section_close -> ] +// +// value -> number | string_subset | boolean +// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ? +// string_subset -> ? 
Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ? +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value package ini diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go deleted file mode 100644 index 6e545b63bc..0000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package ini - -import ( - "bytes" -) - -func Fuzz(data []byte) int { - b := bytes.NewReader(data) - - if _, err := Parse(b); err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 7dced4ec02..51bd239b61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.10" +const goModuleVersion = "1.3.22" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE index 6a66aea5ea..fe6a62006a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/LICENSE @@ -14,7 +14,7 @@ distribution. contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +THIS SOFTWARE IS PROVIDED BY THE COPYIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT @@ -25,3 +25,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go new file mode 100644 index 0000000000..cb70616e80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/docs.go @@ -0,0 +1,7 @@ +// Package singleflight provides a duplicate function call suppression +// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight +// package. The package is forked, because the package a part of the unstable +// and unversioned golang.org/x/sync module. 
+// +// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight +package singleflight diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go index 14ad0c5891..e8a1b17d56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight/singleflight.go @@ -2,11 +2,44 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package singleflight provides a duplicate function call suppression -// mechanism. package singleflight -import "sync" +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} // call is an in-flight or completed singleflight.Do call type call struct { @@ -57,6 +90,12 @@ func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, e c.dups++ g.mu.Unlock() c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } return c.val, c.err, true } c := new(call) @@ -70,6 +109,8 @@ func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, e // DoChan is like Do but returns a channel that will receive the // results when they are ready. +// +// The returned channel will not be closed. func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { ch := make(chan Result, 1) g.mu.Lock() @@ -94,17 +135,66 @@ func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result // doCall handles the single call for a key. func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { - c.val, c.err = fn() - c.wg.Done() - - g.mu.Lock() - if !c.forgotten { - delete(g.m, key) - } - for _, ch := range c.chans { - ch <- Result{c.val, c.err, c.dups > 0} + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. 
+ } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true } - g.mu.Unlock() } // Forget tells the singleflight to forget about a key. Future calls diff --git a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh index 8a2aea99e2..81a8361275 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh +++ b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh @@ -1,4 +1,4 @@ -#1/usr/bin/env bash +#!/usr/bin/env bash PROJECT_DIR="" SDK_SOURCE_DIR=$(cd `dirname $0` && pwd) @@ -30,7 +30,7 @@ while getopts "hs:d:" options; do done if [ "$PROJECT_DIR" != "" ]; then - cd $PROJECT_DIR || exit + cd "$PROJECT_DIR" || exit fi go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml index bde8e1391b..7a2b1680f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml +++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml @@ -1,7 +1,8 @@ [dependencies] - "github.com/aws/smithy-go" = "v1.11.2" - "github.com/google/go-cmp" = "v0.5.7" + "github.com/aws/aws-sdk-go" = "v1.44.28" + "github.com/aws/smithy-go" = "v1.13.2" + "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd" @@ -10,6 +11,9 @@ [modules."."] metadata_package = "aws" + [modules.codegen] + no_tag = true + [modules."example/service/dynamodb/createTable"] no_tag = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index d779c398de..91c877b299 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,51 @@ +# v1.9.15 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.14 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.13 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.12 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.11 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.10 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.9 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.8 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.7 (2022-06-29) + +* **Dependency 
Update**: Updated to the latest SDK module versions + +# v1.9.6 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index f30bfff65c..4ebc037b7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.3" +const goModuleVersion = "1.9.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md new file mode 100644 index 0000000000..a795472868 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/CHANGELOG.md @@ -0,0 +1,182 @@ +# v1.18.9 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2022-08-30) + +* No change notes available for this release. + +# v1.18.6 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2022-08-22) + +* No change notes available for this release. + +# v1.18.4 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2022-07-18) + +* **Feature**: Added support for the SM2 KeySpec in China Partition Regions + +# v1.17.5 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.4 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2022-05-17) + +* **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.0 (2022-04-19) + +* **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes + +# v1.16.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. 
This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-01-07) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2021-12-21) + +* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens. +* **Feature**: Updated to latest service endpoints + +# v1.11.1 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2021-11-19) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2021-11-12) + +* **Feature**: Service clients now support custom endpoints that have an initial URI path defined. + +# v1.9.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Feature**: Updated service to latest API model. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-10-21) + +* **Feature**: API client updated +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2021-09-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-09-02) + +* **Feature**: API client updated + +# v1.5.0 (2021-08-27) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-06-25) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-06-04) + +* No change notes available for this release. 
+ +# v1.3.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go new file mode 100644 index 0000000000..e56ae816b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_client.go @@ -0,0 +1,434 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "KMS" +const ServiceAPIVersion = "2014-11-01" + +// Client provides the API client to make operations call for AWS Key Management +// Service. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. 
+ Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // client's initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number of attempts an API client will + // make when calling an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client's default retryer, or modify + // per operation call's retry max attempts. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different from the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if the + // Retryer option is not also specified. When creating a new API client, this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, but may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, but may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to the client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "kms", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go new file mode 100644 index 0000000000..ee03cf109b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CancelKeyDeletion.go @@ -0,0 +1,144 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Cancels the deletion of a KMS key. When this operation succeeds, the key state +// of the KMS key is Disabled. To enable the KMS key, use EnableKey. For more +// information about scheduling and canceling deletion of a KMS key, see Deleting +// KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in +// the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. 
+// Required permissions: kms:CancelKeyDeletion +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: ScheduleKeyDeletion +func (c *Client) CancelKeyDeletion(ctx context.Context, params *CancelKeyDeletionInput, optFns ...func(*Options)) (*CancelKeyDeletionOutput, error) { + if params == nil { + params = &CancelKeyDeletionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CancelKeyDeletion", params, optFns, c.addOperationCancelKeyDeletionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CancelKeyDeletionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CancelKeyDeletionInput struct { + + // Identifies the KMS key whose deletion is being canceled. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type CancelKeyDeletionOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key whose deletion is canceled. + KeyId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCancelKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCancelKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCancelKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCancelKeyDeletion(region string) 
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CancelKeyDeletion", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go new file mode 100644 index 0000000000..b130992137 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ConnectCustomKeyStore.go @@ -0,0 +1,164 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Connects or reconnects a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// to its associated CloudHSM cluster. The custom key store must be connected +// before you can create KMS keys in the key store or use the KMS keys it contains. +// You can disconnect and reconnect a custom key store at any time. To connect a +// custom key store, its associated CloudHSM cluster must have at least one active +// HSM. To get the number of active HSMs in a cluster, use the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. To add HSMs to the cluster, use the CreateHsm +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) +// operation. Also, the kmsuser crypto user +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) +// (CU) must not be logged into the cluster. This prevents KMS from using this +// account to log in. The connection process can take an extended amount of time to +// complete; up to 20 minutes. This operation starts the connection process, but it +// does not wait for it to complete. When it succeeds, this operation quickly +// returns an HTTP 200 response and a JSON object with no properties. However, this +// response does not indicate that the custom key store is connected. To get the +// connection state of the custom key store, use the DescribeCustomKeyStores +// operation. During the connection process, KMS finds the CloudHSM cluster that is +// associated with the custom key store, creates the connection infrastructure, +// connects to the cluster, logs into the CloudHSM client as the kmsuser CU, and +// rotates its password. The ConnectCustomKeyStore operation might fail for various +// reasons. To find the reason, use the DescribeCustomKeyStores operation and see +// the ConnectionErrorCode in the response. For help interpreting the +// ConnectionErrorCode, see CustomKeyStoresListEntry. To fix the failure, use the +// DisconnectCustomKeyStore operation to disconnect the custom key store, correct +// the error, use the UpdateCustomKeyStore operation if necessary, and then use +// ConnectCustomKeyStore again. If you are having trouble connecting or +// disconnecting a custom key store, see Troubleshooting a Custom Key Store +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a custom key store in a different Amazon Web Services +// account. 
Required permissions: kms:ConnectCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations +// +// * CreateCustomKeyStore +// +// * +// DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) ConnectCustomKeyStore(ctx context.Context, params *ConnectCustomKeyStoreInput, optFns ...func(*Options)) (*ConnectCustomKeyStoreOutput, error) { + if params == nil { + params = &ConnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ConnectCustomKeyStore", params, optFns, c.addOperationConnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ConnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ConnectCustomKeyStoreInput struct { + + // Enter the key store ID of the custom key store that you want to connect. To find + // the ID of a custom key store, use the DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type ConnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationConnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpConnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpConnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opConnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opConnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ConnectCustomKeyStore", + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go new file mode 100644 index 0000000000..3e43fb4fcf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateAlias.go @@ -0,0 +1,184 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a friendly name for a KMS key. Adding, deleting, or updating an alias +// can allow or deny permission to the KMS key. For details, see ABAC in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. You can use an alias to identify a KMS key +// in the KMS console, in the DescribeKey operation and in cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), +// such as Encrypt and GenerateDataKey. You can also change the KMS key that's +// associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) at any +// time. These operations don't affect the underlying KMS key. You can associate +// the alias with any customer managed key in the same Amazon Web Services Region. +// Each alias is associated with only one KMS key at a time, but a KMS key can have +// multiple aliases. A valid KMS key is required. You can't create an alias without +// a KMS key. The alias must be unique in the account and Region, but you can have +// aliases with the same name in different Regions. For detailed information about +// aliases, see Using aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) in the +// Key Management Service Developer Guide. This operation does not return a +// response. To get the alias that you created, use the ListAliases operation. The +// KMS key that you use for this operation must be in a compatible key state. For +// details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on an alias in a different Amazon Web Services account. +// Required permissions +// +// * kms:CreateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// +// * kms:CreateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the KMS key (key policy). +// +// For details, see Controlling access to aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// DeleteAlias +// +// * ListAliases +// +// * UpdateAlias +func (c *Client) CreateAlias(ctx context.Context, params *CreateAliasInput, optFns ...func(*Options)) (*CreateAliasOutput, error) { + if params == nil { + params = &CreateAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateAlias", params, optFns, c.addOperationCreateAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateAliasInput struct { + + // Specifies the alias name. 
This value must begin with alias/ followed by a name, + // such as alias/ExampleAlias. The AliasName value must be a string of 1-256 + // characters. It can contain only alphanumeric characters, forward slashes (/), + // underscores (_), and dashes (-). The alias name cannot begin with alias/aws/. + // The alias/aws/ prefix is reserved for Amazon Web Services managed keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). + // + // This member is required. + AliasName *string + + // Associates the alias with the specified customer managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). + // The KMS key must be in the same Amazon Web Services Region. A valid key ID is + // required. If you supply a null or empty string value, this operation returns an + // error. For help finding the key ID and ARN, see Finding the Key ID and ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) + // in the Key Management Service Developer Guide. Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type CreateAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func
newServiceMetadataMiddleware_opCreateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go new file mode 100644 index 0000000000..b5857b2c97 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateCustomKeyStore.go @@ -0,0 +1,178 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// that is associated with a CloudHSM cluster +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) that you +// own and manage. This operation is part of the custom key store feature +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a single-tenant key store. Before you create +// the custom key store, you must assemble the required elements, including a +// CloudHSM cluster that fulfills the requirements for a custom key store. For +// details about the required elements, see Assemble the Prerequisites +// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) +// in the Key Management Service Developer Guide. When the operation completes +// successfully, it returns the ID of the new custom key store. Before you can use +// your new custom key store, you need to use the ConnectCustomKeyStore operation +// to connect the new key store to its CloudHSM cluster. Even if you are not going +// to use your custom key store immediately, you might want to connect it to verify +// that all settings are correct and then disconnect it until you are ready to use +// it. For help with failures, see Troubleshooting a Custom Key Store +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a custom key store in a different Amazon Web Services +// account. Required permissions: kms:CreateCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy).
Related operations: +// +// * ConnectCustomKeyStore +// +// * +// DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) CreateCustomKeyStore(ctx context.Context, params *CreateCustomKeyStoreInput, optFns ...func(*Options)) (*CreateCustomKeyStoreOutput, error) { + if params == nil { + params = &CreateCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateCustomKeyStore", params, optFns, c.addOperationCreateCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateCustomKeyStoreInput struct { + + // Specifies a friendly name for the custom key store. The name must be unique in + // your Amazon Web Services account. + // + // This member is required. + CustomKeyStoreName *string + + // Identifies the CloudHSM cluster for the custom key store. Enter the cluster ID + // of any active CloudHSM cluster that is not already associated with a custom key + // store. To find the cluster ID, use the DescribeClusters + // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // operation. + CloudHsmClusterId *string + + // Enter the password of the kmsuser crypto user (CU) account + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) + // in the specified CloudHSM cluster. KMS logs into the cluster as this user to + // manage key material on your behalf. The password must be a string of 7 to 32 + // characters. Its value is case sensitive. This parameter tells KMS the kmsuser + // account password; it does not change the password in the CloudHSM cluster. + KeyStorePassword *string + + // Enter the content of the trust anchor certificate for the cluster. This is the + // content of the customerCA.crt file that you created when you initialized the + // cluster + // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html). + TrustAnchorCertificate *string + + noSmithyDocumentSerde +} + +type CreateCustomKeyStoreOutput struct { + + // A unique identifier for the new custom key store. + CustomKeyStoreId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go new file mode 100644 index 0000000000..06c4854d3a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateGrant.go @@ -0,0 +1,277 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds a grant to a KMS key. A grant is a policy instrument that allows Amazon Web +// Services principals to use KMS keys in cryptographic operations. It also can +// allow them to view a KMS key (DescribeKey) and create and manage grants. When +// authorizing access to a KMS key, grants are considered along with key policies +// and IAM policies. Grants are often used for temporary permissions because you +// can create one, use its permissions, and delete it without changing your key +// policies or IAM policies. 
For detailed information about grants, including grant +// terminology, see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// The CreateGrant operation returns a GrantToken and a GrantId. +// +// * When you +// create, retire, or revoke a grant, there might be a brief delay, usually less +// than five minutes, until the grant is available throughout KMS. This state is +// known as eventual consistency. Once the grant has achieved eventual consistency, +// the grantee principal can use the permissions in the grant without identifying +// the grant. However, to use the permissions in the grant immediately, use the +// GrantToken that CreateGrant returns. For details, see Using a grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) +// in the Key Management Service Developer Guide . +// +// * The CreateGrant operation +// also returns a GrantId. You can use the GrantId and a key identifier to identify +// the grant in the RetireGrant and RevokeGrant operations. To find the grant ID, +// use the ListGrants or ListRetirableGrants operations. +// +// The KMS key that you use +// for this operation must be in a compatible key state. For details, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation on a KMS key in a different Amazon Web Services account, specify the +// key ARN in the value of the KeyId parameter. Required permissions: +// kms:CreateGrant +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * ListGrants +// +// * ListRetirableGrants +// +// * +// RetireGrant +// +// * RevokeGrant +func (c *Client) CreateGrant(ctx context.Context, params *CreateGrantInput, optFns ...func(*Options)) (*CreateGrantOutput, error) { + if params == nil { + params = &CreateGrantInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateGrant", params, optFns, c.addOperationCreateGrantMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateGrantOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateGrantInput struct { + + // The identity that gets the permissions specified in the grant. To specify the + // principal, use the Amazon Resource Name (ARN) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of + // an Amazon Web Services principal. Valid Amazon Web Services principals include + // Amazon Web Services accounts (root), IAM users, IAM roles, federated users, and + // assumed role users. For examples of the ARN syntax to use for specifying a + // principal, see Amazon Web Services Identity and Access Management (IAM) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. + // + // This member is required. + GranteePrincipal *string + + // Identifies the KMS key for the grant. The grant gives principals permission to + // use this KMS key. Specify the key ID or key ARN of the KMS key. 
To specify a KMS + // key in a different Amazon Web Services account, you must use the key ARN. For + // example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // A list of operations that the grant permits. This list must include only + // operations that are permitted in a grant. Also, the operation must be supported + // on the KMS key. For example, you cannot create a grant for a symmetric + // encryption KMS key that allows the Sign operation, or a grant for an asymmetric + // KMS key that allows the GenerateDataKey operation. If you try, KMS returns a + // ValidationError exception. For details, see Grant operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // in the Key Management Service Developer Guide. + // + // This member is required. + Operations []types.GrantOperation + + // Specifies a grant constraint. KMS supports the EncryptionContextEquals and + // EncryptionContextSubset grant constraints. Each constraint value can include up + // to 8 encryption context pairs. The encryption context value in each constraint + // cannot exceed 384 characters. For information about grant constraints, see Using + // grant constraints + // (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) + // in the Key Management Service Developer Guide. For more information about + // encryption context, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide . The encryption context grant + // constraints allow the permissions in the grant only when the encryption context + // in the request matches (EncryptionContextEquals) or includes + // (EncryptionContextSubset) the encryption context specified in this structure. + // The encryption context grant constraints are supported only on grant operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) + // that include an EncryptionContext parameter, such as cryptographic operations on + // symmetric encryption KMS keys. Grants with grant constraints can include the + // DescribeKey and RetireGrant operations, but the constraint doesn't apply to + // these operations. If a grant with a grant constraint includes the CreateGrant + // operation, the constraint requires that any grants created with the CreateGrant + // permission have an equally strict or stricter encryption context constraint. You + // cannot use an encryption context grant constraint for cryptographic operations + // with asymmetric KMS keys or HMAC KMS keys. These keys don't support an + // encryption context. + Constraints *types.GrantConstraints + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // A friendly name for the grant. 
Use this value to prevent the unintended creation + // of duplicate grants when retrying this request. When this value is absent, all + // CreateGrant requests result in a new grant with a unique GrantId even if all the + // supplied parameters are identical. This can result in unintended duplicates when + // you retry the CreateGrant request. When this value is present, you can retry a + // CreateGrant request with identical parameters; if the grant already exists, the + // original GrantId is returned without creating a new grant. Note that the + // returned grant token is unique with every CreateGrant request, even when a + // duplicate GrantId is returned. All grant tokens for the same grant ID can be + // used interchangeably. + Name *string + + // The principal that has permission to use the RetireGrant operation to retire the + // grant. To specify the principal, use the Amazon Resource Name (ARN) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of + // an Amazon Web Services principal. Valid Amazon Web Services principals include + // Amazon Web Services accounts (root), IAM users, federated users, and assumed + // role users. For examples of the ARN syntax to use for specifying a principal, + // see Amazon Web Services Identity and Access Management (IAM) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. The + // grant determines the retiring principal. Other principals might have permission + // to retire the grant or revoke the grant. For details, see RevokeGrant and + // Retiring and revoking grants + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) + // in the Key Management Service Developer Guide. + RetiringPrincipal *string + + noSmithyDocumentSerde +} + +type CreateGrantOutput struct { + + // The unique identifier for the grant. You can use the GrantId in a ListGrants, + // RetireGrant, or RevokeGrant operation. + GrantId *string + + // The grant token. Use a grant token when your permission to call this operation + // comes from a new grant that has not yet achieved eventual consistency. For more + // information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantToken *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go new file mode 100644 index 0000000000..dca0a04de0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_CreateKey.go @@ -0,0 +1,462 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a unique customer managed KMS key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) +// in your Amazon Web Services account and Region. In addition to the required +// parameters, you can use the optional parameters to specify a key policy, +// description, tags, and other useful elements for any key type. KMS is replacing +// the term customer master key (CMK) with KMS key. The concept has not +// changed. To prevent breaking changes, KMS is keeping some variations of this +// term.
To create different types of KMS keys, use the following guidance: +// Symmetric encryption KMS key To create a symmetric encryption KMS key, you +// aren't required to specify any parameters. The default value for KeySpec, +// SYMMETRIC_DEFAULT, and the default value for KeyUsage, ENCRYPT_DECRYPT, create a +// symmetric encryption KMS key. For technical details, see SYMMETRIC_DEFAULT key +// spec +// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-symmetric-default) +// in the Key Management Service Developer Guide. If you need a key for basic +// encryption and decryption or you are creating a KMS key to protect your +// resources in an Amazon Web Services service, create a symmetric encryption KMS +// key. The key material in a symmetric encryption key never leaves KMS +// unencrypted. You can use a symmetric encryption KMS key to encrypt and decrypt +// data up to 4,096 bytes, but these keys are typically used to generate data keys and +// data key pairs. For details, see GenerateDataKey and GenerateDataKeyPair. +// Asymmetric KMS keys To create an asymmetric KMS key, use the KeySpec parameter +// to specify the type of key material in the KMS key. Then, use the KeyUsage +// parameter to determine whether the KMS key will be used to encrypt and decrypt +// or sign and verify. You can't change these properties after the KMS key is +// created. Asymmetric KMS keys contain an RSA key pair, Elliptic Curve (ECC) key +// pair, or an SM2 key pair (China Regions only). The private key in an asymmetric +// KMS key never leaves KMS unencrypted. However, you can use the GetPublicKey +// operation to download the public key so it can be used outside of KMS. KMS keys +// with RSA or SM2 key pairs can be used to encrypt or decrypt data or sign and +// verify messages (but not both). KMS keys with ECC key pairs can be used only to +// sign and verify messages. For information about asymmetric KMS keys, see +// Asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the Key Management Service Developer Guide. HMAC KMS key To create an HMAC +// KMS key, set the KeySpec parameter to a key spec value for HMAC KMS keys. Then +// set the KeyUsage parameter to GENERATE_VERIFY_MAC. You must set the key usage +// even though GENERATE_VERIFY_MAC is the only valid key usage value for HMAC KMS +// keys. You can't change these properties after the KMS key is created. HMAC KMS +// keys are symmetric keys that never leave KMS unencrypted. You can use HMAC keys +// to generate (GenerateMac) and verify (VerifyMac) HMAC codes for messages up to +// 4096 bytes. HMAC KMS keys are not supported in all Amazon Web Services Regions. +// If you try to create an HMAC KMS key in an Amazon Web Services Region in which +// HMAC keys are not supported, the CreateKey operation returns an +// UnsupportedOperationException. For a list of Regions in which HMAC KMS keys are +// supported, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. Multi-Region primary keys To create a +// multi-Region primary key in the local Amazon Web Services +// Region, use the MultiRegion parameter with a value of True. To create a +// multi-Region replica key, that is, a KMS key with the same key ID and key +// material as a primary key, but in a different Amazon Web Services Region, use +// the ReplicateKey operation.
+// To change a replica key to a primary key, and its
+// primary key to a replica key, use the UpdatePrimaryRegion operation. You can
+// create multi-Region KMS keys for all supported KMS key types: symmetric
+// encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and
+// asymmetric signing KMS keys. You can also create multi-Region keys with imported
+// key material. However, you can't create multi-Region keys in a custom key store.
+// This operation supports multi-Region keys, a KMS feature that lets you create
+// multiple interoperable KMS keys in different Amazon Web Services Regions.
+// Because these KMS keys have the same key ID, key material, and other metadata,
+// you can use them interchangeably to encrypt data in one Amazon Web Services
+// Region and decrypt it in a different Amazon Web Services Region without
+// re-encrypting the data or making a cross-Region call. For more information about
+// multi-Region keys, see Multi-Region keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// in the Key Management Service Developer Guide. Imported key material To import
+// your own key material, begin by creating a symmetric encryption KMS key with no
+// key material. To do this, use the Origin parameter of CreateKey with a value of
+// EXTERNAL. Next, use the GetParametersForImport operation to get a public key and
+// import token, and use the public key to encrypt your key material. Then, use
+// ImportKeyMaterial with your import token to import the key material. For
+// step-by-step instructions, see Importing Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in
+// the Key Management Service Developer Guide. This feature supports only
+// symmetric encryption KMS keys, including multi-Region symmetric encryption KMS
+// keys. You cannot import key material into any other type of KMS key. To create a
+// multi-Region primary key with imported key material, use the Origin parameter of
+// CreateKey with a value of EXTERNAL and the MultiRegion parameter with a value of
+// True. To create replicas of the multi-Region primary key, use the ReplicateKey
+// operation. For more information about multi-Region keys, see Multi-Region keys
+// in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// in the Key Management Service Developer Guide. Custom key store To create a
+// symmetric encryption KMS key in a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
+// use the CustomKeyStoreId parameter to specify the custom key store. You must
+// also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM cluster
+// that is associated with the custom key store must have at least two active HSMs
+// in different Availability Zones in the Amazon Web Services Region. Custom key
+// stores support only symmetric encryption KMS keys. You cannot create an HMAC KMS
+// key or an asymmetric KMS key in a custom key store. For information about custom
+// key stores in KMS, see Custom key stores in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// in the Key Management Service Developer Guide. Cross-account use: No. You
+// cannot use this operation to create a KMS key in a different Amazon Web Services
+// account.
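+//
+// A minimal usage sketch (assumed here: an initialized *kms.Client named client,
+// a context.Context named ctx, and the aws helper package for pointer
+// conversions; none of these come from this file). With no KeySpec or KeyUsage
+// set, this creates a default symmetric encryption key:
+//
+//	out, err := client.CreateKey(ctx, &kms.CreateKeyInput{
+//		Description: aws.String("example symmetric encryption key"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(aws.ToString(out.KeyMetadata.KeyId)) // ID of the new KMS key
+//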
+// Required permissions: kms:CreateKey
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy). To use the Tags parameter, kms:TagResource
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy). For examples and information about related permissions, see Allow
+// a user to create KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key)
+// in the Key Management Service Developer Guide. Related operations:
+//
+// *
+// DescribeKey
+//
+// * ListKeys
+//
+// * ScheduleKeyDeletion
+func (c *Client) CreateKey(ctx context.Context, params *CreateKeyInput, optFns ...func(*Options)) (*CreateKeyOutput, error) {
+ if params == nil {
+ params = &CreateKeyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateKey", params, optFns, c.addOperationCreateKeyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateKeyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateKeyInput struct {
+
+ // A flag to indicate whether to bypass the key policy lockout safety check.
+ // Setting this value to true increases the risk that the KMS key becomes
+ // unmanageable. Do not set this value to true indiscriminately. For more
+ // information, refer to the scenario in the Default Key Policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
+ // section in the Key Management Service Developer Guide. Use this parameter only
+ // when you include a policy in the request and you intend to prevent the principal
+ // that is making the request from making a subsequent PutKeyPolicy request on the
+ // KMS key. The default value is false.
+ BypassPolicyLockoutSafetyCheck bool
+
+ // Creates the KMS key in the specified custom key store
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+ // and the key material in its associated CloudHSM cluster. To create a KMS key in
+ // a custom key store, you must also specify the Origin parameter with a value of
+ // AWS_CLOUDHSM. The CloudHSM cluster that is associated with the custom key store
+ // must have at least two active HSMs, each in a different Availability Zone in the
+ // Region. This parameter is valid only for symmetric encryption KMS keys in a
+ // single Region. You cannot create any other type of KMS key in a custom key
+ // store. To find the ID of a custom key store, use the DescribeCustomKeyStores
+ // operation. The response includes the custom key store ID and the ID of the
+ // CloudHSM cluster. This operation is part of the custom key store feature
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+ // in KMS, which combines the convenience and extensive integration of KMS
+ // with the isolation and control of a single-tenant key store.
+ CustomKeyStoreId *string
+
+ // Instead, use the KeySpec parameter. The KeySpec and CustomerMasterKeySpec
+ // parameters work the same way. Only the names differ. We recommend that you use
+ // the KeySpec parameter in your code. However, to avoid breaking changes, KMS will
+ // support both parameters.
+ //
+ // Deprecated: This parameter has been deprecated. Instead, use the KeySpec
+ // parameter.
+ CustomerMasterKeySpec types.CustomerMasterKeySpec
+
+ // A description of the KMS key.
+ // Use a description that helps you decide whether
+ // the KMS key is appropriate for a task. The default value is an empty string (no
+ // description). To set or change the description after the key is created, use
+ // UpdateKeyDescription.
+ Description *string
+
+ // Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT,
+ // creates a KMS key with a 256-bit AES-GCM key that is used for encryption and
+ // decryption, except in China Regions, where it creates a 128-bit symmetric key
+ // that uses SM4 encryption. For help choosing a key spec for your KMS key, see
+ // Choosing a KMS key type
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose)
+ // in the Key Management Service Developer Guide. The KeySpec determines whether
+ // the KMS key contains a symmetric key or an asymmetric key pair. It also
+ // determines the cryptographic algorithms that the KMS key supports. You can't
+ // change the KeySpec after the KMS key is created. To further restrict the
+ // algorithms that can be used with the KMS key, use a condition key in its key
+ // policy or IAM policy. For more information, see kms:EncryptionAlgorithm
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
+ // kms:MacAlgorithm
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)
+ // or kms:SigningAlgorithm
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
+ // in the Key Management Service Developer Guide. Amazon Web Services services
+ // that are integrated with KMS
+ // (http://aws.amazon.com/kms/features/#AWS_Service_Integration) use symmetric
+ // encryption KMS keys to protect your data. These services do not support
+ // asymmetric KMS keys or HMAC KMS keys. KMS supports the following key specs for
+ // KMS keys:
+ //
+ // * Symmetric encryption key (default)
+ //
+ // * SYMMETRIC_DEFAULT
+ //
+ // * HMAC
+ // keys (symmetric)
+ //
+ // * HMAC_224
+ //
+ // * HMAC_256
+ //
+ // * HMAC_384
+ //
+ // * HMAC_512
+ //
+ // * Asymmetric
+ // RSA key pairs
+ //
+ // * RSA_2048
+ //
+ // * RSA_3072
+ //
+ // * RSA_4096
+ //
+ // * Asymmetric NIST-recommended
+ // elliptic curve key pairs
+ //
+ // * ECC_NIST_P256 (secp256r1)
+ //
+ // * ECC_NIST_P384
+ // (secp384r1)
+ //
+ // * ECC_NIST_P521 (secp521r1)
+ //
+ // * Other asymmetric elliptic curve key
+ // pairs
+ //
+ // * ECC_SECG_P256K1 (secp256k1), commonly used for cryptocurrencies.
+ //
+ // * SM2
+ // key pairs (China Regions only)
+ //
+ // * SM2
+ KeySpec types.KeySpec
+
+ // Determines the cryptographic operations
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+ // for which you can use the KMS key. The default value is ENCRYPT_DECRYPT. This
+ // parameter is optional when you are creating a symmetric encryption KMS key;
+ // otherwise, it is required. You can't change the KeyUsage value after the KMS key
+ // is created. Select only one valid value.
+ //
+ // * For symmetric encryption KMS keys,
+ // omit the parameter or specify ENCRYPT_DECRYPT.
+ //
+ // * For HMAC KMS keys (symmetric),
+ // specify GENERATE_VERIFY_MAC.
+ //
+ // * For asymmetric KMS keys with RSA key material,
+ // specify ENCRYPT_DECRYPT or SIGN_VERIFY.
+ //
+ // * For asymmetric KMS keys with ECC key
+ // material, specify SIGN_VERIFY.
+ //
+ // * For asymmetric KMS keys with SM2 key material
+ // (China Regions only), specify ENCRYPT_DECRYPT or SIGN_VERIFY.
+ KeyUsage types.KeyUsageType
+
+ // Creates a multi-Region primary key that you can replicate into other Amazon Web
+ // Services Regions. You cannot change this value after you create the KMS key. For
+ // a multi-Region key, set this parameter to True. For a single-Region KMS key,
+ // omit this parameter or set it to False. The default value is False. This
+ // operation supports multi-Region keys, a KMS feature that lets you create
+ // multiple interoperable KMS keys in different Amazon Web Services Regions.
+ // Because these KMS keys have the same key ID, key material, and other metadata,
+ // you can use them interchangeably to encrypt data in one Amazon Web Services
+ // Region and decrypt it in a different Amazon Web Services Region without
+ // re-encrypting the data or making a cross-Region call. For more information about
+ // multi-Region keys, see Multi-Region keys in KMS
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+ // in the Key Management Service Developer Guide. This value creates a primary key,
+ // not a replica. To create a replica key, use the ReplicateKey operation. You can
+ // create a multi-Region version of a symmetric encryption KMS key, an HMAC KMS
+ // key, an asymmetric KMS key, or a KMS key with imported key material. However,
+ // you cannot create a multi-Region key in a custom key store.
+ MultiRegion *bool
+
+ // The source of the key material for the KMS key. You cannot change the origin
+ // after you create the KMS key. The default is AWS_KMS, which means that KMS
+ // creates the key material. To create a KMS key with no key material (for imported
+ // key material), set the value to EXTERNAL. For more information about importing
+ // key material into KMS, see Importing Key Material
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in
+ // the Key Management Service Developer Guide. This value is valid only for
+ // symmetric encryption KMS keys. To create a KMS key in a KMS custom key store
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+ // and create its key material in the associated CloudHSM cluster, set this value
+ // to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to identify
+ // the custom key store. This value is valid only for symmetric encryption KMS
+ // keys.
+ Origin types.OriginType
+
+ // The key policy to attach to the KMS key. If you do not specify a key policy, KMS
+ // attaches a default key policy to the KMS key. For more information, see Default
+ // key policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
+ // in the Key Management Service Developer Guide. If you provide a key policy, it
+ // must meet the following criteria:
+ //
+ // * If you don't set
+ // BypassPolicyLockoutSafetyCheck to True, the key policy must allow the principal
+ // that is making the CreateKey request to make a subsequent PutKeyPolicy request
+ // on the KMS key. This reduces the risk that the KMS key becomes unmanageable. For
+ // more information, refer to the scenario in the Default Key Policy
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
+ // section of the Key Management Service Developer Guide.
+ // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // + // A key + // policy document can include only the following characters: + // + // * Printable ASCII + // characters from the space character (\u0020) through the end of the ASCII + // character range. + // + // * Printable characters in the Basic Latin and Latin-1 + // Supplement character set (through \u00FF). + // + // * The tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) special characters + // + // For information about + // key policies, see Key policies in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the + // Key Management Service Developer Guide. For help writing and formatting a JSON + // policy document, see the IAM JSON Policy Reference + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in + // the Identity and Access Management User Guide . + Policy *string + + // Assigns one or more tags to the KMS key. Use this parameter to tag the KMS key + // when it is created. To tag an existing KMS key, use the TagResource operation. + // Tagging or untagging a KMS key can allow or deny permission to the KMS key. For + // details, see ABAC in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key + // Management Service Developer Guide. To use this parameter, you must have + // kms:TagResource + // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // permission in an IAM policy. Each tag consists of a tag key and a tag value. + // Both the tag key and the tag value are required, but the tag value can be an + // empty (null) string. You cannot have more than one tag on a KMS key with the + // same tag key. If you specify an existing tag key with a different tag value, KMS + // replaces the current tag value with the specified one. When you add tags to an + // Amazon Web Services resource, Amazon Web Services generates a cost allocation + // report with usage and costs aggregated by tags. Tags can also be used to control + // access to a KMS key. For details, see Tagging Keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + Tags []types.Tag + + noSmithyDocumentSerde +} + +type CreateKeyOutput struct { + + // Metadata associated with the KMS key. + KeyMetadata *types.KeyMetadata + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "CreateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go new file mode 100644 index 0000000000..d606bca3a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Decrypt.go @@ -0,0 +1,262 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decrypts ciphertext that was encrypted by a KMS key using any of the following +// operations: +// +// * Encrypt +// +// * GenerateDataKey +// +// * GenerateDataKeyPair +// +// * +// GenerateDataKeyWithoutPlaintext +// +// * GenerateDataKeyPairWithoutPlaintext +// +// You can +// use this operation to decrypt ciphertext that was encrypted under a symmetric +// encryption KMS key or an asymmetric encryption KMS key. When the KMS key is +// asymmetric, you must specify the KMS key and the encryption algorithm that was +// used to encrypt the ciphertext. 
For information about asymmetric KMS keys, see
+// Asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. The Decrypt operation also
+// decrypts ciphertext that was encrypted outside of KMS by the public key in an
+// asymmetric KMS key. However, it cannot decrypt ciphertext produced by other
+// libraries, such as the Amazon Web Services Encryption SDK
+// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon
+// S3 client-side encryption
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
+// These libraries return a ciphertext format that is incompatible with KMS. If the
+// ciphertext was encrypted under a symmetric encryption KMS key, the KeyId
+// parameter is optional. KMS can get this information from metadata that it adds
+// to the symmetric ciphertext blob. This feature adds durability to your
+// implementation by ensuring that authorized users can decrypt ciphertext decades
+// after it was encrypted, even if they've lost track of the key ID. However,
+// specifying the KMS key is always recommended as a best practice. When you use
+// the KeyId parameter to specify a KMS key, KMS only uses the KMS key you specify.
+// If the ciphertext was encrypted under a different KMS key, the Decrypt operation
+// fails. This practice ensures that you use the KMS key that you intend. Whenever
+// possible, use key policies to give users permission to call the Decrypt
+// operation on a particular KMS key, instead of using IAM policies. Otherwise, you
+// might create an IAM user policy that gives the user Decrypt permission on all
+// KMS keys. This user could decrypt ciphertext that was encrypted by KMS keys in
+// other accounts if the key policy for the cross-account KMS key permits it. If
+// you must use an IAM policy for Decrypt permissions, limit the user to particular
+// KMS keys or particular trusted accounts. For details, see Best practices for IAM
+// policies
+// (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices)
+// in the Key Management Service Developer Guide. Applications in Amazon Web
+// Services Nitro Enclaves can call this operation by using the Amazon Web Services
+// Nitro Enclaves Development Kit
+// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the
+// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html)
+// in the Key Management Service Developer Guide. The KMS key that you use for this
+// operation must be in a compatible key state. For details, see Key states of KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in
+// the Key Management Service Developer Guide. Cross-account use: Yes. To perform
+// this operation with a KMS key in a different Amazon Web Services account,
+// specify the key ARN or alias ARN in the value of the KeyId parameter.
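+//
+// A minimal usage sketch (assumed here: client is an initialized *kms.Client,
+// ctx is a context.Context, and ciphertext holds the encrypted bytes; the key
+// ARN is the example ARN used in this documentation). Pinning KeyId follows the
+// best practice described above:
+//
+//	out, err := client.Decrypt(ctx, &kms.DecryptInput{
+//		CiphertextBlob: ciphertext,
+//		KeyId:          aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	plaintext := out.Plaintext // decrypted bytes
+//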
+// Required permissions: kms:Decrypt
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * Encrypt
+//
+// * GenerateDataKey
+//
+// *
+// GenerateDataKeyPair
+//
+// * ReEncrypt
+func (c *Client) Decrypt(ctx context.Context, params *DecryptInput, optFns ...func(*Options)) (*DecryptOutput, error) {
+ if params == nil {
+ params = &DecryptInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "Decrypt", params, optFns, c.addOperationDecryptMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DecryptOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DecryptInput struct {
+
+ // Ciphertext to be decrypted. The blob includes metadata.
+ //
+ // This member is required.
+ CiphertextBlob []byte
+
+ // Specifies the encryption algorithm that will be used to decrypt the ciphertext.
+ // Specify the same algorithm that was used to encrypt the data. If you specify a
+ // different algorithm, the Decrypt operation fails. This parameter is required
+ // only when the ciphertext was encrypted under an asymmetric KMS key. The default
+ // value, SYMMETRIC_DEFAULT, represents the only supported algorithm that is valid
+ // for symmetric encryption KMS keys.
+ EncryptionAlgorithm types.EncryptionAlgorithmSpec
+
+ // Specifies the encryption context to use when decrypting the data. An encryption
+ // context is valid only for cryptographic operations
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
+ // with a symmetric encryption KMS key. The standard asymmetric encryption
+ // algorithms and HMAC algorithms that KMS uses do not support an encryption
+ // context. An encryption context is a collection of non-secret key-value pairs
+ // that represent additional authenticated data. When you use an encryption context
+ // to encrypt data, you must specify the same (an exact case-sensitive match)
+ // encryption context to decrypt the data. An encryption context is supported only
+ // on operations with symmetric encryption KMS keys. On operations with symmetric
+ // encryption KMS keys, an encryption context is optional, but it is strongly
+ // recommended. For more information, see Encryption context
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
+ // in the Key Management Service Developer Guide.
+ EncryptionContext map[string]string
+
+ // A list of grant tokens. Use a grant token when your permission to call this
+ // operation comes from a new grant that has not yet achieved eventual consistency.
+ // For more information, see Grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+ // and Using a grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+ // in the Key Management Service Developer Guide.
+ GrantTokens []string
+
+ // Specifies the KMS key that KMS uses to decrypt the ciphertext. Enter a key ID of
+ // the KMS key that was used to encrypt the ciphertext. If you identify a different
+ // KMS key, the Decrypt operation throws an IncorrectKeyException. This parameter
+ // is required only when the ciphertext was encrypted under an asymmetric KMS key.
+ // If you used a symmetric encryption KMS key, KMS can get the KMS key from
+ // metadata that it adds to the symmetric ciphertext blob. However, specifying
+ // the KMS key is always recommended as a best practice. 
This practice ensures that you use the KMS key + // that you intend. To specify a KMS key, use its key ID, key ARN, alias name, or + // alias ARN. When using an alias name, prefix it with "alias/". To specify a KMS + // key in a different Amazon Web Services account, you must use the key ARN or + // alias ARN. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + KeyId *string + + noSmithyDocumentSerde +} + +type DecryptOutput struct { + + // The encryption algorithm that was used to decrypt the ciphertext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to decrypt the ciphertext. + KeyId *string + + // Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + Plaintext []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDecryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDecrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDecryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDecrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + 
OperationName: "Decrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go new file mode 100644 index 0000000000..4f90c74a77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteAlias.go @@ -0,0 +1,147 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified alias. Adding, deleting, or updating an alias can allow or +// deny permission to the KMS key. For details, see ABAC in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. Because an alias is not a property of a KMS +// key, you can delete and change the aliases of a KMS key without affecting the +// KMS key. Also, aliases do not appear in the response from the DescribeKey +// operation. To get the aliases of all KMS keys, use the ListAliases operation. +// Each KMS key can have multiple aliases. To change the alias of a KMS key, use +// DeleteAlias to delete the current alias and CreateAlias to create a new alias. +// To associate an existing alias with a different KMS key, call UpdateAlias. +// Cross-account use: No. You cannot perform this operation on an alias in a +// different Amazon Web Services account. Required permissions +// +// * kms:DeleteAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// +// * kms:DeleteAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the KMS key (key policy). +// +// For details, see Controlling access to aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// CreateAlias +// +// * ListAliases +// +// * UpdateAlias +func (c *Client) DeleteAlias(ctx context.Context, params *DeleteAliasInput, optFns ...func(*Options)) (*DeleteAliasOutput, error) { + if params == nil { + params = &DeleteAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteAlias", params, optFns, c.addOperationDeleteAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteAliasInput struct { + + // The alias to be deleted. The alias name must begin with alias/ followed by the + // alias name, such as alias/ExampleAlias. + // + // This member is required. + AliasName *string + + noSmithyDocumentSerde +} + +type DeleteAliasOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DeleteAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go new file mode 100644 index 0000000000..e0a2616933 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteCustomKeyStore.go @@ -0,0 +1,158 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). +// This operation does not delete the CloudHSM cluster that is associated with the +// custom key store, or affect any users or keys in the cluster. The custom key +// store that you delete cannot contain any KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys). +// Before deleting the key store, verify that you will never need to use any of the +// KMS keys in the key store for any cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). 
+// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. When
+// the scheduled waiting period expires, the ScheduleKeyDeletion operation deletes
+// the KMS keys. Then it makes a best effort to delete the key material from the
+// associated cluster. However, you might need to manually delete the orphaned key
+// material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. After all KMS keys are deleted from KMS, use
+// DisconnectCustomKeyStore to disconnect the key store from KMS. Then, you can
+// delete the custom key store. Instead of deleting the custom key store, consider
+// using DisconnectCustomKeyStore to disconnect it from KMS. While the key store is
+// disconnected, you cannot create or use the KMS keys in the key store. But, you
+// do not need to delete KMS keys and you can reconnect a disconnected custom key
+// store at any time. If the operation succeeds, it returns a JSON object with no
+// properties. This operation is part of the custom key store feature
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// in KMS, which combines the convenience and extensive integration of KMS
+// with the isolation and control of a single-tenant key store. Cross-account use:
+// No. You cannot perform this operation on a custom key store in a different
+// Amazon Web Services account. Required permissions: kms:DeleteCustomKeyStore
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) Related operations:
+//
+// * ConnectCustomKeyStore
+//
+// *
+// CreateCustomKeyStore
+//
+// * DescribeCustomKeyStores
+//
+// * DisconnectCustomKeyStore
+//
+// *
+// UpdateCustomKeyStore
+func (c *Client) DeleteCustomKeyStore(ctx context.Context, params *DeleteCustomKeyStoreInput, optFns ...func(*Options)) (*DeleteCustomKeyStoreOutput, error) {
+ if params == nil {
+ params = &DeleteCustomKeyStoreInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteCustomKeyStore", params, optFns, c.addOperationDeleteCustomKeyStoreMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteCustomKeyStoreOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteCustomKeyStoreInput struct {
+
+ // Enter the ID of the custom key store you want to delete. To find the ID of a
+ // custom key store, use the DescribeCustomKeyStores operation.
+ //
+ // This member is required.
+ CustomKeyStoreId *string
+
+ noSmithyDocumentSerde
+}
+
+type DeleteCustomKeyStoreOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DeleteCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go new file mode 100644 index 0000000000..7f8839b963 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DeleteImportedKeyMaterial.go @@ -0,0 +1,145 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes key material that you previously imported. This operation makes the +// specified KMS key unusable. For more information about importing key material +// into KMS, see Importing Key Material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in +// the Key Management Service Developer Guide. When the specified KMS key is in the +// PendingDeletion state, this operation does not change the KMS key's state. +// Otherwise, it changes the KMS key's state to PendingImport. After you delete key +// material, you can use ImportKeyMaterial to reimport the same key material into +// the KMS key. 
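+//
+// A minimal usage sketch (assumed here: client is an initialized *kms.Client and
+// ctx is a context.Context; the key ID is the example ID used throughout these
+// docs):
+//
+//	_, err := client.DeleteImportedKeyMaterial(ctx, &kms.DeleteImportedKeyMaterialInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//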
The KMS key that you use for this operation must be in a compatible +// key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:DeleteImportedKeyMaterial +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * GetParametersForImport +// +// * ImportKeyMaterial +func (c *Client) DeleteImportedKeyMaterial(ctx context.Context, params *DeleteImportedKeyMaterialInput, optFns ...func(*Options)) (*DeleteImportedKeyMaterialOutput, error) { + if params == nil { + params = &DeleteImportedKeyMaterialInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteImportedKeyMaterial", params, optFns, c.addOperationDeleteImportedKeyMaterialMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteImportedKeyMaterialOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteImportedKeyMaterialInput struct { + + // Identifies the KMS key from which you are deleting imported key material. The + // Origin of the KMS key must be EXTERNAL. Specify the key ID or key ARN of the KMS + // key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DeleteImportedKeyMaterialOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteImportedKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsjson11_serializeOpDeleteImportedKeyMaterial{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDeleteImportedKeyMaterial{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteImportedKeyMaterialValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDeleteImportedKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "kms",
+ OperationName: "DeleteImportedKeyMaterial",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go
new file mode 100644
index 0000000000..01f1dab207
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeCustomKeyStores.go
@@ -0,0 +1,280 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/kms/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gets information about custom key stores
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// in the account and Region. This operation is part of the custom key store
+// feature
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// in KMS, which combines the convenience and extensive integration of KMS
+// with the isolation and control of a single-tenant key store.
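+//
+// A paginated listing sketch using the paginator defined later in this file
+// (assumed here: client is an initialized *kms.Client, ctx is a
+// context.Context, and fmt plus the aws helper package are imported by the
+// caller):
+//
+//	p := kms.NewDescribeCustomKeyStoresPaginator(client, &kms.DescribeCustomKeyStoresInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			break // or handle the error
+//		}
+//		for _, ks := range page.CustomKeyStores {
+//			fmt.Println(aws.ToString(ks.CustomKeyStoreName), ks.ConnectionState)
+//		}
+//	}
+//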
By default, this +// operation returns information about all custom key stores in the account and +// Region. To get only information about a particular custom key store, use either +// the CustomKeyStoreName or CustomKeyStoreId parameter (but not both). To +// determine whether the custom key store is connected to its CloudHSM cluster, use +// the ConnectionState element in the response. If an attempt to connect the custom +// key store failed, the ConnectionState value is FAILED and the +// ConnectionErrorCode element in the response indicates the cause of the failure. +// For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. +// Custom key stores have a DISCONNECTED connection state if the key store has +// never been connected or you use the DisconnectCustomKeyStore operation to +// disconnect it. If your custom key store state is CONNECTED but you are having +// trouble using it, make sure that its associated CloudHSM cluster is active and +// contains the minimum number of HSMs required for the operation, if any. For help +// repairing your custom key store, see the Troubleshooting Custom Key Stores +// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) topic +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a custom key store in a different Amazon Web Services +// account. Required permissions: kms:DescribeCustomKeyStores +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DeleteCustomKeyStore +// +// * DisconnectCustomKeyStore +// +// * +// UpdateCustomKeyStore +func (c *Client) DescribeCustomKeyStores(ctx context.Context, params *DescribeCustomKeyStoresInput, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeCustomKeyStores", params, optFns, c.addOperationDescribeCustomKeyStoresMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeCustomKeyStoresOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeCustomKeyStoresInput struct { + + // Gets only information about the specified custom key store. Enter the key store + // ID. By default, this operation gets information about all custom key stores in + // the account and Region. To limit the output to a particular custom key store, + // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, but not + // both. + CustomKeyStoreId *string + + // Gets only information about the specified custom key store. Enter the friendly + // name of the custom key store. By default, this operation gets information about + // all custom key stores in the account and Region. To limit the output to a + // particular custom key store, you can use either the CustomKeyStoreId or + // CustomKeyStoreName parameter, but not both. + CustomKeyStoreName *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. 
+ Marker *string
+
+ noSmithyDocumentSerde
+}
+
+type DescribeCustomKeyStoresOutput struct {
+
+ // Contains metadata about each custom key store.
+ CustomKeyStores []types.CustomKeyStoresListEntry
+
+ // When Truncated is true, this element is present and contains the value to use
+ // for the Marker parameter in a subsequent request.
+ NextMarker *string
+
+ // A flag that indicates whether there are more items in the list. When this value
+ // is true, the list in this response is truncated. To get more items, pass the
+ // value of the NextMarker element in this response to the Marker parameter in a
+ // subsequent request.
+ Truncated bool
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeCustomKeyStoresMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeCustomKeyStores{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeCustomKeyStores{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomKeyStores(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+// DescribeCustomKeyStoresAPIClient is a client that implements the
+// DescribeCustomKeyStores operation.
+type DescribeCustomKeyStoresAPIClient interface {
+ DescribeCustomKeyStores(context.Context, *DescribeCustomKeyStoresInput, ...func(*Options)) (*DescribeCustomKeyStoresOutput, error)
+}
+
+var _ DescribeCustomKeyStoresAPIClient = (*Client)(nil)
+
+// DescribeCustomKeyStoresPaginatorOptions is the paginator options for
+// DescribeCustomKeyStores
+type DescribeCustomKeyStoresPaginatorOptions struct {
+ // Use this parameter to specify the maximum number of items to return. When this
+ // value is present, KMS does not return more than the specified number of items,
+ // but it might return fewer.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool +} + +// DescribeCustomKeyStoresPaginator is a paginator for DescribeCustomKeyStores +type DescribeCustomKeyStoresPaginator struct { + options DescribeCustomKeyStoresPaginatorOptions + client DescribeCustomKeyStoresAPIClient + params *DescribeCustomKeyStoresInput + nextToken *string + firstPage bool +} + +// NewDescribeCustomKeyStoresPaginator returns a new +// DescribeCustomKeyStoresPaginator +func NewDescribeCustomKeyStoresPaginator(client DescribeCustomKeyStoresAPIClient, params *DescribeCustomKeyStoresInput, optFns ...func(*DescribeCustomKeyStoresPaginatorOptions)) *DescribeCustomKeyStoresPaginator { + if params == nil { + params = &DescribeCustomKeyStoresInput{} + } + + options := DescribeCustomKeyStoresPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &DescribeCustomKeyStoresPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *DescribeCustomKeyStoresPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next DescribeCustomKeyStores page. +func (p *DescribeCustomKeyStoresPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*DescribeCustomKeyStoresOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.DescribeCustomKeyStores(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opDescribeCustomKeyStores(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DescribeCustomKeyStores", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go new file mode 100644 index 0000000000..7fb450d2aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DescribeKey.go @@ -0,0 +1,209 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Provides detailed information about a KMS key. You can run DescribeKey on a +// customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) +// or an Amazon Web Services managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). +// This detailed information includes the key ARN, creation date (and deletion +// date, if applicable), the key state, and the origin and expiration date (if any) +// of the key material. 
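+//
+// A minimal usage sketch (assumed here: client is an initialized *kms.Client and
+// ctx is a context.Context; the alias is the example alias from these docs, and
+// aliases resolve to the underlying key):
+//
+//	out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{
+//		KeyId: aws.String("alias/ExampleAlias"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(out.KeyMetadata.KeySpec, out.KeyMetadata.KeyState)
+//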
It includes fields, like KeySpec, that help you distinguish +// different types of KMS keys. It also displays the key usage (encryption, +// signing, or generating and verifying MACs) and the algorithms that the KMS key +// supports. For KMS keys in custom key stores, it includes information about the +// custom key store, such as the key store ID and the CloudHSM cluster ID. For +// multi-Region keys, it displays the primary key and all related replica keys. +// DescribeKey does not return the following information: +// +// * Aliases associated +// with the KMS key. To get this information, use ListAliases. +// +// * Whether automatic +// key rotation is enabled on the KMS key. To get this information, use +// GetKeyRotationStatus. Also, some key states prevent a KMS key from being +// automatically rotated. For details, see How Automatic Key Rotation Works +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) +// in the Key Management Service Developer Guide. +// +// * Tags on the KMS key. To get +// this information, use ListResourceTags. +// +// * Key policies and grants on the KMS +// key. To get this information, use GetKeyPolicy and ListGrants. +// +// In general, +// DescribeKey is a non-mutating operation. It returns data about KMS keys, but +// doesn't change them. However, Amazon Web Services services use DescribeKey to +// create Amazon Web Services managed keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// from a predefined Amazon Web Services alias with no key ID. Cross-account use: +// Yes. To perform this operation with a KMS key in a different Amazon Web Services +// account, specify the key ARN or alias ARN in the value of the KeyId parameter. +// Required permissions: kms:DescribeKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * GetKeyPolicy +// +// * GetKeyRotationStatus +// +// * +// ListAliases +// +// * ListGrants +// +// * ListKeys +// +// * ListResourceTags +// +// * ListRetirableGrants +func (c *Client) DescribeKey(ctx context.Context, params *DescribeKeyInput, optFns ...func(*Options)) (*DescribeKeyOutput, error) { + if params == nil { + params = &DescribeKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeKey", params, optFns, c.addOperationDescribeKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeKeyInput struct { + + // Describes the specified KMS key. If you specify a predefined Amazon Web Services + // alias (an Amazon Web Services alias with no key ID), KMS associates the alias + // with an Amazon Web Services managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) + // and returns its KeyId and Arn in the response. To specify a KMS key, use its key + // ID, key ARN, alias name, or alias ARN. When using an alias name, prefix it with + // "alias/". To specify a KMS key in a different Amazon Web Services account, you + // must use the key ARN or alias ARN.
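Reviewer note: as the doc comment describes, KeyId accepts a key ID, key ARN, alias name, or alias ARN. A sketch of resolving an alias to its backing key; alias/ExampleAlias is the placeholder from the docs, and ctx and client come from the earlier sketch:

```go
out, err := client.DescribeKey(ctx, &kms.DescribeKeyInput{
	KeyId: aws.String("alias/ExampleAlias"),
})
if err != nil {
	log.Fatal(err)
}
// KeyMetadata carries the fields the doc comment mentions: ARN, state, spec.
md := out.KeyMetadata
fmt.Println(aws.ToString(md.Arn), md.KeyState, md.KeySpec)
```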
For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type DescribeKeyOutput struct { + + // Metadata associated with the key. + KeyMetadata *types.KeyMetadata + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDescribeKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DescribeKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go new 
file mode 100644 index 0000000000..066f679ef0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKey.go @@ -0,0 +1,138 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the state of a KMS key to disabled. This change temporarily prevents use of +// the KMS key for cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// For more information about how key state affects the use of a KMS key, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide . The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:DisableKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: EnableKey +func (c *Client) DisableKey(ctx context.Context, params *DisableKeyInput, optFns ...func(*Options)) (*DisableKeyOutput, error) { + if params == nil { + params = &DisableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKey", params, optFns, c.addOperationDisableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyInput struct { + + // Identifies the KMS key to disable. Specify the key ID or key ARN of the KMS key. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DisableKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go new file mode 100644 index 0000000000..b9735cadda --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisableKeyRotation.go @@ -0,0 +1,168 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Disables automatic rotation of the key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the +// specified symmetric encryption KMS key. Automatic key rotation is supported only +// on symmetric encryption KMS keys. 
You cannot enable or disable automatic +// rotation of asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), +// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), +// KMS keys with imported key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or +// KMS keys in a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). +// The key rotation status of these KMS keys is always false. To enable or disable +// automatic rotation of a set of related multi-Region keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), +// set the property on the primary key. You can enable (EnableKeyRotation) and +// disable automatic rotation of the key material in customer managed KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). +// Key material rotation of Amazon Web Services managed KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// is not configurable. KMS always rotates the key material every year. +// Rotation of Amazon Web Services owned KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) +// varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services +// managed keys from every three years to every year. For details, see +// EnableKeyRotation. The KMS key that you use for this operation must be in a +// compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:DisableKeyRotation +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * EnableKeyRotation +// +// * GetKeyRotationStatus +func (c *Client) DisableKeyRotation(ctx context.Context, params *DisableKeyRotationInput, optFns ...func(*Options)) (*DisableKeyRotationOutput, error) { + if params == nil { + params = &DisableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisableKeyRotation", params, optFns, c.addOperationDisableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable or disable + // automatic rotation of asymmetric KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks), + // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), + // KMS keys with imported key material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or + // KMS keys in a custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // Specify the key ID or key ARN of the KMS key.
For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type DisableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DisableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go new file mode 100644 index 0000000000..e54d663396 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_DisconnectCustomKeyStore.go @@ -0,0 +1,150 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Disconnects the custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// from its associated CloudHSM cluster. 
While a custom key store is disconnected, +// you can manage the custom key store and its KMS keys, but you cannot create or +// use KMS keys in the custom key store. You can reconnect the custom key store at +// any time. While a custom key store is disconnected, all attempts to create KMS +// keys in the custom key store or to use existing KMS keys in cryptographic +// operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// will fail. This action can prevent users from storing and accessing sensitive +// data. To find the connection state of a custom key store, use the +// DescribeCustomKeyStores operation. To reconnect a custom key store, use the +// ConnectCustomKeyStore operation. If the operation succeeds, it returns a JSON +// object with no properties. This operation is part of the custom key store +// feature +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) +// in KMS, which combines the convenience and extensive integration of KMS +// with the isolation and control of a single-tenant key store. Cross-account use: +// No. You cannot perform this operation on a custom key store in a different +// Amazon Web Services account. Required permissions: kms:DisconnectCustomKeyStore +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) Related operations: +// +// * ConnectCustomKeyStore +// +// * +// CreateCustomKeyStore +// +// * DeleteCustomKeyStore +// +// * DescribeCustomKeyStores +// +// * +// UpdateCustomKeyStore +func (c *Client) DisconnectCustomKeyStore(ctx context.Context, params *DisconnectCustomKeyStoreInput, optFns ...func(*Options)) (*DisconnectCustomKeyStoreOutput, error) { + if params == nil { + params = &DisconnectCustomKeyStoreInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DisconnectCustomKeyStore", params, optFns, c.addOperationDisconnectCustomKeyStoreMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DisconnectCustomKeyStoreOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DisconnectCustomKeyStoreInput struct { + + // Enter the ID of the custom key store you want to disconnect. To find the ID of a + // custom key store, use the DescribeCustomKeyStores operation. + // + // This member is required. + CustomKeyStoreId *string + + noSmithyDocumentSerde +} + +type DisconnectCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDisconnectCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpDisconnectCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDisconnectCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisconnectCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDisconnectCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "DisconnectCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go new file mode 100644 index 0000000000..f44645321a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKey.go @@ -0,0 +1,135 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Sets the key state of a KMS key to enabled. This allows you to use the KMS key +// for cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// The KMS key that you use for this operation must be in a compatible key state. +// For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. 
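Reviewer note: DisableKey (earlier in this diff) and EnableKey are symmetric toggles on the key state. A sketch of taking a key out of service and restoring it; the key ID is the placeholder from the docs, and ctx and client come from the first sketch:

```go
keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder

if _, err := client.DisableKey(ctx, &kms.DisableKeyInput{KeyId: keyID}); err != nil {
	log.Fatal(err)
}
// Cryptographic operations against the key now fail until it is re-enabled.
if _, err := client.EnableKey(ctx, &kms.EnableKeyInput{KeyId: keyID}); err != nil {
	log.Fatal(err)
}
```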
+// Required permissions: kms:EnableKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: DisableKey +func (c *Client) EnableKey(ctx context.Context, params *EnableKeyInput, optFns ...func(*Options)) (*EnableKeyOutput, error) { + if params == nil { + params = &EnableKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKey", params, optFns, c.addOperationEnableKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyInput struct { + + // Identifies the KMS key to enable. Specify the key ID or key ARN of the KMS key. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type EnableKeyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEnableKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "EnableKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go new file mode 100644 index 
0000000000..e368de92ea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_EnableKeyRotation.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Enables automatic rotation of the key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) of the +// specified symmetric encryption KMS key. When you enable automatic rotation of +// a customer managed KMS key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), +// KMS rotates the key material of the KMS key one year (approximately 365 days) +// from the enable date and every year thereafter. You can monitor rotation of the +// key material for your KMS keys in CloudTrail and Amazon CloudWatch. To disable +// rotation of the key material in a customer managed KMS key, use the +// DisableKeyRotation operation. Automatic key rotation is supported only on +// symmetric encryption KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks). +// You cannot enable or disable automatic rotation of asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), +// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), +// KMS keys with imported key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or +// KMS keys in a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). +// The key rotation status of these KMS keys is always false. To enable or disable +// automatic rotation of a set of related multi-Region keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), +// set the property on the primary key. You cannot enable or disable automatic +// rotation of Amazon Web Services managed KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). +// KMS always rotates the key material of Amazon Web Services managed keys every +// year. Rotation of Amazon Web Services owned KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk) +// varies. In May 2022, KMS changed the rotation schedule for Amazon Web Services +// managed keys from every three years (approximately 1,095 days) to every year +// (approximately 365 days). New Amazon Web Services managed keys are automatically +// rotated one year after they are created, and approximately every year +// thereafter. Existing Amazon Web Services managed keys are automatically rotated +// one year after their most recent rotation, and every year thereafter. The KMS +// key that you use for this operation must be in a compatible key state. For +// details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account.
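Reviewer note: per the doc comment above, rotation can only be toggled on symmetric encryption, customer managed keys. A sketch of enabling rotation and confirming it took effect via GetKeyRotationStatus, an operation defined elsewhere in this package; the key ID is the docs' placeholder:

```go
keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab")

if _, err := client.EnableKeyRotation(ctx, &kms.EnableKeyRotationInput{KeyId: keyID}); err != nil {
	log.Fatal(err)
}
status, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{KeyId: keyID})
if err != nil {
	log.Fatal(err)
}
fmt.Println("rotation enabled:", status.KeyRotationEnabled)
```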
+// Required permissions: kms:EnableKeyRotation +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * DisableKeyRotation +// +// * GetKeyRotationStatus +func (c *Client) EnableKeyRotation(ctx context.Context, params *EnableKeyRotationInput, optFns ...func(*Options)) (*EnableKeyRotationOutput, error) { + if params == nil { + params = &EnableKeyRotationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "EnableKeyRotation", params, optFns, c.addOperationEnableKeyRotationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EnableKeyRotationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EnableKeyRotationInput struct { + + // Identifies a symmetric encryption KMS key. You cannot enable or disable + // automatic rotation of asymmetric KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), + // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), + // KMS keys with imported key material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or + // KMS keys in a custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // The key rotation status of these KMS keys is always false. To enable or disable + // automatic rotation of a set of related multi-Region keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), + // set the property on the primary key. Specify the key ID or key ARN of the KMS + // key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type EnableKeyRotationOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEnableKeyRotationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEnableKeyRotation{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEnableKeyRotationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKeyRotation(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEnableKeyRotation(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "EnableKeyRotation", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go new file mode 100644 index 0000000000..434493eb44 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Encrypt.go @@ -0,0 +1,259 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric +// or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT. You can use this +// operation to encrypt small amounts of arbitrary data, such as a personal +// identifier or database password, or other sensitive information. You don't need +// to use the Encrypt operation to encrypt a data key. The GenerateDataKey and +// GenerateDataKeyPair operations return a plaintext data key and an encrypted copy +// of that data key. If you use a symmetric encryption KMS key, you can use an +// encryption context to add additional security to your encryption operation. 
If +// you specify an EncryptionContext when encrypting data, you must specify the same +// encryption context (a case-sensitive exact match) when decrypting the data. +// Otherwise, the request to decrypt fails with an InvalidCiphertextException. For +// more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. If you specify an asymmetric KMS +// key, you must also specify the encryption algorithm. The algorithm must be +// compatible with the KMS key spec. When you use an asymmetric KMS key to encrypt +// or reencrypt data, be sure to record the KMS key and encryption algorithm that +// you choose. You will be required to provide the same KMS key and encryption +// algorithm when you decrypt the data. If the KMS key and algorithm do not match +// the values used to encrypt the data, the decrypt operation fails. You are not +// required to supply the key ID and encryption algorithm when you decrypt with +// symmetric encryption KMS keys because KMS stores this information in the +// ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. The maximum size of the data that you can encrypt +// varies with the type of KMS key and the encryption algorithm that you choose. +// +// * +// Symmetric encryption KMS keys +// +// * SYMMETRIC_DEFAULT: 4096 bytes +// +// * RSA_2048 +// +// * +// RSAES_OAEP_SHA_1: 214 bytes +// +// * RSAES_OAEP_SHA_256: 190 bytes +// +// * RSA_3072 +// +// * +// RSAES_OAEP_SHA_1: 342 bytes +// +// * RSAES_OAEP_SHA_256: 318 bytes +// +// * RSA_4096 +// +// * +// RSAES_OAEP_SHA_1: 470 bytes +// +// * RSAES_OAEP_SHA_256: 446 bytes +// +// * SM2PKE: 1024 +// bytes (China Regions only) +// +// The KMS key that you use for this operation must be +// in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:Encrypt +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +func (c *Client) Encrypt(ctx context.Context, params *EncryptInput, optFns ...func(*Options)) (*EncryptOutput, error) { + if params == nil { + params = &EncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Encrypt", params, optFns, c.addOperationEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*EncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type EncryptInput struct { + + // Identifies the KMS key to use in the encryption operation. The KMS key must have + // a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use the + // DescribeKey operation. To specify a KMS key, use its key ID, key ARN, alias + // name, or alias ARN. When using an alias name, prefix it with "alias/". To + // specify a KMS key in a different Amazon Web Services account, you must use the + // key ARN or alias ARN. 
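Reviewer note: the encryption-context behavior described above is easy to get wrong; decryption must supply the same case-sensitive pairs. A round-trip sketch, with Decrypt defined elsewhere in this package and all names illustrative:

```go
ec := map[string]string{"purpose": "db-password"} // must match exactly on decrypt

enc, err := client.Encrypt(ctx, &kms.EncryptInput{
	KeyId:             aws.String("alias/ExampleAlias"),
	Plaintext:         []byte("s3cr3t"),
	EncryptionContext: ec,
})
if err != nil {
	log.Fatal(err)
}

dec, err := client.Decrypt(ctx, &kms.DecryptInput{
	CiphertextBlob:    enc.CiphertextBlob,
	EncryptionContext: ec, // omitting or changing this makes the decrypt fail
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(dec.Plaintext))
```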
For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Data to be encrypted. + // + // This member is required. + Plaintext []byte + + // Specifies the encryption algorithm that KMS will use to encrypt the plaintext + // message. The algorithm must be compatible with the KMS key that you specify. + // This parameter is required only for asymmetric KMS keys. The default value, + // SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. If + // you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context that will be used to encrypt the data. An + // encryption context is valid only for cryptographic operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric encryption KMS key. The standard asymmetric encryption + // algorithms and HMAC algorithms that KMS uses do not support an encryption + // context. An encryption context is a collection of non-secret key-value pairs + // that represent additional authenticated data. When you use an encryption context + // to encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type EncryptOutput struct { + + // The encrypted plaintext. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to encrypt the plaintext. + EncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to encrypt the plaintext. + KeyId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpEncrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Encrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go new file mode 100644 index 0000000000..073edf96e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKey.go @@ -0,0 +1,266 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a plaintext copy of the data key and a copy that is encrypted under a +// symmetric encryption KMS key that you specify. The bytes in the plaintext key +// are random; they are not related to the caller or the KMS key. You can use the +// plaintext key to encrypt your data outside of KMS and store the encrypted data +// key with the encrypted data. To generate a data key, specify the symmetric +// encryption KMS key that will be used to encrypt the data key. You cannot use an +// asymmetric KMS key to encrypt data keys. To get the type of your KMS key, use +// the DescribeKey operation. 
You must also specify the length of the data key. Use +// either the KeySpec or NumberOfBytes parameters (but not both). For 128-bit and +// 256-bit data keys, use the KeySpec parameter. To generate an SM4 data key (China +// Regions only), specify a KeySpec value of AES_128 or NumberOfBytes value of 128. +// The symmetric encryption key used in China Regions to encrypt your data key is +// an SM4 encryption key. To get only an encrypted copy of the data key, use +// GenerateDataKeyWithoutPlaintext. To generate an asymmetric data key pair, use +// the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext operation. To get +// a cryptographically secure random byte string, use GenerateRandom. You can use +// an optional encryption context to add additional security to the encryption +// operation. If you specify an EncryptionContext, you must specify the same +// encryption context (a case-sensitive exact match) when decrypting the encrypted +// data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException. For more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. Applications in Amazon Web +// Services Nitro Enclaves can call this operation by using the Amazon Web Services +// Nitro Enclaves Development Kit +// (https://github.com/aws/aws-nitro-enclaves-sdk-c). For information about the +// supporting parameters, see How Amazon Web Services Nitro Enclaves use KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. How to use your data key We +// recommend that you use the following pattern to encrypt data locally in your +// application. You can write your own code or use a client-side encryption +// library, such as the Amazon Web Services Encryption SDK +// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), the Amazon +// DynamoDB Encryption Client +// (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), or +// Amazon S3 client-side encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// to do these tasks for you. To encrypt data outside of KMS: +// +// * Use the +// GenerateDataKey operation to get a data key. +// +// * Use the plaintext data key (in +// the Plaintext field of the response) to encrypt your data outside of KMS. Then +// erase the plaintext data key from memory. +// +// * Store the encrypted data key (in +// the CiphertextBlob field of the response) with the encrypted data. +// +// To decrypt +// data outside of KMS: +// +// * Use the Decrypt operation to decrypt the encrypted data +// key. The operation returns a plaintext copy of the data key. +// +// * Use the +// plaintext data key to decrypt data outside of KMS, then erase the plaintext data +// key from memory. +// +// Cross-account use: Yes. To perform this operation with a KMS +// key in a different Amazon Web Services account, specify the key ARN or alias ARN +// in the value of the KeyId parameter. 
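Reviewer note: the "How to use your data key" pattern above maps to a few lines of Go. A sketch using AES-256-GCM from the standard library for the local step; it assumes crypto/aes, crypto/cipher, crypto/rand, io, and the kms types package are imported, the alias is a placeholder, and for production the doc comment recommends a vetted client-side encryption library instead:

```go
// 1. Get a fresh 256-bit data key plus its KMS-encrypted copy.
dk, err := client.GenerateDataKey(ctx, &kms.GenerateDataKeyInput{
	KeyId:   aws.String("alias/ExampleAlias"),
	KeySpec: types.DataKeySpecAes256,
})
if err != nil {
	log.Fatal(err)
}

// 2. Encrypt locally with the plaintext key.
block, err := aes.NewCipher(dk.Plaintext)
if err != nil {
	log.Fatal(err)
}
gcm, err := cipher.NewGCM(block)
if err != nil {
	log.Fatal(err)
}
nonce := make([]byte, gcm.NonceSize())
if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
	log.Fatal(err)
}
sealed := gcm.Seal(nonce, nonce, []byte("payload"), nil)

// 3. Erase the plaintext key as the doc comment advises; persist only
// sealed and dk.CiphertextBlob, and use Decrypt to recover the key later.
for i := range dk.Plaintext {
	dk.Plaintext[i] = 0
}
_ = sealed
```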
Required permissions: kms:GenerateDataKey +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKeyPair +// +// * +// GenerateDataKeyPairWithoutPlaintext +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKey(ctx context.Context, params *GenerateDataKeyInput, optFns ...func(*Options)) (*GenerateDataKeyOutput, error) { + if params == nil { + params = &GenerateDataKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKey", params, optFns, c.addOperationGenerateDataKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the encryption context that will be used when encrypting the data key. + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Specifies the length of the data key. Use AES_128 to generate a 128-bit + // symmetric key, or AES_256 to generate a 256-bit symmetric key. You must specify + // either the KeySpec or the NumberOfBytes parameter (but not both) in every + // GenerateDataKey request. + KeySpec types.DataKeySpec + + // Specifies the length of the data key in bytes. 
For example, use the value 64 to + // generate a 512-bit data key (64 bytes is 512 bits). For 128-bit (16-byte) and + // 256-bit (32-byte) data keys, use the KeySpec parameter. You must specify either + // the KeySpec or the NumberOfBytes parameter (but not both) in every + // GenerateDataKey request. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateDataKeyOutput struct { + + // The encrypted copy of the data key. When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the data key. + KeyId *string + + // The plaintext data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. Use this + // data key to encrypt your data outside of KMS. Then, remove it from memory as + // soon as possible. + Plaintext []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go new file mode 100644 
index 0000000000..db884ddfd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPair.go @@ -0,0 +1,249 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This operation +// returns a plaintext public key, a plaintext private key, and a copy of the +// private key that is encrypted under the symmetric encryption KMS key you +// specify. You can use the data key pair to perform asymmetric cryptography and +// implement digital signatures outside of KMS. The bytes in the keys are random; +// they are not related to the caller or to the KMS key that is used to encrypt the +// private key. You can use the public key that GenerateDataKeyPair returns to +// encrypt data or verify a signature outside of KMS. Then, store the encrypted +// private key with the data. When you are ready to decrypt data or sign a message, +// you can use the Decrypt operation to decrypt the encrypted private key. To +// generate a data key pair, you must specify a symmetric encryption KMS key to +// encrypt the private key in a data key pair. You cannot use an asymmetric KMS key +// or a KMS key in a custom key store. To get the type and origin of your KMS key, +// use the DescribeKey operation. Use the KeyPairSpec parameter to choose an RSA or +// Elliptic Curve (ECC) data key pair. In China Regions, you can also choose an SM2 +// data key pair. KMS recommends that you use ECC key pairs for signing, and use +// RSA and SM2 key pairs for either encryption or signing, but not both. However, +// KMS cannot enforce any restrictions on the use of data key pairs outside of KMS. +// If you are using the data key pair to encrypt data, or for any operation where +// you don't immediately need a private key, consider using the +// GenerateDataKeyPairWithoutPlaintext operation. +// GenerateDataKeyPairWithoutPlaintext returns a plaintext public key and an +// encrypted private key, but omits the plaintext private key that you need only to +// decrypt ciphertext or sign a message. Later, when you need to decrypt the data +// or sign a message, use the Decrypt operation to decrypt the encrypted private +// key in the data key pair. GenerateDataKeyPair returns a unique data key pair for +// each request. The bytes in the keys are random; they are not related to the +// caller or the KMS key that is used to encrypt the private key. The public key is +// a DER-encoded X.509 SubjectPublicKeyInfo, as specified in RFC 5280 +// (https://tools.ietf.org/html/rfc5280). The private key is a DER-encoded PKCS8 +// PrivateKeyInfo, as specified in RFC 5958 (https://tools.ietf.org/html/rfc5958). +// You can use an optional encryption context to add additional security to the +// encryption operation. If you specify an EncryptionContext, you must specify the +// same encryption context (a case-sensitive exact match) when decrypting the +// encrypted data key. Otherwise, the request to decrypt fails with an +// InvalidCiphertextException. For more information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide.
The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateDataKeyPair +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPairWithoutPlaintext +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKeyPair(ctx context.Context, params *GenerateDataKeyPairInput, optFns ...func(*Options)) (*GenerateDataKeyPairOutput, error) { + if params == nil { + params = &GenerateDataKeyPairInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPair", params, optFns, c.addOperationGenerateDataKeyPairMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. To get the type and origin of your KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. The KMS rule that + // restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or + // to sign and verify (but not both), and the rule that permits you to use ECC KMS + // keys only to sign and verify, are not effective on data key pairs, which are + // used outside of KMS. The SM2 key spec is only available in China Regions. RSA + // and ECC asymmetric key pairs are also available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. An encryption context is a collection of non-secret + // key-value pairs that represent additional authenticated data. When you use an + // encryption context to encrypt data, you must specify the same (an exact + // case-sensitive match) encryption context to decrypt the data. An encryption + // context is supported only on operations with symmetric encryption KMS keys. On + // operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. 
For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the private key. + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The plaintext copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyPlaintext []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPair{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyPairValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPair(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + 
} + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPair(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyPair", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go new file mode 100644 index 0000000000..5683abdb38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyPairWithoutPlaintext.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique asymmetric data key pair for use outside of KMS. This operation +// returns a plaintext public key and a copy of the private key that is encrypted +// under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, +// this operation does not return a plaintext private key. The bytes in the keys +// are random; they are not related to the caller or to the KMS key that is used to +// encrypt the private key. You can use the public key that +// GenerateDataKeyPairWithoutPlaintext returns to encrypt data or verify a +// signature outside of KMS. Then, store the encrypted private key with the data. +// When you are ready to decrypt data or sign a message, you can use the Decrypt +// operation to decrypt the encrypted private key. To generate a data key pair, you +// must specify a symmetric encryption KMS key to encrypt the private key in a data +// key pair. You cannot use an asymmetric KMS key or a KMS key in a custom key +// store. To get the type and origin of your KMS key, use the DescribeKey +// operation. Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve +// (ECC) data key pair. In China Regions, you can also choose an SM2 data key pair. +// KMS recommends that you use ECC key pairs for signing, and use RSA and SM2 key +// pairs for either encryption or signing, but not both. However, KMS cannot +// enforce any restrictions on the use of data key pairs outside of KMS. +// GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each +// request. The bytes in the key are not related to the caller or KMS key that is +// used to encrypt the private key. The public key is a DER-encoded X.509 +// SubjectPublicKeyInfo, as specified in RFC 5280 +// (https://tools.ietf.org/html/rfc5280). You can use an optional encryption +// context to add additional security to the encryption operation. If you specify +// an EncryptionContext, you must specify the same encryption context (a +// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, +// the request to decrypt fails with an InvalidCiphertextException. For more +// information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. 
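A minimal sketch of calling the GenerateDataKeyPair operation just completed above, assuming a *kms.Client built as in the earlier sketch; the alias is a placeholder and the helper name generateDataKeyPair is mine, not the SDK's:

package kmsexample

import (
	"context"
	"crypto/x509"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// generateDataKeyPair requests an RSA-2048 data key pair and parses the
// returned public key. "alias/my-app-key" is a placeholder.
func generateDataKeyPair(ctx context.Context, client *kms.Client) error {
	out, err := client.GenerateDataKeyPair(ctx, &kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/my-app-key"),
		KeyPairSpec: types.DataKeyPairSpecRsa2048,
	})
	if err != nil {
		return err
	}
	// The public key is DER-encoded SubjectPublicKeyInfo (RFC 5280).
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		return err
	}
	fmt.Printf("public key %T; encrypted private key: %d bytes\n", pub, len(out.PrivateKeyCiphertextBlob))
	// Zero out.PrivateKeyPlaintext as soon as it is no longer needed.
	return nil
}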
The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateDataKeyPairWithoutPlaintext +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +// +// * GenerateDataKeyWithoutPlaintext +func (c *Client) GenerateDataKeyPairWithoutPlaintext(ctx context.Context, params *GenerateDataKeyPairWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyPairWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyPairWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyPairWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyPairWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the private key in the + // data key pair. You cannot specify an asymmetric KMS key or a KMS key in a custom + // key store. To get the type and origin of your KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Determines the type of data key pair that is generated. The KMS rule that + // restricts the use of asymmetric RSA and SM2 KMS keys to encrypt and decrypt or + // to sign and verify (but not both), and the rule that permits you to use ECC KMS + // keys only to sign and verify, are not effective on data key pairs, which are + // used outside of KMS. The SM2 key spec is only available in China Regions. RSA + // and ECC asymmetric key pairs are also available in China Regions. + // + // This member is required. + KeyPairSpec types.DataKeyPairSpec + + // Specifies the encryption context that will be used when encrypting the private + // key in the data key pair. An encryption context is a collection of non-secret + // key-value pairs that represent additional authenticated data. When you use an + // encryption context to encrypt data, you must specify the same (an exact + // case-sensitive match) encryption context to decrypt the data. An encryption + // context is supported only on operations with symmetric encryption KMS keys. 
On + // operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateDataKeyPairWithoutPlaintextOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the private key. + KeyId *string + + // The type of data key pair that was generated. + KeyPairSpec types.DataKeyPairSpec + + // The encrypted copy of the private key. When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PrivateKeyCiphertextBlob []byte + + // The public key (in plaintext). When you use the HTTP API or the Amazon Web + // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + PublicKey []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyPairWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { 
+ return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyPairWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyPairWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go new file mode 100644 index 0000000000..f7fb211b4c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateDataKeyWithoutPlaintext.go @@ -0,0 +1,226 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a unique symmetric data key for use outside of KMS. This operation +// returns a data key that is encrypted under a symmetric encryption KMS key that +// you specify. The bytes in the key are random; they are not related to the caller +// or to the KMS key. GenerateDataKeyWithoutPlaintext is identical to the +// GenerateDataKey operation except that it does not return a plaintext copy of the +// data key. This operation is useful for systems that need to encrypt data at some +// point, but not immediately. When you need to encrypt the data, you call the +// Decrypt operation on the encrypted copy of the key. It's also useful in +// distributed systems with different levels of trust. For example, you might store +// encrypted data in containers. One component of your system creates new +// containers and stores an encrypted data key with each container. Then, a +// different component puts the data into the containers. That component first +// decrypts the data key, uses the plaintext data key to encrypt data, puts the +// encrypted data into the container, and then destroys the plaintext data key. In +// this system, the component that creates the containers never sees the plaintext +// data key. To request an asymmetric data key pair, use the GenerateDataKeyPair or +// GenerateDataKeyPairWithoutPlaintext operations. To generate a data key, you must +// specify the symmetric encryption KMS key that is used to encrypt the data key. +// You cannot use an asymmetric KMS key or a key in a custom key store to generate +// a data key. To get the type of your KMS key, use the DescribeKey operation. If +// the operation succeeds, you will find the encrypted copy of the data key in the +// CiphertextBlob field. You can use an optional encryption context to add +// additional security to the encryption operation. If you specify an +// EncryptionContext, you must specify the same encryption context (a +// case-sensitive exact match) when decrypting the encrypted data key. Otherwise, +// the request to decrypt fails with an InvalidCiphertextException. For more +// information, see Encryption Context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// in the Key Management Service Developer Guide. 
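A corresponding sketch for the GenerateDataKeyPairWithoutPlaintext operation above (illustrative only; placeholder alias, helper name mine). Only the public key and the encrypted private key come back:

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// pairWithoutPlaintext returns the plaintext public key and the encrypted
// private key; call Decrypt later, only when the private key is needed.
func pairWithoutPlaintext(ctx context.Context, client *kms.Client) (pub, encPriv []byte, err error) {
	out, err := client.GenerateDataKeyPairWithoutPlaintext(ctx, &kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       aws.String("alias/my-app-key"), // placeholder
		KeyPairSpec: types.DataKeyPairSpecEccNistP256,
	})
	if err != nil {
		return nil, nil, err
	}
	return out.PublicKey, out.PrivateKeyCiphertextBlob, nil
}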
The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateDataKeyWithoutPlaintext +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +// +// * GenerateDataKeyPairWithoutPlaintext +func (c *Client) GenerateDataKeyWithoutPlaintext(ctx context.Context, params *GenerateDataKeyWithoutPlaintextInput, optFns ...func(*Options)) (*GenerateDataKeyWithoutPlaintextOutput, error) { + if params == nil { + params = &GenerateDataKeyWithoutPlaintextInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateDataKeyWithoutPlaintext", params, optFns, c.addOperationGenerateDataKeyWithoutPlaintextMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateDataKeyWithoutPlaintextOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateDataKeyWithoutPlaintextInput struct { + + // Specifies the symmetric encryption KMS key that encrypts the data key. You + // cannot specify an asymmetric KMS key or a KMS key in a custom key store. To get + // the type and origin of your KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the encryption context that will be used when encrypting the data key. + // An encryption context is a collection of non-secret key-value pairs that + // represent additional authenticated data. When you use an encryption context to + // encrypt data, you must specify the same (an exact case-sensitive match) + // encryption context to decrypt the data. An encryption context is supported only + // on operations with symmetric encryption KMS keys. On operations with symmetric + // encryption KMS keys, an encryption context is optional, but it is strongly + // recommended. For more information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + EncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. 
+ // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // The length of the data key. Use AES_128 to generate a 128-bit symmetric key, or + // AES_256 to generate a 256-bit symmetric key. + KeySpec types.DataKeySpec + + // The length of the data key in bytes. For example, use the value 64 to generate a + // 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit and + // 256-bit symmetric keys), we recommend that you use the KeySpec field instead of + // this one. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateDataKeyWithoutPlaintextOutput struct { + + // The encrypted data key. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that encrypted the data key. + KeyId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateDataKeyWithoutPlaintextMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateDataKeyWithoutPlaintext(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: 
ServiceID, + SigningName: "kms", + OperationName: "GenerateDataKeyWithoutPlaintext", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go new file mode 100644 index 0000000000..ba59d87c54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateMac.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Generates a hash-based message authentication code (HMAC) for a message using an +// HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm +// computes the HMAC for the message and the key as described in RFC 2104 +// (https://datatracker.ietf.org/doc/html/rfc2104). You can use the HMAC that this +// operation generates with the VerifyMac operation to demonstrate that the +// original message has not changed. Also, because a secret key is used to create +// the hash, you can verify that the party that generated the hash has the required +// secret key. This operation is part of KMS support for HMAC KMS keys. For +// details, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. Best practices recommend that you limit the +// time during which any signing mechanism, including an HMAC, is effective. This +// deters an attack where the actor uses a signed message to establish validity +// repeatedly or long after the message is superseded. HMAC tags do not include a +// timestamp, but you can include a timestamp in the token or message to help you +// detect when it's time to refresh the HMAC. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: Yes. To perform +// this operation with a KMS key in a different Amazon Web Services account, +// specify the key ARN or alias ARN in the value of the KeyId parameter. Required +// permissions: kms:GenerateMac +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: VerifyMac +func (c *Client) GenerateMac(ctx context.Context, params *GenerateMacInput, optFns ...func(*Options)) (*GenerateMacOutput, error) { + if params == nil { + params = &GenerateMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateMac", params, optFns, c.addOperationGenerateMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateMacInput struct { + + // The HMAC KMS key to use in the operation. The MAC algorithm computes the HMAC + // for the message and the key as described in RFC 2104 + // (https://datatracker.ietf.org/doc/html/rfc2104). To identify an HMAC KMS key, + // use the DescribeKey operation and see the KeySpec field in the response. + // + // This member is required. + KeyId *string + + // The MAC algorithm used in the operation. The algorithm must be compatible with + // the HMAC KMS key that you specify.
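For the GenerateDataKeyWithoutPlaintext operation completed above, a sketch of the container pattern its documentation describes (illustrative only; placeholder alias, helper name mine):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// newContainerKey returns only the encrypted data key, which can be stored
// with a new container; a component that later needs to encrypt calls
// Decrypt on it to recover the plaintext key.
func newContainerKey(ctx context.Context, client *kms.Client) ([]byte, error) {
	out, err := client.GenerateDataKeyWithoutPlaintext(ctx, &kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String("alias/my-app-key"), // placeholder
		KeySpec: types.DataKeySpecAes256,
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}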
To find the MAC algorithms that your HMAC KMS + // key supports, use the DescribeKey operation and see the MacAlgorithms field in + // the DescribeKey response. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message to be hashed. Specify a message of up to 4,096 bytes. GenerateMac + // and VerifyMac do not provide special handling for message digests. If you + // generate an HMAC for a hash digest of a message, you must verify the HMAC of the + // same hash digest. + // + // This member is required. + Message []byte + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + noSmithyDocumentSerde +} + +type GenerateMacOutput struct { + + // The HMAC KMS key used in the operation. + KeyId *string + + // The hash-based message authentication code (HMAC) for the given message, key, + // and MAC algorithm. + Mac []byte + + // The MAC algorithm that was used to generate the HMAC. + MacAlgorithm types.MacAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateMacMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateMac{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGenerateMacValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateMac(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateMac(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ 
+ Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateMac", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go new file mode 100644 index 0000000000..bf2b843577 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GenerateRandom.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a random byte string that is cryptographically secure. You must use the +// NumberOfBytes parameter to specify the length of the random byte string. There +// is no default value for string length. By default, the random byte string is +// generated in KMS. To generate the byte string in the CloudHSM cluster that is +// associated with a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), +// specify the custom key store ID. Applications in Amazon Web Services Nitro +// Enclaves can call this operation by using the Amazon Web Services Nitro Enclaves +// Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). For +// information about the supporting parameters, see How Amazon Web Services Nitro +// Enclaves use KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) +// in the Key Management Service Developer Guide. For more information about +// entropy and random number generation, see Key Management Service Cryptographic +// Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/). +// Cross-account use: Not applicable. GenerateRandom does not use any +// account-specific resources, such as KMS keys. Required permissions: +// kms:GenerateRandom +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) +func (c *Client) GenerateRandom(ctx context.Context, params *GenerateRandomInput, optFns ...func(*Options)) (*GenerateRandomOutput, error) { + if params == nil { + params = &GenerateRandomInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GenerateRandom", params, optFns, c.addOperationGenerateRandomMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GenerateRandomOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GenerateRandomInput struct { + + // Generates the random byte string in the CloudHSM cluster that is associated with + // the specified custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). + // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. + CustomKeyStoreId *string + + // The length of the random byte string. This parameter is required. + NumberOfBytes *int32 + + noSmithyDocumentSerde +} + +type GenerateRandomOutput struct { + + // The random byte string. When you use the HTTP API or the Amazon Web Services + // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. + Plaintext []byte + + // Metadata pertaining to the operation's result. 
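A sketch for the GenerateMac operation completed above (illustrative only; "alias/my-hmac-key" is a placeholder and must name an HMAC KMS key, and the helper name is mine):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
)

// hmacTag computes an HMAC-SHA-256 tag for msg (up to 4,096 bytes) under an
// HMAC KMS key; verify it later with the VerifyMac operation.
func hmacTag(ctx context.Context, client *kms.Client, msg []byte) ([]byte, error) {
	out, err := client.GenerateMac(ctx, &kms.GenerateMacInput{
		KeyId:        aws.String("alias/my-hmac-key"), // placeholder
		MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
		Message:      msg,
	})
	if err != nil {
		return nil, err
	}
	return out.Mac, nil
}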
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGenerateRandomMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGenerateRandom{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateRandom(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGenerateRandom(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GenerateRandom", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go new file mode 100644 index 0000000000..ca9e135bd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyPolicy.go @@ -0,0 +1,140 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a key policy attached to the specified KMS key. Cross-account use: No. You +// cannot perform this operation on a KMS key in a different Amazon Web Services +// account. 
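The GenerateRandom operation above takes only a byte count (plus an optional custom key store ID); a one-call sketch, illustrative only:

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// randomBytes fetches n cryptographically secure random bytes from KMS.
// NumberOfBytes has no default, so it must always be set.
func randomBytes(ctx context.Context, client *kms.Client, n int32) ([]byte, error) {
	out, err := client.GenerateRandom(ctx, &kms.GenerateRandomInput{
		NumberOfBytes: aws.Int32(n),
	})
	if err != nil {
		return nil, err
	}
	return out.Plaintext, nil
}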
Required permissions: kms:GetKeyPolicy +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: PutKeyPolicy +func (c *Client) GetKeyPolicy(ctx context.Context, params *GetKeyPolicyInput, optFns ...func(*Options)) (*GetKeyPolicyOutput, error) { + if params == nil { + params = &GetKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyPolicy", params, optFns, c.addOperationGetKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyPolicyInput struct { + + // Gets the key policy for the specified KMS key. Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Specifies the name of the key policy. The only valid name is default. To get the + // names of key policies, use ListKeyPolicies. + // + // This member is required. + PolicyName *string + + noSmithyDocumentSerde +} + +type GetKeyPolicyOutput struct { + + // A key policy document in JSON format. + Policy *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetKeyPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return 
&awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetKeyPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go new file mode 100644 index 0000000000..47673d2f5a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetKeyRotationStatus.go @@ -0,0 +1,187 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a Boolean value that indicates whether automatic rotation of the key +// material +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) is +// enabled for the specified KMS key. When you enable automatic rotation for +// customer managed KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), +// KMS rotates the key material of the KMS key one year (approximately 365 days) +// from the enable date and every year thereafter. You can monitor rotation of the +// key material for your KMS keys in CloudTrail and Amazon CloudWatch. Automatic +// key rotation is supported only on symmetric encryption KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#symmetric-cmks). +// You cannot enable or disable automatic rotation of asymmetric KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), +// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), +// KMS keys with imported key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), or +// KMS keys in a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). +// The key rotation status of these KMS keys is always false. To enable or disable +// automatic rotation of a set of related multi-Region keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), +// set the property on the primary key. You can enable (EnableKeyRotation) and +// disable (DisableKeyRotation) automatic rotation of the key material in customer +// managed KMS keys. Key material rotation of Amazon Web Services managed KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) +// is not configurable. KMS always rotates the key material in Amazon Web Services +// managed KMS keys every year. The key rotation status for Amazon Web Services +// managed KMS keys is always true. In May 2022, KMS changed the rotation schedule +// for Amazon Web Services managed keys from every three years to every year. For +// details, see EnableKeyRotation. The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. +// +// * Disabled: The key rotation status +// does not change when you disable a KMS key. However, while the KMS key is +// disabled, KMS does not rotate the key material. When you re-enable the KMS key, +// rotation resumes.
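A sketch for the GetKeyPolicy operation completed above; "default" is the only valid policy name, and the key ID is the documentation's example placeholder:

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// keyPolicyJSON fetches the key policy document attached to a KMS key.
func keyPolicyJSON(ctx context.Context, client *kms.Client) (string, error) {
	out, err := client.GetKeyPolicy(ctx, &kms.GetKeyPolicyInput{
		KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
		PolicyName: aws.String("default"),
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.Policy), nil
}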
If the key material in the re-enabled KMS key hasn't been +// rotated in one year, KMS rotates it immediately, and every year thereafter. If +// it's been less than a year since the key material in the re-enabled KMS key was +// rotated, the KMS key resumes its prior rotation schedule. +// +// * Pending deletion: +// While a KMS key is pending deletion, its key rotation status is false and KMS +// does not rotate the key material. If you cancel the deletion, the original key +// rotation status returns to true. +// +// Cross-account use: Yes. To perform this +// operation on a KMS key in a different Amazon Web Services account, specify the +// key ARN in the value of the KeyId parameter. Required permissions: +// kms:GetKeyRotationStatus +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * DisableKeyRotation +// +// * EnableKeyRotation +func (c *Client) GetKeyRotationStatus(ctx context.Context, params *GetKeyRotationStatusInput, optFns ...func(*Options)) (*GetKeyRotationStatusOutput, error) { + if params == nil { + params = &GetKeyRotationStatusInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetKeyRotationStatus", params, optFns, c.addOperationGetKeyRotationStatusMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetKeyRotationStatusOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetKeyRotationStatusInput struct { + + // Gets the rotation status for the specified KMS key. Specify the key ID or key + // ARN of the KMS key. To specify a KMS key in a different Amazon Web Services + // account, you must use the key ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type GetKeyRotationStatusOutput struct { + + // A Boolean value that specifies whether key rotation is enabled. + KeyRotationEnabled bool + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetKeyRotationStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetKeyRotationStatus{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetKeyRotationStatusValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetKeyRotationStatus(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetKeyRotationStatus(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetKeyRotationStatus", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go new file mode 100644 index 0000000000..88995874cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetParametersForImport.go @@ -0,0 +1,190 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Returns the items you need to import key material into a symmetric encryption +// KMS key. For more information about importing key material into KMS, see +// Importing key material +// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in +// the Key Management Service Developer Guide. This operation returns a public key +// and an import token. Use the public key to encrypt the symmetric key material. +// Store the import token to send with a subsequent ImportKeyMaterial request. 
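A sketch for the GetKeyRotationStatus operation completed above (illustrative only; helper name mine):

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// rotationEnabled reports whether annual key-material rotation is on for the
// given KMS key; per the docs above it is always true for Amazon Web Services
// managed keys and always false for key types that do not support rotation.
func rotationEnabled(ctx context.Context, client *kms.Client, keyID string) (bool, error) {
	out, err := client.GetKeyRotationStatus(ctx, &kms.GetKeyRotationStatusInput{
		KeyId: aws.String(keyID),
	})
	if err != nil {
		return false, err
	}
	return out.KeyRotationEnabled, nil
}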
You +// must specify the key ID of the symmetric encryption KMS key into which you will +// import key material. This KMS key's Origin must be EXTERNAL. You must also +// specify the wrapping algorithm and type of wrapping key (public key) that you +// will use to encrypt the key material. You cannot perform this operation on an +// asymmetric KMS key, an HMAC KMS key, or on any KMS key in a different Amazon Web +// Services account. To import key material, you must use the public key and import +// token from the same response. These items are valid for 24 hours. The expiration +// date and time appear in the GetParametersForImport response. You cannot use an +// expired token in an ImportKeyMaterial request. If your key and token expire, +// send another GetParametersForImport request. The KMS key that you use for this +// operation must be in a compatible key state. For details, see Key states of KMS +// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in +// the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:GetParametersForImport +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * ImportKeyMaterial +// +// * +// DeleteImportedKeyMaterial +func (c *Client) GetParametersForImport(ctx context.Context, params *GetParametersForImportInput, optFns ...func(*Options)) (*GetParametersForImportOutput, error) { + if params == nil { + params = &GetParametersForImportInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetParametersForImport", params, optFns, c.addOperationGetParametersForImportMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetParametersForImportOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetParametersForImportInput struct { + + // The identifier of the symmetric encryption KMS key into which you will import + // key material. The Origin of the KMS key must be EXTERNAL. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The algorithm you will use to encrypt the key material before importing it with + // ImportKeyMaterial. For more information, see Encrypt the Key Material + // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) + // in the Key Management Service Developer Guide. + // + // This member is required. + WrappingAlgorithm types.AlgorithmSpec + + // The type of wrapping key (public key) to return in the response. Only 2048-bit + // RSA public keys are supported. + // + // This member is required. + WrappingKeySpec types.WrappingKeySpec + + noSmithyDocumentSerde +} + +type GetParametersForImportOutput struct { + + // The import token to send in a subsequent ImportKeyMaterial request. + ImportToken []byte + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key to use in a subsequent ImportKeyMaterial request. This is the + // same KMS key specified in the GetParametersForImport request. 
+ KeyId *string + + // The time at which the import token and public key are no longer valid. After + // this time, you cannot use them to make an ImportKeyMaterial request and you must + // send another GetParametersForImport request to get new ones. + ParametersValidTo *time.Time + + // The public key to use to encrypt the key material before importing it with + // ImportKeyMaterial. + PublicKey []byte + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetParametersForImportMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetParametersForImport{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetParametersForImportValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetParametersForImport(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetParametersForImport(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetParametersForImport", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go new file mode 100644 index 0000000000..a7e8e4006f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_GetPublicKey.go @@ -0,0 +1,236 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the public key of an asymmetric KMS key. 
+// Unlike the private key of an
+// asymmetric KMS key, which never leaves KMS unencrypted, callers with
+// kms:GetPublicKey permission can download the public key of an asymmetric KMS
+// key. You can share the public key to allow others to encrypt messages and verify
+// signatures outside of KMS. For information about asymmetric KMS keys, see
+// Asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. You do not need to download the
+// public key. Instead, you can use the public key within KMS by calling the
+// Encrypt, ReEncrypt, or Verify operations with the identifier of an asymmetric
+// KMS key. When you use the public key within KMS, you benefit from the
+// authentication, authorization, and logging that are part of every KMS operation.
+// You also reduce the risk of encrypting data that cannot be decrypted. These
+// features are not effective outside of KMS. To verify a signature outside of KMS
+// with an SM2 public key (China Regions only), you must specify the distinguishing
+// ID. By default, KMS uses 1234567812345678 as the distinguishing ID. For more
+// information, see Offline verification with SM2 key pairs
+// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification).
+// To help you use the public key safely outside of KMS, GetPublicKey returns
+// important information about the public key in the response, including:
+//
+// *
+// KeySpec
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec):
+// The type of key material in the public key, such as RSA_4096 or
+// ECC_NIST_P521.
+//
+// * KeyUsage
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage):
+// Whether the key is used for encryption or signing.
+//
+// * EncryptionAlgorithms
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms)
+// or SigningAlgorithms
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms):
+// A list of the encryption algorithms or the signing algorithms for the
+// key.
+//
+// Although KMS cannot enforce these restrictions on external operations, it
+// is crucial that you use this information to prevent the public key from being
+// used improperly. For example, you can prevent a public signing key from being
+// used to encrypt data, or prevent a public key from being used with an encryption
+// algorithm that is not supported by KMS. You can also avoid errors, such as using
+// the wrong signing algorithm in a verification operation. The KMS key that you
+// use for this operation must be in a compatible key state. For details, see Key
+// states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: Yes. To perform this
+// operation with a KMS key in a different Amazon Web Services account, specify the
+// key ARN or alias ARN in the value of the KeyId parameter.
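+//
+// As an illustrative sketch (assuming a configured *Client named client, a
+// context ctx, the aws helper package for aws.String, and the standard
+// library crypto/x509), downloading and parsing the key might look like:
+//
+//	out, err := client.GetPublicKey(ctx, &kms.GetPublicKeyInput{
+//		KeyId: aws.String("alias/ExampleAlias"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// PublicKey holds DER-encoded SubjectPublicKeyInfo bytes.
+//	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
+//	if err != nil {
+//		return err
+//	}
+//	_ = pub // e.g. *rsa.PublicKey or *ecdsa.PublicKey, depending on KeySpec
+//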
+// Required permissions:
+// kms:GetPublicKey
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations: CreateKey
+func (c *Client) GetPublicKey(ctx context.Context, params *GetPublicKeyInput, optFns ...func(*Options)) (*GetPublicKeyOutput, error) {
+	if params == nil {
+		params = &GetPublicKeyInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "GetPublicKey", params, optFns, c.addOperationGetPublicKeyMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*GetPublicKeyOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type GetPublicKeyInput struct {
+
+	// Identifies the asymmetric KMS key that includes the public key. To specify a KMS
+	// key, use its key ID, key ARN, alias name, or alias ARN. When using an alias
+	// name, prefix it with "alias/". To specify a KMS key in a different Amazon Web
+	// Services account, you must use the key ARN or alias ARN. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// *
+	// Alias name: alias/ExampleAlias
+	//
+	// * Alias ARN:
+	// arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
+	//
+	// To get the key ID and key
+	// ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias
+	// ARN, use ListAliases.
+	//
+	// This member is required.
+	KeyId *string
+
+	// A list of grant tokens. Use a grant token when your permission to call this
+	// operation comes from a new grant that has not yet achieved eventual consistency.
+	// For more information, see Grant token
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+	// and Using a grant token
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+	// in the Key Management Service Developer Guide.
+	GrantTokens []string
+
+	noSmithyDocumentSerde
+}
+
+type GetPublicKeyOutput struct {
+
+	// Instead, use the KeySpec field in the GetPublicKey response. The KeySpec and
+	// CustomerMasterKeySpec fields have the same value. We recommend that you use the
+	// KeySpec field in your code. However, to avoid breaking changes, KMS will support
+	// both fields.
+	//
+	// Deprecated: This field has been deprecated. Instead, use the KeySpec field.
+	CustomerMasterKeySpec types.CustomerMasterKeySpec
+
+	// The encryption algorithms that KMS supports for this key. This information is
+	// critical. If a public key encrypts data outside of KMS by using an unsupported
+	// encryption algorithm, the ciphertext cannot be decrypted. This field appears in
+	// the response only when the KeyUsage of the public key is ENCRYPT_DECRYPT.
+	EncryptionAlgorithms []types.EncryptionAlgorithmSpec
+
+	// The Amazon Resource Name (key ARN
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
+	// of the asymmetric KMS key from which the public key was downloaded.
+	KeyId *string
+
+	// The type of the public key that was downloaded.
+	KeySpec types.KeySpec
+
+	// The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or
+	// SIGN_VERIFY. This information is critical. If a public key with SIGN_VERIFY key
+	// usage encrypts data outside of KMS, the ciphertext cannot be decrypted.
+	KeyUsage types.KeyUsageType
+
+	// The exported public key.
The value is a DER-encoded X.509 public key, also known + // as SubjectPublicKeyInfo (SPKI), as defined in RFC 5280 + // (https://tools.ietf.org/html/rfc5280). When you use the HTTP API or the Amazon + // Web Services CLI, the value is Base64-encoded. Otherwise, it is not + // Base64-encoded. + PublicKey []byte + + // The signing algorithms that KMS supports for this key. This field appears in the + // response only when the KeyUsage of the public key is SIGN_VERIFY. + SigningAlgorithms []types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetPublicKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpGetPublicKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetPublicKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetPublicKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opGetPublicKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "GetPublicKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go new file mode 100644 index 0000000000..78dc744c29 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ImportKeyMaterial.go @@ -0,0 +1,212 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+	"time"
+)
+
+// Imports key material into an existing symmetric encryption KMS key that was
+// created without key material. After you successfully import key material into a
+// KMS key, you can reimport the same key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
+// into that KMS key, but you cannot import different key material. You cannot
+// perform this operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS
+// key in a different Amazon Web Services account. For more information about
+// creating KMS keys with no key material and then importing key material, see
+// Importing Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) in
+// the Key Management Service Developer Guide. Before using this operation, call
+// GetParametersForImport. Its response includes a public key and an import token.
+// Use the public key to encrypt the key material. Then, submit the import token
+// from the same GetParametersForImport response. When calling this operation, you
+// must specify the following values:
+//
+// * The key ID or key ARN of a KMS key with no
+// key material. Its Origin must be EXTERNAL. To create a KMS key with no key
+// material, call CreateKey and set the value of its Origin parameter to EXTERNAL.
+// To get the Origin of a KMS key, call DescribeKey.
+//
+// * The encrypted key
+// material. To get the public key to encrypt the key material, call
+// GetParametersForImport.
+//
+// * The import token that GetParametersForImport
+// returned. You must use a public key and token from the same
+// GetParametersForImport response.
+//
+// * Whether the key material expires and if so,
+// when. If you set an expiration date, KMS deletes the key material from the KMS
+// key on the specified date, and the KMS key becomes unusable. To use the KMS key
+// again, you must reimport the same key material. The only way to change an
+// expiration date is by reimporting the same key material and specifying a new
+// expiration date.
+//
+// When this operation is successful, the key state of the KMS
+// key changes from PendingImport to Enabled, and you can use the KMS key. If this
+// operation fails, use the exception to help determine the problem. If the error
+// is related to the key material, the import token, or wrapping key, use
+// GetParametersForImport to get a new public key and import token for the KMS key
+// and repeat the import procedure. For help, see How To Import Key Material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview)
+// in the Key Management Service Developer Guide. The KMS key that you use for this
+// operation must be in a compatible key state. For details, see Key states of KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in
+// the Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on a KMS key in a different Amazon Web Services account.
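+//
+// As an illustrative sketch of that flow (assuming a configured *Client named
+// client, a context ctx, locally generated key material in keyMaterial, and
+// rsaPub, an *rsa.PublicKey parsed from the GetParametersForImport response
+// with x509.ParsePKIXPublicKey), the import might look like:
+//
+//	params, err := client.GetParametersForImport(ctx, &kms.GetParametersForImportInput{
+//		KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		WrappingAlgorithm: types.AlgorithmSpecRsaesOaepSha256,
+//		WrappingKeySpec:   types.WrappingKeySpecRsa2048,
+//	})
+//	// ... handle err, parse params.PublicKey into rsaPub ...
+//	wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, keyMaterial, nil)
+//	// ... handle err ...
+//	_, err = client.ImportKeyMaterial(ctx, &kms.ImportKeyMaterialInput{
+//		KeyId:                params.KeyId,
+//		ImportToken:          params.ImportToken,
+//		EncryptedKeyMaterial: wrapped,
+//		ExpirationModel:      types.ExpirationModelTypeKeyMaterialDoesNotExpire,
+//	})
+//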
+// Required permissions: kms:ImportKeyMaterial
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * DeleteImportedKeyMaterial
+//
+// *
+// GetParametersForImport
+func (c *Client) ImportKeyMaterial(ctx context.Context, params *ImportKeyMaterialInput, optFns ...func(*Options)) (*ImportKeyMaterialOutput, error) {
+	if params == nil {
+		params = &ImportKeyMaterialInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ImportKeyMaterial", params, optFns, c.addOperationImportKeyMaterialMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ImportKeyMaterialOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ImportKeyMaterialInput struct {
+
+	// The encrypted key material to import. The key material must be encrypted with
+	// the public wrapping key that GetParametersForImport returned, using the wrapping
+	// algorithm that you specified in the same GetParametersForImport request.
+	//
+	// This member is required.
+	EncryptedKeyMaterial []byte
+
+	// The import token that you received in the response to a previous
+	// GetParametersForImport request. It must be from the same response that contained
+	// the public key that you used to encrypt the key material.
+	//
+	// This member is required.
+	ImportToken []byte
+
+	// The identifier of the symmetric encryption KMS key that receives the imported
+	// key material. This must be the same KMS key specified in the KeyID parameter of
+	// the corresponding GetParametersForImport request. The Origin of the KMS key must
+	// be EXTERNAL. You cannot perform this operation on an asymmetric KMS key, an HMAC
+	// KMS key, a KMS key in a custom key store, or on a KMS key in a different Amazon
+	// Web Services account. Specify the key ID or key ARN of the KMS key. For
+	// example:
+	//
+	// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES,
+	// in which case you must include the ValidTo parameter. When this parameter is set
+	// to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter.
+	ExpirationModel types.ExpirationModelType
+
+	// The time at which the imported key material expires. When the key material
+	// expires, KMS deletes the key material and the KMS key becomes unusable. You must
+	// omit this parameter when the ExpirationModel parameter is set to
+	// KEY_MATERIAL_DOES_NOT_EXPIRE. Otherwise it is required.
+	ValidTo *time.Time
+
+	noSmithyDocumentSerde
+}
+
+type ImportKeyMaterialOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationImportKeyMaterialMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpImportKeyMaterial{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpImportKeyMaterialValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportKeyMaterial(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opImportKeyMaterial(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ImportKeyMaterial", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go new file mode 100644 index 0000000000..10f9345036 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListAliases.go @@ -0,0 +1,270 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of aliases in the caller's Amazon Web Services account and region. +// For more information about aliases, see CreateAlias. By default, the ListAliases +// operation returns all aliases in the account and region. To get only the aliases +// associated with a particular KMS key, use the KeyId parameter. The ListAliases +// response can include aliases that you created and associated with your customer +// managed keys, and aliases that Amazon Web Services created and associated with +// Amazon Web Services managed keys in your account. 
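+//
+// As an illustrative sketch (assuming a configured *Client named client, a
+// context ctx, and the aws helper package for aws.ToString), walking every
+// alias with the paginator defined later in this file might look like:
+//
+//	p := kms.NewListAliasesPaginator(client, &kms.ListAliasesInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, alias := range page.Aliases {
+//			fmt.Println(aws.ToString(alias.AliasName))
+//		}
+//	}
+//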
+// You can recognize Amazon Web
+// Services aliases because their names have the format aws/, such as aws/dynamodb.
+// The response might also include aliases that have no TargetKeyId field. These
+// are predefined aliases that Amazon Web Services has created but has not yet
+// associated with a KMS key. Aliases that Amazon Web Services creates in your
+// account, including predefined aliases, do not count against your KMS aliases
+// quota
+// (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit).
+// Cross-account use: No. ListAliases does not return aliases in other Amazon Web
+// Services accounts. Required permissions: kms:ListAliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) For details, see Controlling access to aliases
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access)
+// in the Key Management Service Developer Guide. Related operations:
+//
+// *
+// CreateAlias
+//
+// * DeleteAlias
+//
+// * UpdateAlias
+func (c *Client) ListAliases(ctx context.Context, params *ListAliasesInput, optFns ...func(*Options)) (*ListAliasesOutput, error) {
+	if params == nil {
+		params = &ListAliasesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListAliases", params, optFns, c.addOperationListAliasesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListAliasesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListAliasesInput struct {
+
+	// Lists only aliases that are associated with the specified KMS key. Enter a KMS
+	// key in your Amazon Web Services account. This parameter is optional. If you omit
+	// it, ListAliases returns all aliases in the account and Region. Specify the key
+	// ID or key ARN of the KMS key. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	KeyId *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 100, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListAliasesOutput struct {
+
+	// A list of aliases.
+	Aliases []types.AliasListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListAliasesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListAliases{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAliases(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListAliasesAPIClient is a client that implements the ListAliases operation. +type ListAliasesAPIClient interface { + ListAliases(context.Context, *ListAliasesInput, ...func(*Options)) (*ListAliasesOutput, error) +} + +var _ ListAliasesAPIClient = (*Client)(nil) + +// ListAliasesPaginatorOptions is the paginator options for ListAliases +type ListAliasesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListAliasesPaginator is a paginator for ListAliases +type ListAliasesPaginator struct { + options ListAliasesPaginatorOptions + client ListAliasesAPIClient + params *ListAliasesInput + nextToken *string + firstPage bool +} + +// NewListAliasesPaginator returns a new ListAliasesPaginator +func NewListAliasesPaginator(client ListAliasesAPIClient, params *ListAliasesInput, optFns ...func(*ListAliasesPaginatorOptions)) *ListAliasesPaginator { + if params == nil { + params = &ListAliasesInput{} + } + + options := ListAliasesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListAliasesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListAliasesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListAliases page. +func (p *ListAliasesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListAliasesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListAliases(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListAliases(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListAliases", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go new file mode 100644 index 0000000000..21b11e1243 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListGrants.go @@ -0,0 +1,283 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all grants for the specified KMS key. You must specify the KMS +// key in all requests. You can filter the grant list by grant ID or grantee +// principal. For detailed information about grants, including grant terminology, +// see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// The GranteePrincipal field in the ListGrants response usually contains the user +// or role designated as the grantee principal in the grant. 
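+//
+// As an illustrative sketch of reading this field (assuming a configured
+// *Client named client, a context ctx, a key ARN in keyARN, and the aws
+// helper package), paging through grants might look like:
+//
+//	p := kms.NewListGrantsPaginator(client, &kms.ListGrantsInput{
+//		KeyId: aws.String(keyARN),
+//	})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, g := range page.Grants {
+//			fmt.Println(aws.ToString(g.GrantId), aws.ToString(g.GranteePrincipal))
+//		}
+//	}
+//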
+// However, when the
+// grantee principal in the grant is an Amazon Web Services service, the
+// GranteePrincipal field contains the service principal
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services),
+// which might represent several different grantee principals. Cross-account use:
+// Yes. To perform this operation on a KMS key in a different Amazon Web Services
+// account, specify the key ARN in the value of the KeyId parameter. Required
+// permissions: kms:ListGrants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * CreateGrant
+//
+// * ListRetirableGrants
+//
+// *
+// RetireGrant
+//
+// * RevokeGrant
+func (c *Client) ListGrants(ctx context.Context, params *ListGrantsInput, optFns ...func(*Options)) (*ListGrantsOutput, error) {
+	if params == nil {
+		params = &ListGrantsInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListGrants", params, optFns, c.addOperationListGrantsMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListGrantsOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListGrantsInput struct {
+
+	// Returns only grants for the specified KMS key. This parameter is required.
+	// Specify the key ID or key ARN of the KMS key. To specify a KMS key in a
+	// different Amazon Web Services account, you must use the key ARN. For example:
+	//
+	// *
+	// Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Returns only the grant with the specified grant ID. The grant ID uniquely
+	// identifies the grant.
+	GrantId *string
+
+	// Returns only grants where the specified principal is the grantee principal for
+	// the grant.
+	GranteePrincipal *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 100, inclusive. If you do not include a value, it defaults
+	// to 50.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListGrantsOutput struct {
+
+	// A list of grants.
+	Grants []types.GrantListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListGrants{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListGrantsAPIClient is a client that implements the ListGrants operation. +type ListGrantsAPIClient interface { + ListGrants(context.Context, *ListGrantsInput, ...func(*Options)) (*ListGrantsOutput, error) +} + +var _ ListGrantsAPIClient = (*Client)(nil) + +// ListGrantsPaginatorOptions is the paginator options for ListGrants +type ListGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListGrantsPaginator is a paginator for ListGrants +type ListGrantsPaginator struct { + options ListGrantsPaginatorOptions + client ListGrantsAPIClient + params *ListGrantsInput + nextToken *string + firstPage bool +} + +// NewListGrantsPaginator returns a new ListGrantsPaginator +func NewListGrantsPaginator(client ListGrantsAPIClient, params *ListGrantsInput, optFns ...func(*ListGrantsPaginatorOptions)) *ListGrantsPaginator { + if params == nil { + params = &ListGrantsInput{} + } + + options := ListGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListGrants page. +func (p *ListGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListGrants(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go new file mode 100644 index 0000000000..a6c31313e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeyPolicies.go @@ -0,0 +1,257 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets the names of the key policies that are attached to a KMS key. This +// operation is designed to get policy names that you can use in a GetKeyPolicy +// operation. However, the only valid policy name is default. Cross-account use: +// No. You cannot perform this operation on a KMS key in a different Amazon Web +// Services account. 
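+//
+// As an illustrative sketch (assuming a configured *Client named client, a
+// context ctx, and the aws helper package), fetching the policy names might
+// look like:
+//
+//	out, err := client.ListKeyPolicies(ctx, &kms.ListKeyPoliciesInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	// out.PolicyNames contains "default", which can then be passed as the
+//	// PolicyName in a GetKeyPolicy request.
+//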
+// Required permissions: kms:ListKeyPolicies
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy) Related operations:
+//
+// * GetKeyPolicy
+//
+// * PutKeyPolicy
+func (c *Client) ListKeyPolicies(ctx context.Context, params *ListKeyPoliciesInput, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) {
+	if params == nil {
+		params = &ListKeyPoliciesInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListKeyPolicies", params, optFns, c.addOperationListKeyPoliciesMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListKeyPoliciesOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListKeyPoliciesInput struct {
+
+	// Gets the names of key policies for the specified KMS key. Specify the key ID or
+	// key ARN of the KMS key. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 1000, inclusive. If you do not include a value, it
+	// defaults to 100. Only one policy can be attached to a key.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListKeyPoliciesOutput struct {
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A list of key policy names. The only valid value is default.
+	PolicyNames []string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeyPoliciesMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeyPolicies{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListKeyPoliciesValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeyPolicies(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListKeyPoliciesAPIClient is a client that implements the ListKeyPolicies +// operation. +type ListKeyPoliciesAPIClient interface { + ListKeyPolicies(context.Context, *ListKeyPoliciesInput, ...func(*Options)) (*ListKeyPoliciesOutput, error) +} + +var _ ListKeyPoliciesAPIClient = (*Client)(nil) + +// ListKeyPoliciesPaginatorOptions is the paginator options for ListKeyPolicies +type ListKeyPoliciesPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 1000, inclusive. If you do not include a value, it + // defaults to 100. Only one policy can be attached to a key. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListKeyPoliciesPaginator is a paginator for ListKeyPolicies +type ListKeyPoliciesPaginator struct { + options ListKeyPoliciesPaginatorOptions + client ListKeyPoliciesAPIClient + params *ListKeyPoliciesInput + nextToken *string + firstPage bool +} + +// NewListKeyPoliciesPaginator returns a new ListKeyPoliciesPaginator +func NewListKeyPoliciesPaginator(client ListKeyPoliciesAPIClient, params *ListKeyPoliciesInput, optFns ...func(*ListKeyPoliciesPaginatorOptions)) *ListKeyPoliciesPaginator { + if params == nil { + params = &ListKeyPoliciesInput{} + } + + options := ListKeyPoliciesPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeyPoliciesPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeyPoliciesPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeyPolicies page. +func (p *ListKeyPoliciesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeyPoliciesOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListKeyPolicies(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListKeyPolicies(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListKeyPolicies", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go new file mode 100644 index 0000000000..c554f536e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListKeys.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Gets a list of all KMS keys in the caller's Amazon Web Services account and +// Region. Cross-account use: No. You cannot perform this operation on a KMS key in +// a different Amazon Web Services account. 
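+//
+// As an illustrative sketch (assuming a configured *Client named client, a
+// context ctx, and the aws helper package), enumerating key ARNs with the
+// paginator defined later in this file might look like:
+//
+//	p := kms.NewListKeysPaginator(client, &kms.ListKeysInput{})
+//	for p.HasMorePages() {
+//		page, err := p.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, k := range page.Keys {
+//			fmt.Println(aws.ToString(k.KeyArn))
+//		}
+//	}
+//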
+// Required permissions: kms:ListKeys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) Related operations:
+//
+// * CreateKey
+//
+// * DescribeKey
+//
+// * ListAliases
+//
+// *
+// ListResourceTags
+func (c *Client) ListKeys(ctx context.Context, params *ListKeysInput, optFns ...func(*Options)) (*ListKeysOutput, error) {
+	if params == nil {
+		params = &ListKeysInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ListKeys", params, optFns, c.addOperationListKeysMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ListKeysOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ListKeysInput struct {
+
+	// Use this parameter to specify the maximum number of items to return. When this
+	// value is present, KMS does not return more than the specified number of items,
+	// but it might return fewer. This value is optional. If you include a value, it
+	// must be between 1 and 1000, inclusive. If you do not include a value, it
+	// defaults to 100.
+	Limit *int32
+
+	// Use this parameter in a subsequent request after you receive a response with
+	// truncated results. Set it to the value of NextMarker from the truncated response
+	// you just received.
+	Marker *string
+
+	noSmithyDocumentSerde
+}
+
+type ListKeysOutput struct {
+
+	// A list of KMS keys.
+	Keys []types.KeyListEntry
+
+	// When Truncated is true, this element is present and contains the value to use
+	// for the Marker parameter in a subsequent request.
+	NextMarker *string
+
+	// A flag that indicates whether there are more items in the list. When this value
+	// is true, the list in this response is truncated. To get more items, pass the
+	// value of the NextMarker element in this response to the Marker parameter in a
+	// subsequent request.
+	Truncated bool
+
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListKeysMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListKeys{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListKeys(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListKeysAPIClient is a client that implements the ListKeys operation. +type ListKeysAPIClient interface { + ListKeys(context.Context, *ListKeysInput, ...func(*Options)) (*ListKeysOutput, error) +} + +var _ ListKeysAPIClient = (*Client)(nil) + +// ListKeysPaginatorOptions is the paginator options for ListKeys +type ListKeysPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 1000, inclusive. If you do not include a value, it + // defaults to 100. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListKeysPaginator is a paginator for ListKeys +type ListKeysPaginator struct { + options ListKeysPaginatorOptions + client ListKeysAPIClient + params *ListKeysInput + nextToken *string + firstPage bool +} + +// NewListKeysPaginator returns a new ListKeysPaginator +func NewListKeysPaginator(client ListKeysAPIClient, params *ListKeysInput, optFns ...func(*ListKeysPaginatorOptions)) *ListKeysPaginator { + if params == nil { + params = &ListKeysInput{} + } + + options := ListKeysPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListKeysPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListKeysPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListKeys page. +func (p *ListKeysPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListKeysOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListKeys(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListKeys(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListKeys", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go new file mode 100644 index 0000000000..1473b31cd4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListResourceTags.go @@ -0,0 +1,272 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns all tags on the specified KMS key. For general information about tags, +// including the format and syntax, see Tagging Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. For information about using tags in KMS, see +// Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). +// Cross-account use: No. You cannot perform this operation on a KMS key in a +// different Amazon Web Services account. 
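For orientation, here is a minimal sketch of how the ListKeys paginator defined above is typically driven from application code. It is illustrative only and not part of the vendored file; the client construction via config.LoadDefaultConfig and kms.NewFromConfig is an assumption about the caller.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

func main() {
	ctx := context.Background()

	// Resolve region and credentials from the usual default sources
	// (environment, shared config files, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	// The paginator hides the Marker/NextMarker/Truncated handshake
	// documented on ListKeysInput and ListKeysOutput above.
	p := kms.NewListKeysPaginator(client, &kms.ListKeysInput{Limit: aws.Int32(100)})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, k := range page.Keys {
			fmt.Println(aws.ToString(k.KeyId), aws.ToString(k.KeyArn))
		}
	}
}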
Required permissions: +// kms:ListResourceTags +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: +// +// * CreateKey +// +// * ReplicateKey +// +// * TagResource +// +// * +// UntagResource +func (c *Client) ListResourceTags(ctx context.Context, params *ListResourceTagsInput, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { + if params == nil { + params = &ListResourceTagsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListResourceTags", params, optFns, c.addOperationListResourceTagsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListResourceTagsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListResourceTagsInput struct { + + // Gets tags on the specified KMS key. Specify the key ID or key ARN of the KMS + // key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 50, inclusive. If you do not include a value, it defaults + // to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. Do not attempt to construct this value. Use only the value of + // NextMarker from the truncated response you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListResourceTagsOutput struct { + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. Do not assume or infer any + // information from this value. + NextMarker *string + + // A list of tags. Each tag consists of a tag key and a tag value. Tagging or + // untagging a KMS key can allow or deny permission to the KMS key. For details, + // see ABAC in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key + // Management Service Developer Guide. + Tags []types.Tag + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request. + Truncated bool + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListResourceTagsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListResourceTags{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListResourceTagsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListResourceTags(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListResourceTagsAPIClient is a client that implements the ListResourceTags +// operation. +type ListResourceTagsAPIClient interface { + ListResourceTags(context.Context, *ListResourceTagsInput, ...func(*Options)) (*ListResourceTagsOutput, error) +} + +var _ ListResourceTagsAPIClient = (*Client)(nil) + +// ListResourceTagsPaginatorOptions is the paginator options for ListResourceTags +type ListResourceTagsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 50, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListResourceTagsPaginator is a paginator for ListResourceTags +type ListResourceTagsPaginator struct { + options ListResourceTagsPaginatorOptions + client ListResourceTagsAPIClient + params *ListResourceTagsInput + nextToken *string + firstPage bool +} + +// NewListResourceTagsPaginator returns a new ListResourceTagsPaginator +func NewListResourceTagsPaginator(client ListResourceTagsAPIClient, params *ListResourceTagsInput, optFns ...func(*ListResourceTagsPaginatorOptions)) *ListResourceTagsPaginator { + if params == nil { + params = &ListResourceTagsInput{} + } + + options := ListResourceTagsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListResourceTagsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListResourceTagsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListResourceTags page. +func (p *ListResourceTagsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListResourceTagsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListResourceTags(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListResourceTags(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListResourceTags", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go new file mode 100644 index 0000000000..d15ead7208 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ListRetirableGrants.go @@ -0,0 +1,274 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns information about all grants in the Amazon Web Services account and +// Region that have the specified retiring principal. You can specify any principal +// in your Amazon Web Services account. The grants that are returned include grants +// for KMS keys in your Amazon Web Services account and other Amazon Web Services +// accounts. You might use this operation to determine which grants you may retire. +// To retire a grant, use the RetireGrant operation. 
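A similar hedged sketch for the ListResourceTags paginator above, assuming an already-constructed *kms.Client; the key ID is a placeholder. The types.Tag TagKey/TagValue fields are the ones documented on ListResourceTagsOutput.

package kmsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printKeyTags lists every tag on one KMS key, page by page, letting the
// paginator handle the Truncated/NextMarker bookkeeping described above.
func printKeyTags(ctx context.Context, client *kms.Client, keyID string) error {
	p := kms.NewListResourceTagsPaginator(client, &kms.ListResourceTagsInput{
		KeyId: aws.String(keyID), // key ID or key ARN; placeholder value
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, t := range page.Tags {
			fmt.Printf("%s=%s\n", aws.ToString(t.TagKey), aws.ToString(t.TagValue))
		}
	}
	return nil
}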
For detailed information about +// grants, including grant terminology, see Grants in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key +// Management Service Developer Guide . For examples of working with grants in +// several programming languages, see Programming grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). +// Cross-account use: You must specify a principal in your Amazon Web Services +// account. However, this operation can return grants in any Amazon Web Services +// account. You do not need kms:ListRetirableGrants permission (or any other +// additional permission) in any Amazon Web Services account other than your own. +// Required permissions: kms:ListRetirableGrants +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (IAM policy) in your Amazon Web Services account. Related operations: +// +// * +// CreateGrant +// +// * ListGrants +// +// * RetireGrant +// +// * RevokeGrant +func (c *Client) ListRetirableGrants(ctx context.Context, params *ListRetirableGrantsInput, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { + if params == nil { + params = &ListRetirableGrantsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListRetirableGrants", params, optFns, c.addOperationListRetirableGrantsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListRetirableGrantsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListRetirableGrantsInput struct { + + // The retiring principal for which to list grants. Enter a principal in your + // Amazon Web Services account. To specify the retiring principal, use the Amazon + // Resource Name (ARN) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of + // an Amazon Web Services principal. Valid Amazon Web Services principals include + // Amazon Web Services accounts (root), IAM users, federated users, and assumed + // role users. For examples of the ARN syntax for specifying a principal, see + // Amazon Web Services Identity and Access Management (IAM) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) + // in the Example ARNs section of the Amazon Web Services General Reference. + // + // This member is required. + RetiringPrincipal *string + + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit *int32 + + // Use this parameter in a subsequent request after you receive a response with + // truncated results. Set it to the value of NextMarker from the truncated response + // you just received. + Marker *string + + noSmithyDocumentSerde +} + +type ListRetirableGrantsOutput struct { + + // A list of grants. + Grants []types.GrantListEntry + + // When Truncated is true, this element is present and contains the value to use + // for the Marker parameter in a subsequent request. + NextMarker *string + + // A flag that indicates whether there are more items in the list. When this value + // is true, the list in this response is truncated. To get more items, pass the + // value of the NextMarker element in this response to the Marker parameter in a + // subsequent request.
+ Truncated bool + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListRetirableGrantsMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpListRetirableGrants{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpListRetirableGrantsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRetirableGrants(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +// ListRetirableGrantsAPIClient is a client that implements the ListRetirableGrants +// operation. +type ListRetirableGrantsAPIClient interface { + ListRetirableGrants(context.Context, *ListRetirableGrantsInput, ...func(*Options)) (*ListRetirableGrantsOutput, error) +} + +var _ ListRetirableGrantsAPIClient = (*Client)(nil) + +// ListRetirableGrantsPaginatorOptions is the paginator options for +// ListRetirableGrants +type ListRetirableGrantsPaginatorOptions struct { + // Use this parameter to specify the maximum number of items to return. When this + // value is present, KMS does not return more than the specified number of items, + // but it might return fewer. This value is optional. If you include a value, it + // must be between 1 and 100, inclusive. If you do not include a value, it defaults + // to 50. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListRetirableGrantsPaginator is a paginator for ListRetirableGrants +type ListRetirableGrantsPaginator struct { + options ListRetirableGrantsPaginatorOptions + client ListRetirableGrantsAPIClient + params *ListRetirableGrantsInput + nextToken *string + firstPage bool +} + +// NewListRetirableGrantsPaginator returns a new ListRetirableGrantsPaginator +func NewListRetirableGrantsPaginator(client ListRetirableGrantsAPIClient, params *ListRetirableGrantsInput, optFns ...func(*ListRetirableGrantsPaginatorOptions)) *ListRetirableGrantsPaginator { + if params == nil { + params = &ListRetirableGrantsInput{} + } + + options := ListRetirableGrantsPaginatorOptions{} + if params.Limit != nil { + options.Limit = *params.Limit + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListRetirableGrantsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.Marker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListRetirableGrantsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListRetirableGrants page. +func (p *ListRetirableGrantsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListRetirableGrantsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.Marker = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.Limit = limit + + result, err := p.client.ListRetirableGrants(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.NextMarker + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListRetirableGrants(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ListRetirableGrants", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go new file mode 100644 index 0000000000..20b01fc8fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_PutKeyPolicy.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Attaches a key policy to the specified KMS key. For more information about key +// policies, see Key Policies +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the +// Key Management Service Developer Guide. For help writing and formatting a JSON +// policy document, see the IAM JSON Policy Reference +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in +// the Identity and Access Management User Guide . 
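Since ListRetirableGrants requires a RetiringPrincipal while the grants themselves may live on keys in other accounts, a short usage sketch may help; the principal ARN below is a placeholder, and the client is assumed to be configured elsewhere.

package kmsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// printRetirableGrants lists the grants that the given principal may retire.
// Any principal in your own account is valid; the ARN here is illustrative.
func printRetirableGrants(ctx context.Context, client *kms.Client, principalARN string) error {
	p := kms.NewListRetirableGrantsPaginator(client, &kms.ListRetirableGrantsInput{
		RetiringPrincipal: aws.String(principalARN),
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, g := range page.Grants {
			fmt.Println(aws.ToString(g.GrantId), aws.ToString(g.KeyId))
		}
	}
	return nil
}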
For examples of adding a key +// policy in multiple programming languages, see Setting a key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) +// in the Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:PutKeyPolicy +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: GetKeyPolicy +func (c *Client) PutKeyPolicy(ctx context.Context, params *PutKeyPolicyInput, optFns ...func(*Options)) (*PutKeyPolicyOutput, error) { + if params == nil { + params = &PutKeyPolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutKeyPolicy", params, optFns, c.addOperationPutKeyPolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutKeyPolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutKeyPolicyInput struct { + + // Sets the key policy on the specified KMS key. Specify the key ID or key ARN of + // the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key + // ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The key policy to attach to the KMS key. The key policy must meet the following + // criteria: + // + // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key + // policy must allow the principal that is making the PutKeyPolicy request to make + // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk that the + // KMS key becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the Key Management Service Developer Guide. + // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Amazon Web Services Identity and Access Management User Guide. + // + // A key + // policy document can include only the following characters: + // + // * Printable ASCII + // characters from the space character (\u0020) through the end of the ASCII + // character range. + // + // * Printable characters in the Basic Latin and Latin-1 + // Supplement character set (through \u00FF). + // + // * The tab (\u0009), line feed + // (\u000A), and carriage return (\u000D) special characters + // + // For information about + // key policies, see Key policies in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the + // Key Management Service Developer Guide. 
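Tying the Policy criteria above together, here is a hedged sketch of a PutKeyPolicy call. The JSON document is only an illustration of a policy that keeps the account root as administrator (so the lockout safety check passes), and the account ID is the documentation placeholder 111122223333.

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// setDefaultStylePolicy attaches a minimal key policy. PolicyName must be
// "default"; granting kms:* to the account root keeps the key manageable.
func setDefaultStylePolicy(ctx context.Context, client *kms.Client, keyID string) error {
	policy := `{
  "Version": "2012-10-17",
  "Id": "key-default-1",
  "Statement": [{
    "Sid": "Enable IAM User Permissions",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "kms:*",
    "Resource": "*"
  }]
}`
	_, err := client.PutKeyPolicy(ctx, &kms.PutKeyPolicyInput{
		KeyId:      aws.String(keyID), // key ID or key ARN; placeholder
		Policy:     aws.String(policy),
		PolicyName: aws.String("default"),
	})
	return err
}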
For help writing and formatting a JSON + // policy document, see the IAM JSON Policy Reference + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in + // the Identity and Access Management User Guide . + // + // This member is required. + Policy *string + + // The name of the key policy. The only valid value is default. + // + // This member is required. + PolicyName *string + + // A flag to indicate whether to bypass the key policy lockout safety check. + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. For more + // information, refer to the scenario in the Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the Key Management Service Developer Guide. Use this parameter only + // when you intend to prevent the principal that is making the request from making + // a subsequent PutKeyPolicy request on the KMS key. The default value is false. + BypassPolicyLockoutSafetyCheck bool + + noSmithyDocumentSerde +} + +type PutKeyPolicyOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutKeyPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpPutKeyPolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpPutKeyPolicy{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutKeyPolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutKeyPolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opPutKeyPolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "PutKeyPolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go new file mode 100644 index 0000000000..26f7bb6025 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReEncrypt.go @@ -0,0 +1,329 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use this +// operation to change the KMS key under which data is encrypted, such as when you +// manually rotate +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) +// a KMS key or change the KMS key that protects a ciphertext. You can also use it +// to reencrypt ciphertext under the same KMS key, such as to change the encryption +// context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// of a ciphertext. The ReEncrypt operation can decrypt ciphertext that was +// encrypted by using a KMS key in an KMS operation, such as Encrypt or +// GenerateDataKey. It can also decrypt ciphertext that was encrypted by using the +// public key of an asymmetric KMS key +// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) +// outside of KMS. However, it cannot decrypt ciphertext produced by other +// libraries, such as the Amazon Web Services Encryption SDK +// (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) or Amazon +// S3 client-side encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). +// These libraries return a ciphertext format that is incompatible with KMS. When +// you use the ReEncrypt operation, you need to provide information for the decrypt +// operation and the subsequent encrypt operation. +// +// * If your ciphertext was +// encrypted under an asymmetric KMS key, you must use the SourceKeyId parameter to +// identify the KMS key that encrypted the ciphertext. You must also supply the +// encryption algorithm that was used. This information is required to decrypt the +// data. +// +// * If your ciphertext was encrypted under a symmetric encryption KMS key, +// the SourceKeyId parameter is optional. KMS can get this information from +// metadata that it adds to the symmetric ciphertext blob. This feature adds +// durability to your implementation by ensuring that authorized users can decrypt +// ciphertext decades after it was encrypted, even if they've lost track of the key +// ID. However, specifying the source KMS key is always recommended as a best +// practice. When you use the SourceKeyId parameter to specify a KMS key, KMS uses +// only the KMS key you specify. If the ciphertext was encrypted under a different +// KMS key, the ReEncrypt operation fails. This practice ensures that you use the +// KMS key that you intend. +// +// * To reencrypt the data, you must use the +// DestinationKeyId parameter to specify the KMS key that re-encrypts the data after +// it is decrypted. If the destination KMS key is an asymmetric KMS key, you must +// also provide the encryption algorithm. The algorithm that you choose must be +// compatible with the KMS key.
When you use an asymmetric KMS key to encrypt or +// reencrypt data, be sure to record the KMS key and encryption algorithm that you +// choose. You will be required to provide the same KMS key and encryption +// algorithm when you decrypt the data. If the KMS key and algorithm do not match +// the values used to encrypt the data, the decrypt operation fails. You are not +// required to supply the key ID and encryption algorithm when you decrypt with +// symmetric encryption KMS keys because KMS stores this information in the +// ciphertext blob. KMS cannot store metadata in ciphertext generated with +// asymmetric keys. The standard format for asymmetric key ciphertext does not +// include configurable fields. +// +// The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. The source KMS +// key and destination KMS key can be in different Amazon Web Services accounts. +// Either or both KMS keys can be in a different account than the caller. To +// specify a KMS key in a different account, you must use its key ARN or alias ARN. +// Required permissions: +// +// * kms:ReEncryptFrom +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the source KMS key (key policy) +// +// * kms:ReEncryptTo +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// permission on the destination KMS key (key policy) +// +// To permit reencryption from +// or to a KMS key, include the "kms:ReEncrypt*" permission in your key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). This +// permission is automatically included in the key policy when you use the console +// to create a KMS key. But you must include it manually when you create a KMS key +// programmatically or when you use the PutKeyPolicy operation to set a key policy. +// Related operations: +// +// * Decrypt +// +// * Encrypt +// +// * GenerateDataKey +// +// * +// GenerateDataKeyPair +func (c *Client) ReEncrypt(ctx context.Context, params *ReEncryptInput, optFns ...func(*Options)) (*ReEncryptOutput, error) { + if params == nil { + params = &ReEncryptInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReEncrypt", params, optFns, c.addOperationReEncryptMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReEncryptOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReEncryptInput struct { + + // Ciphertext of the data to reencrypt. + // + // This member is required. + CiphertextBlob []byte + + // A unique identifier for the KMS key that is used to reencrypt the data. Specify + // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value of + // ENCRYPT_DECRYPT. To find the KeyUsage value of a KMS key, use the DescribeKey + // operation. To specify a KMS key, use its key ID, key ARN, alias name, or alias + // ARN. When using an alias name, prefix it with "alias/". To specify a KMS key in + // a different Amazon Web Services account, you must use the key ARN or alias ARN. 
+ // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + DestinationKeyId *string + + // Specifies the encryption algorithm that KMS will use to reencrypt the data after + // it has decrypted it. The default value, SYMMETRIC_DEFAULT, represents the + // encryption algorithm used for symmetric encryption KMS keys. This parameter is + // required only when the destination KMS key is an asymmetric KMS key. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use when reencrypting the data. A + // destination encryption context is valid only when the destination KMS key is a + // symmetric encryption KMS key. The standard ciphertext format for asymmetric KMS + // keys does not include fields for metadata. An encryption context is a collection + // of non-secret key-value pairs that represent additional authenticated data. When + // you use an encryption context to encrypt data, you must specify the same (an + // exact case-sensitive match) encryption context to decrypt the data. An + // encryption context is supported only on operations with symmetric encryption KMS + // keys. On operations with symmetric encryption KMS keys, an encryption context is + // optional, but it is strongly recommended. For more information, see Encryption + // context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + DestinationEncryptionContext map[string]string + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext + // before it is reencrypted. The default value, SYMMETRIC_DEFAULT, represents the + // algorithm used for symmetric encryption KMS keys. Specify the same algorithm + // that was used to encrypt the ciphertext. If you specify a different algorithm, + // the decrypt attempt fails. This parameter is required only when the ciphertext + // was encrypted under an asymmetric KMS key. + SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Specifies the encryption context to use to decrypt the ciphertext. Enter the + // same encryption context that was used to encrypt the ciphertext. An encryption + // context is a collection of non-secret key-value pairs that represent additional + // authenticated data. When you use an encryption context to encrypt data, you must + // specify the same (an exact case-sensitive match) encryption context to decrypt + // the data. An encryption context is supported only on operations with symmetric + // encryption KMS keys.
On operations with symmetric encryption KMS keys, an + // encryption context is optional, but it is strongly recommended. For more + // information, see Encryption context + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // in the Key Management Service Developer Guide. + SourceEncryptionContext map[string]string + + // Specifies the KMS key that KMS will use to decrypt the ciphertext before it is + // re-encrypted. Enter a key ID of the KMS key that was used to encrypt the + // ciphertext. If you identify a different KMS key, the ReEncrypt operation throws + // an IncorrectKeyException. This parameter is required only when the ciphertext + // was encrypted under an asymmetric KMS key. If you used a symmetric encryption + // KMS key, KMS can get the KMS key from metadata that it adds to the symmetric + // ciphertext blob. However, it is always recommended as a best practice. This + // practice ensures that you use the KMS key that you intend. To specify a KMS key, + // use its key ID, key ARN, alias name, or alias ARN. When using an alias name, + // prefix it with "alias/". To specify a KMS key in a different Amazon Web Services + // account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + SourceKeyId *string + + noSmithyDocumentSerde +} + +type ReEncryptOutput struct { + + // The reencrypted data. When you use the HTTP API or the Amazon Web Services CLI, + // the value is Base64-encoded. Otherwise, it is not Base64-encoded. + CiphertextBlob []byte + + // The encryption algorithm that was used to reencrypt the data. + DestinationEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key that was used to reencrypt the data. + KeyId *string + + // The encryption algorithm that was used to decrypt the ciphertext before it was + // reencrypted. + SourceEncryptionAlgorithm types.EncryptionAlgorithmSpec + + // Unique identifier of the KMS key used to originally encrypt the data. + SourceKeyId *string + + // Metadata pertaining to the operation's result. 
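Given the input and output shapes above, a hedged sketch of the common "manual rotation" use of ReEncrypt follows. SourceKeyId is deliberately omitted because, for symmetric ciphertexts, KMS reads the key from the ciphertext metadata, as the documentation above notes; the destination key ARN is a placeholder.

package kmsexample

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// rotateCiphertext re-encrypts a symmetric ciphertext under a new KMS key
// without the plaintext ever leaving KMS.
func rotateCiphertext(ctx context.Context, client *kms.Client, blob []byte, destKeyARN string) ([]byte, error) {
	out, err := client.ReEncrypt(ctx, &kms.ReEncryptInput{
		CiphertextBlob:   blob,
		DestinationKeyId: aws.String(destKeyARN), // placeholder key ARN
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil
}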
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReEncryptMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReEncrypt{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpReEncryptValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReEncrypt(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReEncrypt(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ReEncrypt", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go new file mode 100644 index 0000000000..e8f2e46786 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ReplicateKey.go @@ -0,0 +1,337 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Replicates a multi-Region key into the specified Region. This operation creates +// a multi-Region replica key based on a multi-Region primary key in a different +// Region of the same Amazon Web Services partition. You can create multiple +// replicas of a primary key, but each must be in a different Region. To create a +// multi-Region primary key, use the CreateKey operation. This operation supports +// multi-Region keys, an KMS feature that lets you create multiple interoperable +// KMS keys in different Amazon Web Services Regions. 
Because these KMS keys have +// the same key ID, key material, and other metadata, you can use them +// interchangeably to encrypt data in one Amazon Web Services Region and decrypt it +// in a different Amazon Web Services Region without re-encrypting the data or +// making a cross-Region call. For more information about multi-Region keys, see +// Multi-Region keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) +// in the Key Management Service Developer Guide. A replica key is a +// fully-functional KMS key that can be used independently of its primary and peer +// replica keys. A primary key and its replica keys share properties that make them +// interoperable. They have the same key ID +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id) +// and key material. They also have the same key spec +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec), +// key usage +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage), +// key material origin +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin), +// and automatic key rotation status +// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). KMS +// automatically synchronizes these shared properties among related multi-Region +// keys. All other properties of a replica key can differ, including its key policy +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html), tags +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html), +// aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html), +// and Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). KMS +// pricing and quotas for KMS keys apply to each primary key and replica key. When +// this operation completes, the new replica key has a transient key state of +// Creating. This key state changes to Enabled (or PendingImport) after a few +// seconds when the process of creating the new replica key is complete. While the +// key state is Creating, you can manage the key, but you cannot yet use it in +// cryptographic operations. If you are creating and using the replica key +// programmatically, retry on KMSInvalidStateException or call DescribeKey to check +// its KeyState value before using it. For details about the Creating key state, +// see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. You cannot create more than one replica +// of a primary key in any Region. If the Region already includes a replica of the +// key you're trying to replicate, ReplicateKey returns an AlreadyExistsException +// error. If the key state of the existing replica is PendingDeletion, you can +// cancel the scheduled key deletion (CancelKeyDeletion) or wait for the key to be +// deleted. The new replica key you create will have the same shared properties +// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties) +// as the original replica key. The CloudTrail log of a ReplicateKey operation +// records a ReplicateKey operation in the primary key's Region and a CreateKey +// operation in the replica key's Region. If you replicate a multi-Region primary +// key with imported key material, the replica key is created with no key material.
+// You must import the same key material that you imported into the primary key. +// For details, see Importing key material into multi-Region keys in the Key +// Management Service Developer Guide. To convert a replica key to a primary key, +// use the UpdatePrimaryRegion operation. ReplicateKey uses different default +// values for the KeyPolicy and Tags parameters than those used in the KMS console. +// For details, see the parameter descriptions. Cross-account use: No. You cannot +// use this operation to create a replica key in a different Amazon Web Services +// account. Required permissions: +// +// * kms:ReplicateKey on the primary key (in the +// primary key's Region). Include this permission in the primary key's key +// policy. +// +// * kms:CreateKey in an IAM policy in the replica Region. +// +// * To use the +// Tags parameter, kms:TagResource in an IAM policy in the replica Region. +// +// Related +// operations +// +// * CreateKey +// +// * UpdatePrimaryRegion +func (c *Client) ReplicateKey(ctx context.Context, params *ReplicateKeyInput, optFns ...func(*Options)) (*ReplicateKeyOutput, error) { + if params == nil { + params = &ReplicateKeyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ReplicateKey", params, optFns, c.addOperationReplicateKeyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ReplicateKeyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ReplicateKeyInput struct { + + // Identifies the multi-Region primary key that is being replicated. To determine + // whether a KMS key is a multi-Region primary key, use the DescribeKey operation + // to check the value of the MultiRegionKeyType property. Specify the key ID or key + // ARN of a multi-Region primary key. For example: + // + // * Key ID: + // mrk-1234abcd12ab34cd56ef1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The Region ID of the Amazon Web Services Region for this replica key. Enter the + // Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon Web + // Services Regions in which KMS is supported, see KMS service endpoints + // (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the + // Amazon Web Services General Reference. HMAC KMS keys are not supported in all + // Amazon Web Services Regions. If you try to replicate an HMAC KMS key in an + // Amazon Web Services Region in which HMAC keys are not supported, the + // ReplicateKey operation returns an UnsupportedOperationException. For a list of + // Regions in which HMAC KMS keys are supported, see HMAC keys in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key + // Management Service Developer Guide. The replica must be in a different Amazon + // Web Services Region than its primary key and other replicas of that primary key, + // but in the same Amazon Web Services partition. KMS must be available in the + // replica Region. If the Region is not enabled by default, the Amazon Web Services + // account must be enabled in the Region. For information about Amazon Web Services + // partitions, see Amazon Resource Names (ARNs) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in + // the Amazon Web Services General Reference. 
For information about enabling and + // disabling Regions, see Enabling a Region + // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) + // and Disabling a Region + // (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) + // in the Amazon Web Services General Reference. + // + // This member is required. + ReplicaRegion *string + + // A flag to indicate whether to bypass the key policy lockout safety check. + // Setting this value to true increases the risk that the KMS key becomes + // unmanageable. Do not set this value to true indiscriminately. For more + // information, refer to the scenario in the Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the Key Management Service Developer Guide. Use this parameter only + // when you intend to prevent the principal that is making the request from making + // a subsequent PutKeyPolicy request on the KMS key. The default value is false. + BypassPolicyLockoutSafetyCheck bool + + // A description of the KMS key. The default value is an empty string (no + // description). The description is not a shared property of multi-Region keys. You + // can specify the same description or a different description for each key in a + // set of related multi-Region keys. KMS does not synchronize this property. + Description *string + + // The key policy to attach to the KMS key. This parameter is optional. If you do + // not provide a key policy, KMS attaches the default key policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // to the KMS key. The key policy is not a shared property of multi-Region keys. + // You can specify the same key policy or a different key policy for each key in a + // set of related multi-Region keys. KMS does not synchronize this property. If you + // provide a key policy, it must meet the following criteria: + // + // * If you don't set + // BypassPolicyLockoutSafetyCheck to true, the key policy must give the caller + // kms:PutKeyPolicy permission on the replica key. This reduces the risk that the + // KMS key becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section of the Key Management Service Developer Guide . + // + // * Each statement in the + // key policy must contain one or more principals. The principals in the key policy + // must exist and be visible to KMS. When you create a new Amazon Web Services + // principal (for example, an IAM user or role), you might need to enforce a delay + // before including the new principal in a key policy because the new principal + // might not be immediately visible to KMS. For more information, see Changes that + // I make are not always immediately visible + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the Identity and Access Management User Guide . + // + // A key policy document can + // include only the following characters: + // + // * Printable ASCII characters from the + // space character (\u0020) through the end of the ASCII character range. + // + // * + // Printable characters in the Basic Latin and Latin-1 Supplement character set + // (through \u00FF). 
+ // + // * The tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) special characters + // + // For information about key policies, see Key + // policies in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) in the + // Key Management Service Developer Guide. For help writing and formatting a JSON + // policy document, see the IAM JSON Policy Reference + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) in + // the Identity and Access Management User Guide . + Policy *string + + // Assigns one or more tags to the replica key. Use this parameter to tag the KMS + // key when it is created. To tag an existing KMS key, use the TagResource + // operation. Tagging or untagging a KMS key can allow or deny permission to the + // KMS key. For details, see ABAC in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key + // Management Service Developer Guide. To use this parameter, you must have + // kms:TagResource + // (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) + // permission in an IAM policy. Tags are not a shared property of multi-Region + // keys. You can specify the same tags or different tags for each key in a set of + // related multi-Region keys. KMS does not synchronize this property. Each tag + // consists of a tag key and a tag value. Both the tag key and the tag value are + // required, but the tag value can be an empty (null) string. You cannot have more + // than one tag on a KMS key with the same tag key. If you specify an existing tag + // key with a different tag value, KMS replaces the current tag value with the + // specified one. When you add tags to an Amazon Web Services resource, Amazon Web + // Services generates a cost allocation report with usage and costs aggregated by + // tags. Tags can also be used to control access to a KMS key. For details, see + // Tagging Keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). + Tags []types.Tag + + noSmithyDocumentSerde +} + +type ReplicateKeyOutput struct { + + // Displays details about the new replica key, including its Amazon Resource Name + // (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // and Key states of KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). It also + // includes the ARN and Amazon Web Services Region of its primary key and other + // replica keys. + ReplicaKeyMetadata *types.KeyMetadata + + // The key policy of the new replica key. The value is a key policy document in + // JSON format. + ReplicaPolicy *string + + // The tags on the new replica key. The value is a list of tag key and tag value + // pairs. + ReplicaTags []types.Tag + + // Metadata pertaining to the operation's result. 
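Since ReplicateKey only needs the multi-Region primary key and a target Region in the simple case, a short hedged sketch may help; the mrk- key ID (in the documentation's example format) and the Region are placeholders, and the client is assumed to be configured for the primary key's Region.

package kmsexample

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kms"
)

// replicateToRegion creates a replica of a multi-Region primary key.
// The replica starts in the transient Creating key state, so callers should
// check DescribeKey (or retry on KMSInvalidStateException) before using it.
func replicateToRegion(ctx context.Context, client *kms.Client) error {
	out, err := client.ReplicateKey(ctx, &kms.ReplicateKeyInput{
		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"), // placeholder
		ReplicaRegion: aws.String("us-west-2"),                            // placeholder
	})
	if err != nil {
		return err
	}
	fmt.Println("replica ARN:", aws.ToString(out.ReplicaKeyMetadata.Arn))
	return nil
}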
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationReplicateKeyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpReplicateKey{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpReplicateKeyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplicateKey(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opReplicateKey(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "ReplicateKey", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go new file mode 100644 index 0000000000..adb4876b92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RetireGrant.go @@ -0,0 +1,159 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a grant. Typically, you retire a grant when you no longer need its +// permissions. To identify the grant to retire, use a grant token +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token), +// or both the grant ID and a key identifier (key ID or key ARN) of the KMS key. +// The CreateGrant operation returns both values. This operation can be called by +// the retiring principal for a grant, by the grantee principal if the grant allows +// the RetireGrant operation, and by the Amazon Web Services account in which the +// grant is created. It can also be called by principals to whom permission for +// retiring a grant is delegated. 
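+//
+// As a brief editorial sketch (not part of the generated code), a caller that
+// holds the grant token from CreateGrant could retire the grant like this,
+// assuming a configured *kms.Client named client, a context.Context named ctx,
+// and the aws helper package from aws-sdk-go-v2:
+//
+//	_, err := client.RetireGrant(ctx, &kms.RetireGrantInput{
+//		GrantToken: aws.String(grantToken), // token returned by CreateGrant
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//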
For details, see Retiring and revoking grants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
+// in the Key Management Service Developer Guide. For detailed information about
+// grants, including grant terminology, see Grants in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key
+// Management Service Developer Guide. For examples of working with grants in
+// several programming languages, see Programming grants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html).
+// Cross-account use: Yes. You can retire a grant on a KMS key in a different
+// Amazon Web Services account. Required permissions: Permission to retire a grant
+// is determined primarily by the grant. For details, see Retiring and revoking
+// grants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
+// in the Key Management Service Developer Guide. Related operations:
+//
+// *
+// CreateGrant
+//
+// * ListGrants
+//
+// * ListRetirableGrants
+//
+// * RevokeGrant
+func (c *Client) RetireGrant(ctx context.Context, params *RetireGrantInput, optFns ...func(*Options)) (*RetireGrantOutput, error) {
+	if params == nil {
+		params = &RetireGrantInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "RetireGrant", params, optFns, c.addOperationRetireGrantMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*RetireGrantOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type RetireGrantInput struct {
+
+	// Identifies the grant to retire. To get the grant ID, use CreateGrant,
+	// ListGrants, or ListRetirableGrants.
+	//
+	// * Grant ID Example -
+	// 0123456789012345678901234567890123456789012345678901234567890123
+	GrantId *string
+
+	// Identifies the grant to be retired. You can use a grant token to identify a new
+	// grant even before it has achieved eventual consistency. Only the CreateGrant
+	// operation returns a grant token. For details, see Grant token
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+	// and Eventual consistency
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency)
+	// in the Key Management Service Developer Guide.
+	GrantToken *string
+
+	// The key ARN of the KMS key associated with the grant. To find the key ARN, use
+	// the ListKeys operation. For example:
+	// arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	KeyId *string
+
+	noSmithyDocumentSerde
+}
+
+type RetireGrantOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRetireGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRetireGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRetireGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRetireGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "RetireGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go new file mode 100644 index 0000000000..75676053d4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_RevokeGrant.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the specified grant. You revoke a grant to terminate the permissions +// that the grant allows. For more information, see Retiring and revoking grants +// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) +// in the Key Management Service Developer Guide . When you create, retire, or +// revoke a grant, there might be a brief delay, usually less than five minutes, +// until the grant is available throughout KMS. This state is known as eventual +// consistency. For details, see Eventual consistency +// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) +// in the Key Management Service Developer Guide . 
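+//
+// As an illustrative sketch (an editorial addition, assuming a configured
+// *kms.Client named client and a context.Context named ctx), revoking a grant
+// takes the grant ID and a key identifier:
+//
+//	_, err := client.RevokeGrant(ctx, &kms.RevokeGrantInput{
+//		KeyId:   aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		GrantId: aws.String("0123456789012345678901234567890123456789012345678901234567890123"),
+//	})
+//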
For detailed information about
+// grants, including grant terminology, see Grants in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) in the Key
+// Management Service Developer Guide. For examples of working with grants in
+// several programming languages, see Programming grants
+// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html).
+// Cross-account use: Yes. To perform this operation on a KMS key in a different
+// Amazon Web Services account, specify the key ARN in the value of the KeyId
+// parameter. Required permissions: kms:RevokeGrant
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (key policy). Related operations:
+//
+// * CreateGrant
+//
+// * ListGrants
+//
+// *
+// ListRetirableGrants
+//
+// * RetireGrant
+func (c *Client) RevokeGrant(ctx context.Context, params *RevokeGrantInput, optFns ...func(*Options)) (*RevokeGrantOutput, error) {
+	if params == nil {
+		params = &RevokeGrantInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "RevokeGrant", params, optFns, c.addOperationRevokeGrantMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*RevokeGrantOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type RevokeGrantInput struct {
+
+	// Identifies the grant to revoke. To get the grant ID, use CreateGrant,
+	// ListGrants, or ListRetirableGrants.
+	//
+	// This member is required.
+	GrantId *string
+
+	// A unique identifier for the KMS key associated with the grant. Specify the key
+	// ID or key ARN of the KMS key. To specify a KMS key in a different Amazon Web
+	// Services account, you must use the key ARN. For example:
+	//
+	// * Key ID:
+	// 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	noSmithyDocumentSerde
+}
+
+type RevokeGrantOutput struct {
+	// Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRevokeGrantMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpRevokeGrant{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRevokeGrantValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeGrant(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRevokeGrant(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "RevokeGrant", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go new file mode 100644 index 0000000000..7056a43638 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_ScheduleKeyDeletion.go @@ -0,0 +1,202 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "time" +) + +// Schedules the deletion of a KMS key. By default, KMS applies a waiting period of +// 30 days, but you can specify a waiting period of 7-30 days. When this operation +// is successful, the key state of the KMS key changes to PendingDeletion and the +// key can't be used in any cryptographic operations. It remains in this state for +// the duration of the waiting period. Before the waiting period ends, you can use +// CancelKeyDeletion to cancel the deletion of the KMS key. After the waiting +// period ends, KMS deletes the KMS key, its key material, and all KMS data +// associated with it, including all aliases that refer to it. 
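+//
+// For illustration (an editorial sketch, assuming a configured *kms.Client
+// named client and a context.Context named ctx):
+//
+//	out, err := client.ScheduleKeyDeletion(ctx, &kms.ScheduleKeyDeletionInput{
+//		KeyId:               aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		PendingWindowInDays: aws.Int32(7), // shortest allowed waiting period
+//	})
+//	// On success, out.DeletionDate reports when KMS will delete the key.
+//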
Deleting a KMS key
+// is a destructive and potentially dangerous operation. When a KMS key is
+// deleted, all data that was encrypted under the KMS key is unrecoverable. (The
+// only exception is a multi-Region replica key.) To prevent the use of a KMS key
+// without deleting it, use DisableKey. If you schedule deletion of a KMS key from
+// a custom key store
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
+// when the waiting period expires, ScheduleKeyDeletion deletes the KMS key from
+// KMS. Then KMS makes a best effort to delete the key material from the
+// associated CloudHSM cluster. However, you might need to manually delete the
+// orphaned key material
+// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
+// from the cluster and its backups. You can schedule the deletion of a
+// multi-Region primary key and its replica keys at any time. However, KMS will
+// not delete a multi-Region primary key with existing replica keys. If you
+// schedule the deletion of a primary key with replicas, its key state changes to
+// PendingReplicaDeletion and it cannot be replicated or used in cryptographic
+// operations. This status can continue indefinitely. When the last of its replica
+// keys is deleted (not just scheduled), the key state of the primary key changes
+// to PendingDeletion and its waiting period (PendingWindowInDays) begins. For
+// details, see Deleting multi-Region keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
+// in the Key Management Service Developer Guide. For more information about
+// scheduling a KMS key for deletion, see Deleting KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) in
+// the Key Management Service Developer Guide. The KMS key that you use for this
+// operation must be in a compatible key state. For details, see Key states of KMS
+// keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in
+// the Key Management Service Developer Guide. Cross-account use: No. You cannot
+// perform this operation on a KMS key in a different Amazon Web Services account.
+// Required permissions: kms:ScheduleKeyDeletion (key policy) Related operations
+//
+// *
+// CancelKeyDeletion
+//
+// * DisableKey
+func (c *Client) ScheduleKeyDeletion(ctx context.Context, params *ScheduleKeyDeletionInput, optFns ...func(*Options)) (*ScheduleKeyDeletionOutput, error) {
+	if params == nil {
+		params = &ScheduleKeyDeletionInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "ScheduleKeyDeletion", params, optFns, c.addOperationScheduleKeyDeletionMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*ScheduleKeyDeletionOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type ScheduleKeyDeletionInput struct {
+
+	// The unique identifier of the KMS key to delete. Specify the key ID or key ARN
+	// of the KMS key. For example:
+	//
+	// * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// * Key
+	// ARN:
+	// arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
+	//
+	// To
+	// get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
+	//
+	// This member is required.
+	KeyId *string
+
+	// The waiting period, specified in number of days. After the waiting period ends,
+	// KMS deletes the KMS key.
If the KMS key is a multi-Region primary key with + // replica keys, the waiting period begins when the last of its replica keys is + // deleted. Otherwise, the waiting period begins immediately. This value is + // optional. If you include a value, it must be between 7 and 30, inclusive. If you + // do not include a value, it defaults to 30. + PendingWindowInDays *int32 + + noSmithyDocumentSerde +} + +type ScheduleKeyDeletionOutput struct { + + // The date and time after which KMS deletes the KMS key. If the KMS key is a + // multi-Region primary key with replica keys, this field does not appear. The + // deletion date for the primary key isn't known until its last replica key is + // deleted. + DeletionDate *time.Time + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the KMS key whose deletion is scheduled. + KeyId *string + + // The current status of the KMS key. For more information about how key state + // affects the use of a KMS key, see Key states of KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the + // Key Management Service Developer Guide. + KeyState types.KeyState + + // The waiting period before the KMS key is deleted. If the KMS key is a + // multi-Region primary key with replicas, the waiting period begins when the last + // of its replica keys is deleted. Otherwise, the waiting period begins + // immediately. + PendingWindowInDays *int32 + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationScheduleKeyDeletionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpScheduleKeyDeletion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpScheduleKeyDeletionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScheduleKeyDeletion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func 
newServiceMetadataMiddleware_opScheduleKeyDeletion(region string) *awsmiddleware.RegisterServiceMetadata {
+	return &awsmiddleware.RegisterServiceMetadata{
+		Region:        region,
+		ServiceID:     ServiceID,
+		SigningName:   "kms",
+		OperationName: "ScheduleKeyDeletion",
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go
new file mode 100644
index 0000000000..4c66cbd9a4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Sign.go
@@ -0,0 +1,238 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+	"context"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/service/kms/types"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a digital signature (https://en.wikipedia.org/wiki/Digital_signature)
+// for a message or message digest by using the private key in an asymmetric
+// signing KMS key. To verify the signature, use the Verify operation, or use the
+// public key in the same asymmetric KMS key outside of KMS. For information about
+// asymmetric KMS keys, see Asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. Digital signatures are generated
+// and verified by using an asymmetric key pair, such as an RSA or ECC pair that
+// is represented by an asymmetric KMS key. The key owner (or an authorized user)
+// uses their private key to sign a message. Anyone with the public key can verify
+// that the message was signed with that particular private key and that the
+// message hasn't changed since it was signed. To use the Sign operation, provide
+// the following information:
+//
+// * Use the KeyId parameter to identify an asymmetric KMS
+// key with a KeyUsage value of SIGN_VERIFY. To get the KeyUsage value of a KMS
+// key, use the DescribeKey operation. The caller must have kms:Sign permission on
+// the KMS key.
+//
+// * Use the Message parameter to specify the message or message
+// digest to sign. You can submit messages of up to 4096 bytes. To sign a larger
+// message, generate a hash digest of the message, and then provide the hash digest
+// in the Message parameter. To indicate whether the message is a full message or a
+// digest, use the MessageType parameter.
+//
+// * Choose a signing algorithm that is
+// compatible with the KMS key.
+//
+// When signing a message, be sure to record the KMS
+// key and the signing algorithm. This information is required to verify the
+// signature. Best practices recommend that you limit the time during which any
+// signature is effective. This deters an attack where the actor uses a signed
+// message to establish validity repeatedly or long after the message is
+// superseded. Signatures do not include a timestamp, but you can include a
+// timestamp in the signed message to help you detect when it's time to refresh
+// the signature. To verify the signature that this operation generates, use the
+// Verify operation. Or use the GetPublicKey operation to download the public key
+// and then use the public key to verify the signature outside of KMS. The KMS key
+// that you use for this operation must be in a compatible key state.
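+//
+// As a hedged editorial sketch (the enum values follow the kms/types naming
+// conventions; assumes a configured *kms.Client named client and a
+// context.Context named ctx):
+//
+//	out, err := client.Sign(ctx, &kms.SignInput{
+//		KeyId:            aws.String("alias/ExampleAlias"),
+//		Message:          []byte("message to sign"),
+//		MessageType:      types.MessageTypeRaw,
+//		SigningAlgorithm: types.SigningAlgorithmSpecRsassaPssSha256,
+//	})
+//	// out.Signature can later be checked with the Verify operation.
+//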
For details, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:Sign +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: Verify +func (c *Client) Sign(ctx context.Context, params *SignInput, optFns ...func(*Options)) (*SignOutput, error) { + if params == nil { + params = &SignInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Sign", params, optFns, c.addOperationSignMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*SignOutput) + out.ResultMetadata = metadata + return out, nil +} + +type SignInput struct { + + // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric KMS + // key to sign the message. The KeyUsage type of the KMS key must be SIGN_VERIFY. + // To find the KeyUsage of a KMS key, use the DescribeKey operation. To specify a + // KMS key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. + // To sign a larger message, provide the message digest. If you provide a message, + // KMS generates a hash digest of the message and then signs it. + // + // This member is required. + Message []byte + + // Specifies the signing algorithm to use when signing the message. Choose an + // algorithm that is compatible with the type and size of the specified asymmetric + // KMS key. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Tells KMS whether the value of the Message parameter is a message or message + // digest. The default value, RAW, indicates a message. To indicate a message + // digest, enter DIGEST. + MessageType types.MessageType + + noSmithyDocumentSerde +} + +type SignOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric KMS key that was used to sign the message. 
+ KeyId *string + + // The cryptographic signature that was generated for the message. + // + // * When used + // with the supported RSA signing algorithms, the encoding of this value is defined + // by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). + // + // * When used with + // the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing algorithms, this + // value is a DER-encoded object as defined by ANS X9.62–2005 and RFC 3279 Section + // 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). This is the most + // commonly used signature format and is appropriate for most uses. + // + // When you use + // the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. + // Otherwise, it is not Base64-encoded. + Signature []byte + + // The signing algorithm that was used to sign the message. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationSignMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpSign{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpSign{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpSignValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSign(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opSign(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Sign", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go new file mode 100644 index 0000000000..ba25b0f439 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_TagResource.go @@ -0,0 +1,176 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
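+//
+// Editorial note: an illustrative call to the TagResource operation defined in
+// this file (a sketch assuming a configured *kms.Client named client, a
+// context.Context named ctx, and the kms/types package):
+//
+//	_, err := client.TagResource(ctx, &kms.TagResourceInput{
+//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		Tags: []types.Tag{
+//			{TagKey: aws.String("Project"), TagValue: aws.String("Alpha")},
+//		},
+//	})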
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Adds or edits tags on a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). +// Tagging or untagging a KMS key can allow or deny permission to the KMS key. For +// details, see ABAC in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. Each tag consists of a tag key and a tag +// value, both of which are case-sensitive strings. The tag value can be an empty +// (null) string. To add a tag, specify a new tag key and a tag value. To edit a +// tag, specify an existing tag key and a new tag value. You can use this operation +// to tag a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), +// but you cannot tag an Amazon Web Services managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk), +// an Amazon Web Services owned key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk), +// a custom key store +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept), +// or an alias +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept). +// You can also add tags to a KMS key while creating it (CreateKey) or replicating +// it (ReplicateKey). For information about using tags in KMS, see Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For +// general information about tags, including the format and syntax, see Tagging +// Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:TagResource +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * ListResourceTags +// +// * +// ReplicateKey +// +// * UntagResource +func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) { + if params == nil { + params = &TagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*TagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type TagResourceInput struct { + + // Identifies a customer managed key in the account and Region. Specify the key ID + // or key ARN of the KMS key. 
For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tags. Each tag consists of a tag key and a tag value. The tag value + // can be an empty (null) string. You cannot have more than one tag on a KMS key + // with the same tag key. If you specify an existing tag key with a different tag + // value, KMS replaces the current tag value with the specified one. + // + // This member is required. + Tags []types.Tag + + noSmithyDocumentSerde +} + +type TagResourceOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpTagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpTagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "TagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go new file mode 100644 index 0000000000..96f66589f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UntagResource.go @@ -0,0 +1,161 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
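+//
+// Editorial note: an illustrative call to the UntagResource operation defined
+// in this file (a sketch assuming a configured *kms.Client named client and a
+// context.Context named ctx):
+//
+//	_, err := client.UntagResource(ctx, &kms.UntagResourceInput{
+//		KeyId:   aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		TagKeys: []string{"Project"},
+//	})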
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes tags from a customer managed key +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). +// To delete a tag, specify the tag key and the KMS key. Tagging or untagging a KMS +// key can allow or deny permission to the KMS key. For details, see ABAC in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. When it succeeds, the UntagResource +// operation doesn't return any output. Also, if the specified tag key isn't found +// on the KMS key, it doesn't throw an exception or return a response. To confirm +// that the operation worked, use the ListResourceTags operation. For information +// about using tags in KMS, see Tagging keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). For +// general information about tags, including the format and syntax, see Tagging +// Amazon Web Services resources +// (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the Amazon +// Web Services General Reference. The KMS key that you use for this operation must +// be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:UntagResource +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * ListResourceTags +// +// * +// ReplicateKey +// +// * TagResource +func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) { + if params == nil { + params = &UntagResourceInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UntagResourceOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UntagResourceInput struct { + + // Identifies the KMS key from which you are removing tags. Specify the key ID or + // key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // One or more tag keys. Specify only the tag keys, not the tag values. + // + // This member is required. + TagKeys []string + + noSmithyDocumentSerde +} + +type UntagResourceOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUntagResource{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUntagResourceValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UntagResource", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go new file mode 100644 index 0000000000..c669b7c6b5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateAlias.go @@ -0,0 +1,186 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Associates an existing KMS alias with a different KMS key. Each alias is +// associated with only one KMS key at a time, although a KMS key can have multiple +// aliases. The alias and the KMS key must be in the same Amazon Web Services +// account and Region. Adding, deleting, or updating an alias can allow or deny +// permission to the KMS key. For details, see ABAC in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) in the Key +// Management Service Developer Guide. The current and new KMS key must be the same +// type (both symmetric or both asymmetric), and they must have the same key usage +// (ENCRYPT_DECRYPT or SIGN_VERIFY). 
This restriction prevents errors in code that +// uses aliases. If you must assign an alias to a different type of KMS key, use +// DeleteAlias to delete the old alias and CreateAlias to create a new alias. You +// cannot use UpdateAlias to change an alias name. To change an alias name, use +// DeleteAlias to delete the old alias and CreateAlias to create a new alias. +// Because an alias is not a property of a KMS key, you can create, update, and +// delete the aliases of a KMS key without affecting the KMS key. Also, aliases do +// not appear in the response from the DescribeKey operation. To get the aliases of +// all KMS keys in the account, use the ListAliases operation. The KMS key that you +// use for this operation must be in a compatible key state. For details, see Key +// states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the alias (IAM policy). +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the current KMS key (key policy). +// +// * kms:UpdateAlias +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// on the new KMS key (key policy). +// +// For details, see Controlling access to aliases +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) +// in the Key Management Service Developer Guide. Related operations: +// +// * +// CreateAlias +// +// * DeleteAlias +// +// * ListAliases +func (c *Client) UpdateAlias(ctx context.Context, params *UpdateAliasInput, optFns ...func(*Options)) (*UpdateAliasOutput, error) { + if params == nil { + params = &UpdateAliasInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateAlias", params, optFns, c.addOperationUpdateAliasMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateAliasOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateAliasInput struct { + + // Identifies the alias that is changing its KMS key. This value must begin with + // alias/ followed by the alias name, such as alias/ExampleAlias. You cannot use + // UpdateAlias to change the alias name. + // + // This member is required. + AliasName *string + + // Identifies the customer managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) + // to associate with the alias. You don't have permission to associate an alias + // with an Amazon Web Services managed key + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). + // The KMS key must be in the same Amazon Web Services account and Region as the + // alias. Also, the new target KMS key must be the same type as the current target + // KMS key (both symmetric or both asymmetric) and they must have the same key + // usage. Specify the key ID or key ARN of the KMS key. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. 
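+	//
+	// For illustration (an editorial sketch, assuming a configured *kms.Client
+	// named client and a context.Context named ctx), repointing the alias looks
+	// like this:
+	//
+	//	_, err := client.UpdateAlias(ctx, &kms.UpdateAliasInput{
+	//		AliasName:   aws.String("alias/ExampleAlias"),
+	//		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+	//	})
+	//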
To verify + // that the alias is mapped to the correct KMS key, use ListAliases. + // + // This member is required. + TargetKeyId *string + + noSmithyDocumentSerde +} + +type UpdateAliasOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateAliasMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateAlias{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateAliasValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAlias(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateAlias(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateAlias", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go new file mode 100644 index 0000000000..f902aadf64 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateCustomKeyStore.go @@ -0,0 +1,191 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the properties of a custom key store. Use the CustomKeyStoreId parameter +// to identify the custom key store you want to edit. Use the remaining parameters +// to change the properties of the custom key store. You can only update a custom +// key store that is disconnected. To disconnect the custom key store, use +// DisconnectCustomKeyStore. To reconnect the custom key store after the update +// completes, use ConnectCustomKeyStore. 
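+//
+// As an editorial sketch of that disconnect, update, reconnect flow (assuming a
+// configured *kms.Client named client, a context.Context named ctx, and a store
+// ID in storeID; error handling elided):
+//
+//	client.DisconnectCustomKeyStore(ctx, &kms.DisconnectCustomKeyStoreInput{CustomKeyStoreId: aws.String(storeID)})
+//	client.UpdateCustomKeyStore(ctx, &kms.UpdateCustomKeyStoreInput{
+//		CustomKeyStoreId:      aws.String(storeID),
+//		NewCustomKeyStoreName: aws.String("ExampleKeyStoreV2"),
+//	})
+//	client.ConnectCustomKeyStore(ctx, &kms.ConnectCustomKeyStoreInput{CustomKeyStoreId: aws.String(storeID)})
+//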
To find the connection state of a custom
+// key store, use the DescribeCustomKeyStores operation. The CustomKeyStoreId
+// parameter is required in all commands. Use the other parameters of
+// UpdateCustomKeyStore to edit your key store settings.
+//
+// * Use the
+// NewCustomKeyStoreName parameter to change the friendly name of the custom key
+// store to the value that you specify.
+//
+// * Use the KeyStorePassword parameter to tell
+// KMS the current password of the kmsuser crypto user (CU)
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
+// in the associated CloudHSM cluster. You can use this parameter to fix connection
+// failures
+// (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password)
+// that occur when KMS cannot log into the associated cluster because the kmsuser
+// password has changed. This value does not change the password in the CloudHSM
+// cluster.
+//
+// * Use the CloudHsmClusterId parameter to associate the custom key
+// store with a different, but related, CloudHSM cluster. You can use this
+// parameter to repair a custom key store if its CloudHSM cluster becomes corrupted
+// or is deleted, or when you need to create or restore a cluster from a
+// backup.
+//
+// If the operation succeeds, it returns a JSON object with no properties.
+// This operation is part of the custom key store feature
+// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
+// in KMS, which combines the convenience and extensive integration of KMS with
+// the isolation and control of a single-tenant key store. Cross-account use:
+// No. You cannot perform this operation on a custom key store in a different
+// Amazon Web Services account. Required permissions: kms:UpdateCustomKeyStore
+// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
+// (IAM policy) Related operations:
+//
+// * ConnectCustomKeyStore
+//
+// *
+// CreateCustomKeyStore
+//
+// * DeleteCustomKeyStore
+//
+// * DescribeCustomKeyStores
+//
+// *
+// DisconnectCustomKeyStore
+func (c *Client) UpdateCustomKeyStore(ctx context.Context, params *UpdateCustomKeyStoreInput, optFns ...func(*Options)) (*UpdateCustomKeyStoreOutput, error) {
+	if params == nil {
+		params = &UpdateCustomKeyStoreInput{}
+	}
+
+	result, metadata, err := c.invokeOperation(ctx, "UpdateCustomKeyStore", params, optFns, c.addOperationUpdateCustomKeyStoreMiddlewares)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*UpdateCustomKeyStoreOutput)
+	out.ResultMetadata = metadata
+	return out, nil
+}
+
+type UpdateCustomKeyStoreInput struct {
+
+	// Identifies the custom key store that you want to update. Enter the ID of the
+	// custom key store. To find the ID of a custom key store, use the
+	// DescribeCustomKeyStores operation.
+	//
+	// This member is required.
+	CustomKeyStoreId *string
+
+	// Associates the custom key store with a related CloudHSM cluster. Enter the
+	// cluster ID of the cluster that you used to create the custom key store or a
+	// cluster that shares a backup history and has the same cluster certificate as the
+	// original cluster. You cannot use this parameter to associate a custom key store
+	// with an unrelated cluster. In addition, the replacement cluster must fulfill the
+	// requirements
+	// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
+	// for a cluster associated with a custom key store.
To view the cluster + // certificate of a cluster, use the DescribeClusters + // (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) + // operation. + CloudHsmClusterId *string + + // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM + // cluster that is associated with the custom key store. This parameter tells KMS + // the current password of the kmsuser crypto user (CU). It does not set or change + // the password of any users in the CloudHSM cluster. + KeyStorePassword *string + + // Changes the friendly name of the custom key store to the value that you specify. + // The custom key store name must be unique in the Amazon Web Services account. + NewCustomKeyStoreName *string + + noSmithyDocumentSerde +} + +type UpdateCustomKeyStoreOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateCustomKeyStoreMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateCustomKeyStore{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateCustomKeyStoreValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateCustomKeyStore(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateCustomKeyStore(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateCustomKeyStore", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go new file mode 100644 index 0000000000..b0700a418d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdateKeyDescription.go @@ -0,0 +1,143 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
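+//
+// Editorial note: an illustrative call to the UpdateKeyDescription operation
+// defined in this file (a sketch assuming a configured *kms.Client named client
+// and a context.Context named ctx):
+//
+//	_, err := client.UpdateKeyDescription(ctx, &kms.UpdateKeyDescriptionInput{
+//		KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//		Description: aws.String("Example key description"),
+//	})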
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Updates the description of a KMS key. To see the description of a KMS key, use +// DescribeKey. The KMS key that you use for this operation must be in a compatible +// key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: No. You cannot +// perform this operation on a KMS key in a different Amazon Web Services account. +// Required permissions: kms:UpdateKeyDescription +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations +// +// * CreateKey +// +// * DescribeKey +func (c *Client) UpdateKeyDescription(ctx context.Context, params *UpdateKeyDescriptionInput, optFns ...func(*Options)) (*UpdateKeyDescriptionOutput, error) { + if params == nil { + params = &UpdateKeyDescriptionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdateKeyDescription", params, optFns, c.addOperationUpdateKeyDescriptionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdateKeyDescriptionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdateKeyDescriptionInput struct { + + // New description for the KMS key. + // + // This member is required. + Description *string + + // Updates the description of the specified KMS key. Specify the key ID or key ARN + // of the KMS key. For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + noSmithyDocumentSerde +} + +type UpdateKeyDescriptionOutput struct { + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdateKeyDescriptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateKeyDescription{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdateKeyDescriptionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKeyDescription(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opUpdateKeyDescription(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "UpdateKeyDescription", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go new file mode 100644 index 0000000000..e7b87d5dfe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_UpdatePrimaryRegion.go @@ -0,0 +1,199 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Changes the primary key of a multi-Region key. This operation changes the +// replica key in the specified Region to a primary key and changes the former +// primary key to a replica key. For example, suppose you have a primary key in +// us-east-1 and a replica key in eu-west-2. If you run UpdatePrimaryRegion with a +// PrimaryRegion value of eu-west-2, the primary key is now the key in eu-west-2, +// and the key in us-east-1 becomes a replica key. 
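For the UpdateKeyDescription operation defined above, a one-call sketch may help (illustrative only; it assumes the client, ctx, and imports from the earlier sketch, and the key ID is the placeholder from the doc comment):

_, err := client.UpdateKeyDescription(ctx, &kms.UpdateKeyDescriptionInput{
	KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
	Description: aws.String("Example key description"),
})
if err != nil {
	// handle the error; a successful call returns only ResultMetadata
}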
For details, see Updating the
+// primary Region
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update)
+// in the Key Management Service Developer Guide. This operation supports
+// multi-Region keys, a KMS feature that lets you create multiple interoperable
+// KMS keys in different Amazon Web Services Regions. Because these KMS keys have
+// the same key ID, key material, and other metadata, you can use them
+// interchangeably to encrypt data in one Amazon Web Services Region and decrypt it
+// in a different Amazon Web Services Region without re-encrypting the data or
+// making a cross-Region call. For more information about multi-Region keys, see
+// Multi-Region keys in KMS
+// (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
+// in the Key Management Service Developer Guide. The primary key of a multi-Region
+// key is the source for properties that are always shared by primary and replica
+// keys, including the key material, key ID
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id),
+// key spec
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec),
+// key usage
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage),
+// key material origin
+// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin),
+// and automatic key rotation
+// (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html). It's
+// the only key that can be replicated. You cannot delete the primary key
+// (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html)
+// until all replica keys are deleted. The key ID and primary Region that you
+// specify uniquely identify the replica key that will become the primary key. The
+// primary Region must already have a replica key. This operation does not create a
+// KMS key in the specified Region. To find the replica keys, use the DescribeKey
+// operation on the primary key or any replica key. To create a replica key, use
+// the ReplicateKey operation. You can run this operation while using the affected
+// multi-Region keys in cryptographic operations. This operation should not delay,
+// interrupt, or cause failures in cryptographic operations. Even after this
+// operation completes, the process of updating the primary Region might still be
+// in progress for a few more seconds. Operations such as DescribeKey might display
+// both the old and new primary keys as replicas. The old and new primary keys have
+// a transient key state of Updating. The original key state is restored when the
+// update is complete. While the key state is Updating, you can use the keys in
+// cryptographic operations, but you cannot replicate the new primary key or
+// perform certain management operations, such as enabling or disabling these keys.
+// For details about the Updating key state, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. This operation does not return any
+// output. To verify that the primary key is changed, use the DescribeKey operation.
+// Cross-account use: No. You cannot use this operation in a different Amazon Web
+// Services account. Required permissions:
+//
+// * kms:UpdatePrimaryRegion on the
+// current primary key (in the primary key's Region). Include this permission in
+// the primary key's key policy. 
+// +// * kms:UpdatePrimaryRegion on the current replica key +// (in the replica key's Region). Include this permission in the replica key's key +// policy. +// +// # Related operations +// +// * CreateKey +// +// * ReplicateKey +func (c *Client) UpdatePrimaryRegion(ctx context.Context, params *UpdatePrimaryRegionInput, optFns ...func(*Options)) (*UpdatePrimaryRegionOutput, error) { + if params == nil { + params = &UpdatePrimaryRegionInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "UpdatePrimaryRegion", params, optFns, c.addOperationUpdatePrimaryRegionMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*UpdatePrimaryRegionOutput) + out.ResultMetadata = metadata + return out, nil +} + +type UpdatePrimaryRegionInput struct { + + // Identifies the current primary key. When the operation completes, this KMS key + // will be a replica key. Specify the key ID or key ARN of a multi-Region primary + // key. For example: + // + // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab + // + // To + // get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. + // + // This member is required. + KeyId *string + + // The Amazon Web Services Region of the new primary key. Enter the Region ID, such + // as us-east-1 or ap-southeast-2. There must be an existing replica key in this + // Region. When the operation completes, the multi-Region key in this Region will + // be the primary key. + // + // This member is required. + PrimaryRegion *string + + noSmithyDocumentSerde +} + +type UpdatePrimaryRegionOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationUpdatePrimaryRegionMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdatePrimaryRegion{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpUpdatePrimaryRegionValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdatePrimaryRegion(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = 
addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUpdatePrimaryRegion(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "kms",
+ OperationName: "UpdatePrimaryRegion",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go
new file mode 100644
index 0000000000..a30fbf752f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_Verify.go
@@ -0,0 +1,221 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package kms
+
+import (
+ "context"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ "github.com/aws/aws-sdk-go-v2/service/kms/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Verifies a digital signature that was generated by the Sign operation.
+// Verification confirms that an authorized user signed the message with the
+// specified KMS key and signing algorithm, and the message hasn't changed since it
+// was signed. If the signature is verified, the value of the SignatureValid field
+// in the response is True. If the signature verification fails, the Verify
+// operation fails with a KMSInvalidSignatureException exception. A digital
+// signature is generated by using the private key in an asymmetric KMS key. The
+// signature is verified by using the public key in the same asymmetric KMS key.
+// For information about asymmetric KMS keys, see Asymmetric KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the Key Management Service Developer Guide. To verify a digital signature,
+// you can use the Verify operation. Specify the same asymmetric KMS key, message,
+// and signing algorithm that were used to produce the signature. You can also
+// verify the digital signature by using the public key of the KMS key outside of
+// KMS. Use the GetPublicKey operation to download the public key in the asymmetric
+// KMS key and then use the public key to verify the signature outside of KMS. To
+// verify a signature outside of KMS with an SM2 public key, you must specify the
+// distinguishing ID. By default, KMS uses 1234567812345678 as the distinguishing
+// ID. For more information, see Offline verification with SM2 key pairs
+// (https://docs.aws.amazon.com/kms/latest/developerguide/asymmetric-key-specs.html#key-spec-sm-offline-verification)
+// in the Key Management Service Developer Guide. The advantage of using the Verify
+// operation is that it is performed within KMS. As a result, it's easy to call,
+// the operation is performed within the FIPS boundary, it is logged in CloudTrail,
+// and you can use key policy and IAM policy to determine who is authorized to use
+// the KMS key to verify signatures. The KMS key that you use for this operation
+// must be in a compatible key state. For details, see Key states of KMS keys
+// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the
+// Key Management Service Developer Guide. Cross-account use: Yes. To perform this
+// operation with a KMS key in a different Amazon Web Services account, specify the
+// key ARN or alias ARN in the value of the KeyId parameter. 
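A minimal sketch of the UpdatePrimaryRegion operation defined above, mirroring the us-east-1/eu-west-2 example from its doc comment (illustrative only; same assumed client, ctx, and imports as the earlier sketches, with placeholder values):

_, err := client.UpdatePrimaryRegion(ctx, &kms.UpdatePrimaryRegionInput{
	// Placeholder multi-Region key ID taken from the doc comment's example.
	KeyId: aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"),
	// The Region whose existing replica key should become the primary key.
	PrimaryRegion: aws.String("eu-west-2"),
})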
Required permissions: +// kms:Verify +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: Sign +func (c *Client) Verify(ctx context.Context, params *VerifyInput, optFns ...func(*Options)) (*VerifyOutput, error) { + if params == nil { + params = &VerifyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "Verify", params, optFns, c.addOperationVerifyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyInput struct { + + // Identifies the asymmetric KMS key that will be used to verify the signature. + // This must be the same KMS key that was used to generate the signature. If you + // specify a different KMS key, the signature verification fails. To specify a KMS + // key, use its key ID, key ARN, alias name, or alias ARN. When using an alias + // name, prefix it with "alias/". To specify a KMS key in a different Amazon Web + // Services account, you must use the key ARN or alias ARN. For example: + // + // * Key ID: + // 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * + // Alias name: alias/ExampleAlias + // + // * Alias ARN: + // arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias + // + // To get the key ID and key + // ARN for a KMS key, use ListKeys or DescribeKey. To get the alias name and alias + // ARN, use ListAliases. + // + // This member is required. + KeyId *string + + // Specifies the message that was signed. You can submit a raw message of up to + // 4096 bytes, or a hash digest of the message. If you submit a digest, use the + // MessageType parameter with a value of DIGEST. If the message specified here is + // different from the message that was signed, the signature verification fails. A + // message and its hash digest are considered to be the same message. + // + // This member is required. + Message []byte + + // The signature that the Sign operation generated. + // + // This member is required. + Signature []byte + + // The signing algorithm that was used to sign the message. If you submit a + // different algorithm, the signature verification fails. + // + // This member is required. + SigningAlgorithm types.SigningAlgorithmSpec + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. + // For more information, see Grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) + // and Using a grant token + // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) + // in the Key Management Service Developer Guide. + GrantTokens []string + + // Tells KMS whether the value of the Message parameter is a message or message + // digest. The default value, RAW, indicates a message. To indicate a message + // digest, enter DIGEST. Use the DIGEST value only when the value of the Message + // parameter is a message digest. If you use the DIGEST value with a raw message, + // the security of the verification operation can be compromised. 
+ MessageType types.MessageType + + noSmithyDocumentSerde +} + +type VerifyOutput struct { + + // The Amazon Resource Name (key ARN + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric KMS key that was used to verify the signature. + KeyId *string + + // A Boolean value that indicates whether the signature was verified. A value of + // True indicates that the Signature was produced by signing the Message with the + // specified KeyID and SigningAlgorithm. If the signature is not verified, the + // Verify operation fails with a KMSInvalidSignatureException exception. + SignatureValid bool + + // The signing algorithm that was used to verify the signature. + SigningAlgorithm types.SigningAlgorithmSpec + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationVerifyMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerify{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerify{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = addHTTPSignerV4Middleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpVerifyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerify(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opVerify(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + SigningName: "kms", + OperationName: "Verify", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go new file mode 100644 index 0000000000..c3cea66975 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/api_op_VerifyMac.go @@ -0,0 +1,181 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
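To ground the Verify operation above: a failed verification does not come back as SignatureValid == false but as a typed error, so callers usually branch with errors.As. A minimal sketch (illustrative only; it assumes the earlier client and ctx, the standard errors package, the kms types package, and message/signature byte slices produced earlier by Sign):

out, err := client.Verify(ctx, &kms.VerifyInput{
	KeyId:            aws.String("alias/ExampleAlias"), // placeholder alias
	Message:          message,                          // raw message; MessageType defaults to RAW
	Signature:        signature,                        // signature returned by Sign
	SigningAlgorithm: types.SigningAlgorithmSpecEcdsaSha256,
})
var badSig *types.KMSInvalidSignatureException
switch {
case errors.As(err, &badSig):
	// The signature did not verify against this key, message, and algorithm.
case err != nil:
	// Transport, permission, or key-state failure.
default:
	_ = out.SignatureValid // true whenever err is nil
}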
+ +package kms + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Verifies the hash-based message authentication code (HMAC) for a specified +// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes +// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, and +// compares the computed HMAC to the HMAC that you specify. If the HMACs are +// identical, the verification succeeds; otherwise, it fails. Verification +// indicates that the message hasn't changed since the HMAC was calculated, and the +// specified key was used to generate and verify the HMAC. This operation is part +// of KMS support for HMAC KMS keys. For details, see HMAC keys in KMS +// (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) in the Key +// Management Service Developer Guide. The KMS key that you use for this operation +// must be in a compatible key state. For details, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. Cross-account use: Yes. To perform this +// operation with a KMS key in a different Amazon Web Services account, specify the +// key ARN or alias ARN in the value of the KeyId parameter. Required permissions: +// kms:VerifyMac +// (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) +// (key policy) Related operations: GenerateMac +func (c *Client) VerifyMac(ctx context.Context, params *VerifyMacInput, optFns ...func(*Options)) (*VerifyMacOutput, error) { + if params == nil { + params = &VerifyMacInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "VerifyMac", params, optFns, c.addOperationVerifyMacMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*VerifyMacOutput) + out.ResultMetadata = metadata + return out, nil +} + +type VerifyMacInput struct { + + // The KMS key that will be used in the verification. Enter a key ID of the KMS key + // that was used to generate the HMAC. If you identify a different KMS key, the + // VerifyMac operation fails. + // + // This member is required. + KeyId *string + + // The HMAC to verify. Enter the HMAC that was generated by the GenerateMac + // operation when you specified the same message, HMAC KMS key, and MAC algorithm + // as the values specified in this request. + // + // This member is required. + Mac []byte + + // The MAC algorithm that will be used in the verification. Enter the same MAC + // algorithm that was used to compute the HMAC. This algorithm must be supported by + // the HMAC KMS key identified by the KeyId parameter. + // + // This member is required. + MacAlgorithm types.MacAlgorithmSpec + + // The message that will be used in the verification. Enter the same message that + // was used to generate the HMAC. GenerateMac and VerifyMac do not provide special + // handling for message digests. If you generated an HMAC for a hash digest of a + // message, you must verify the HMAC for the same hash digest. + // + // This member is required. + Message []byte + + // A list of grant tokens. Use a grant token when your permission to call this + // operation comes from a new grant that has not yet achieved eventual consistency. 
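The same pattern applies to the VerifyMac operation being defined here: a MAC mismatch surfaces as a typed KMSInvalidMacException rather than as MacValid == false. A minimal sketch (illustrative only; same assumptions as the Verify sketch, with mac produced earlier by GenerateMac):

out, err := client.VerifyMac(ctx, &kms.VerifyMacInput{
	KeyId:        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder HMAC key ID
	Message:      message, // same message passed to GenerateMac
	Mac:          mac,     // HMAC returned by GenerateMac
	MacAlgorithm: types.MacAlgorithmSpecHmacSha256,
})
var badMac *types.KMSInvalidMacException
if errors.As(err, &badMac) {
	// The message, key, or algorithm changed since the HMAC was computed.
} else if err == nil {
	_ = out.MacValid // true whenever err is nil
}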
+ // For more information, see Grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
+ // and Using a grant token
+ // (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
+ // in the Key Management Service Developer Guide.
+ GrantTokens []string
+
+ noSmithyDocumentSerde
+}
+
+type VerifyMacOutput struct {
+
+ // The HMAC KMS key used in the verification.
+ KeyId *string
+
+ // The MAC algorithm used in the verification.
+ MacAlgorithm types.MacAlgorithmSpec
+
+ // A Boolean value that indicates whether the HMAC was verified. A value of True
+ // indicates that the HMAC (Mac) was generated with the specified Message, HMAC KMS
+ // key (KeyID) and MacAlgorithm. If the HMAC is not verified, the VerifyMac
+ // operation fails with a KMSInvalidMacException exception. This exception
+ // indicates that one or more of the inputs changed since the HMAC was computed.
+ MacValid bool
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationVerifyMacMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ err = stack.Serialize.Add(&awsAwsjson11_serializeOpVerifyMac{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpVerifyMac{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ return err
+ }
+ if err = addRetryMiddlewares(stack, options); err != nil {
+ return err
+ }
+ if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpVerifyMacValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opVerifyMac(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opVerifyMac(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ SigningName: "kms",
+ OperationName: "VerifyMac",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
new file mode 100644
index 0000000000..70bc055b8c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/deserializers.go
@@ -0,0 +1,11711 @@
+// Code generated by smithy-go-codegen DO 
NOT EDIT. + +package kms + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "io/ioutil" + "strings" +) + +type awsAwsjson11_deserializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_deserializeOpCancelKeyDeletion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCancelKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response, &metadata) + } + output := &CancelKeyDeletionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCancelKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", 
errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpConnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpConnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response, &metadata) + } + output := &ConnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorConnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateAlias struct { +} + +func (*awsAwsjson11_deserializeOpCreateAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateAlias(response, &metadata) + } + output := &CreateAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + 
errorMessage = message + } + + switch { + case strings.EqualFold("AlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidAliasNameException", errorCode): + return awsAwsjson11_deserializeErrorInvalidAliasNameException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpCreateCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response, &metadata) + } + output := &CreateCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = 
restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInUseException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("IncorrectTrustAnchorException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateGrant struct { +} + +func (*awsAwsjson11_deserializeOpCreateGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateGrant(response, &metadata) + } + output := &CreateGrantOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateGrantOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpCreateKey struct { +} + +func (*awsAwsjson11_deserializeOpCreateKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpCreateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorCreateKey(response, &metadata) + } + output := 
&CreateKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentCreateKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorCreateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + case 
strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDecrypt struct { +} + +func (*awsAwsjson11_deserializeOpDecrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDecrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDecrypt(response, &metadata) + } + output := &DecryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDecryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDecrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case 
strings.EqualFold("IncorrectKeyException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_deserializeOpDeleteAlias) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteAlias(response, &metadata) + } + output := &DeleteAliasOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = 
restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDeleteCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response, &metadata) + } + output := &DeleteCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreHasCMKsException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDeleteImportedKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response, &metadata) + } + output := &DeleteImportedKeyMaterialOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDeleteImportedKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_deserializeOpDescribeCustomKeyStores) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeCustomKeyStores) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response, &metadata) + } + output := &DescribeCustomKeyStoresOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeCustomKeyStores(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDescribeKey struct { +} + +func (*awsAwsjson11_deserializeOpDescribeKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDescribeKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDescribeKey(response, &metadata) + } + output := &DescribeKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDescribeKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisableKey struct { +} + +func (*awsAwsjson11_deserializeOpDisableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKey(response, &metadata) + } + output := &DisableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case 
strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpDisableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisableKeyRotation(response, &metadata) + } + output := &DisableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case 
strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpDisconnectCustomKeyStore struct { +} + +func (*awsAwsjson11_deserializeOpDisconnectCustomKeyStore) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpDisconnectCustomKeyStore) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response, &metadata) + } + output := &DisconnectCustomKeyStoreOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorDisconnectCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + 
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKey struct { +} + +func (*awsAwsjson11_deserializeOpEnableKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKey(response, &metadata) + } + output := &EnableKeyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEnableKeyRotation struct { +} + +func (*awsAwsjson11_deserializeOpEnableKeyRotation) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEnableKeyRotation) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEnableKeyRotation(response, &metadata) + } + output := &EnableKeyRotationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEnableKeyRotation(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpEncrypt struct { +} + +func (*awsAwsjson11_deserializeOpEncrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorEncrypt(response, &metadata) + } + output := &EncryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentEncryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + 
+ errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKey struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKey(response, &metadata) + } + output := &GenerateDataKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed 
to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPair struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPair) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPair) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response, &metadata) + } + output := &GenerateDataKeyPairOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPair(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyPairWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + 
out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyPairWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyPairWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return 
awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateDataKeyWithoutPlaintext) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response, &metadata) + } + output := &GenerateDataKeyWithoutPlaintextOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateDataKeyWithoutPlaintext(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateMac struct { +} + +func (*awsAwsjson11_deserializeOpGenerateMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateMac(response, &metadata) + } + output := &GenerateMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGenerateMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: 
fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGenerateRandom struct { +} + +func (*awsAwsjson11_deserializeOpGenerateRandom) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGenerateRandom) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGenerateRandom(response, &metadata) + } + output := &GenerateRandomOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = 
awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGenerateRandom(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyPolicy struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyPolicy(response, &metadata) + } + output := &GetKeyPolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, 
ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetKeyRotationStatus struct { +} + +func (*awsAwsjson11_deserializeOpGetKeyRotationStatus) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetKeyRotationStatus) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response, &metadata) + } + output := &GetKeyRotationStatusOutput{} + out.Result = output + + var 
buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetKeyRotationStatus(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetParametersForImport struct { +} + +func (*awsAwsjson11_deserializeOpGetParametersForImport) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetParametersForImport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = 
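+	// Delegate to the rest of the middleware stack first; this deserializer
+	// only runs on the way back out, once the transport has produced the raw
+	// HTTP response for the operation.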
next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetParametersForImport(response, &metadata) + } + output := &GetParametersForImportOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetParametersForImport(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: 
errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpGetPublicKey struct { +} + +func (*awsAwsjson11_deserializeOpGetPublicKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpGetPublicKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorGetPublicKey(response, &metadata) + } + output := &GetPublicKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorGetPublicKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return 
awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpImportKeyMaterial struct { +} + +func (*awsAwsjson11_deserializeOpImportKeyMaterial) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpImportKeyMaterial) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorImportKeyMaterial(response, &metadata) + } + output := &ImportKeyMaterialOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorImportKeyMaterial(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := 
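+	// Mirror every byte the JSON decoder consumes into the 1 KiB ring
+	// buffer, so a decode failure can attach the most recent error-body
+	// bytes to the DeserializationError as a diagnostic snapshot. The
+	// seekable errorBody is rewound below so the matched exception
+	// deserializer can re-read it from the start.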
io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("ExpiredImportTokenException", errorCode): + return awsAwsjson11_deserializeErrorExpiredImportTokenException(response, errorBody) + + case strings.EqualFold("IncorrectKeyMaterialException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidImportTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidImportTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListAliases struct { +} + +func (*awsAwsjson11_deserializeOpListAliases) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListAliases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListAliases(response, &metadata) + } + output := &ListAliasesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListAliasesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListAliases(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListGrants struct { +} + +func (*awsAwsjson11_deserializeOpListGrants) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListGrants(response, &metadata) + } + output := &ListGrantsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var 
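+	// Decode into an untyped shape first; UseNumber() above keeps numeric
+	// fields as json.Number so no precision is lost before the typed
+	// document deserializer interprets them.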
shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListGrantsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListKeyPolicies struct { +} + +func (*awsAwsjson11_deserializeOpListKeyPolicies) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListKeyPolicies) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok 
:= out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListKeyPolicies(response, &metadata) + } + output := &ListKeyPoliciesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListKeyPolicies(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListKeys struct { +} + +func (*awsAwsjson11_deserializeOpListKeys) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListKeys) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListKeys(response, &metadata) + } + output := &ListKeysOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListKeysOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListKeys(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListResourceTags struct { +} + +func (*awsAwsjson11_deserializeOpListResourceTags) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListResourceTags) HandleDeserialize(ctx context.Context, in 
middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListResourceTags(response, &metadata) + } + output := &ListResourceTagsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListResourceTags(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpListRetirableGrants struct { +} + +func (*awsAwsjson11_deserializeOpListRetirableGrants) 
ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpListRetirableGrants) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorListRetirableGrants(response, &metadata) + } + output := &ListRetirableGrantsOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorListRetirableGrants(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidMarkerException", errorCode): + return awsAwsjson11_deserializeErrorInvalidMarkerException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return 
awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpPutKeyPolicy struct { +} + +func (*awsAwsjson11_deserializeOpPutKeyPolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpPutKeyPolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorPutKeyPolicy(response, &metadata) + } + output := &PutKeyPolicyOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorPutKeyPolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return 
awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpReEncrypt struct { +} + +func (*awsAwsjson11_deserializeOpReEncrypt) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpReEncrypt) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorReEncrypt(response, &metadata) + } + output := &ReEncryptOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentReEncryptOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorReEncrypt(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return 
awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("IncorrectKeyException", errorCode): + return awsAwsjson11_deserializeErrorIncorrectKeyException(response, errorBody) + + case strings.EqualFold("InvalidCiphertextException", errorCode): + return awsAwsjson11_deserializeErrorInvalidCiphertextException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpReplicateKey struct { +} + +func (*awsAwsjson11_deserializeOpReplicateKey) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpReplicateKey) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorReplicateKey(response, &metadata) + } + output := &ReplicateKeyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorReplicateKey(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := 
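+	// Resolve the error code by precedence: start from the "UnknownError"
+	// sentinel, let the X-Amzn-ErrorType header override it, then prefer the
+	// code parsed from the JSON body; each candidate is normalized with
+	// restjson.SanitizeErrorCode. Codes matching no case in the switch below
+	// fall through to a generic smithy.GenericAPIError.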
"UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AlreadyExistsException", errorCode): + return awsAwsjson11_deserializeErrorAlreadyExistsException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("MalformedPolicyDocumentException", errorCode): + return awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("TagException", errorCode): + return awsAwsjson11_deserializeErrorTagException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRetireGrant struct { +} + +func (*awsAwsjson11_deserializeOpRetireGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRetireGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRetireGrant(response, &metadata) + } + output := &RetireGrantOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func 
awsAwsjson11_deserializeOpErrorRetireGrant(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpRevokeGrant struct { +} + +func (*awsAwsjson11_deserializeOpRevokeGrant) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpRevokeGrant) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorRevokeGrant(response, &metadata) + } + output := &RevokeGrantOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorRevokeGrant(response 
*smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("InvalidGrantIdException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantIdException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpScheduleKeyDeletion struct { +} + +func (*awsAwsjson11_deserializeOpScheduleKeyDeletion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response, &metadata) + } + output := &ScheduleKeyDeletionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
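For callers, the practical effect of these deserializers is that modeled KMS error codes surface as typed Go errors. A minimal sketch of how client code would consume them; the key ARN and grant ID are placeholders, not values from this change:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kms"
	"github.com/aws/aws-sdk-go-v2/service/kms/types"
	"github.com/aws/smithy-go"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kms.NewFromConfig(cfg)

	_, err = client.RetireGrant(context.TODO(), &kms.RetireGrantInput{
		KeyId:   aws.String("arn:aws:kms:us-east-1:111122223333:key/placeholder"),
		GrantId: aws.String("placeholder-grant-id"),
	})

	// A modeled code such as "NotFoundException" arrives as a typed error.
	var nfe *types.NotFoundException
	if errors.As(err, &nfe) {
		fmt.Println("not found:", nfe.ErrorMessage())
		return
	}
	// Unmodeled codes still satisfy the generic smithy.APIError interface.
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		fmt.Println(apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
}
```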
+
+type awsAwsjson11_deserializeOpScheduleKeyDeletion struct {
+}
+
+func (*awsAwsjson11_deserializeOpScheduleKeyDeletion) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpScheduleKeyDeletion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response, &metadata)
+	}
+	output := &ScheduleKeyDeletionOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorScheduleKeyDeletion(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpSign struct {
+}
+
+func (*awsAwsjson11_deserializeOpSign) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpSign) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorSign(response, &metadata)
+	}
+	output := &SignOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson11_deserializeOpDocumentSignOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorSign(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("DisabledException", errorCode):
+		return awsAwsjson11_deserializeErrorDisabledException(response, errorBody)
+
+	case strings.EqualFold("InvalidGrantTokenException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody)
+
+	case strings.EqualFold("InvalidKeyUsageException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("KeyUnavailableException", errorCode):
+		return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpTagResource struct {
+}
+
+func (*awsAwsjson11_deserializeOpTagResource) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorTagResource(response, &metadata)
+	}
+	output := &TagResourceOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TagException", errorCode):
+		return awsAwsjson11_deserializeErrorTagException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpUntagResource struct {
+}
+
+func (*awsAwsjson11_deserializeOpUntagResource) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorUntagResource(response, &metadata)
+	}
+	output := &UntagResourceOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("InvalidArnException", errorCode):
+		return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	case strings.EqualFold("TagException", errorCode):
+		return awsAwsjson11_deserializeErrorTagException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+type awsAwsjson11_deserializeOpUpdateAlias struct {
+}
+
+func (*awsAwsjson11_deserializeOpUpdateAlias) ID() string {
+	return "OperationDeserializer"
+}
+
+func (m *awsAwsjson11_deserializeOpUpdateAlias) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson11_deserializeOpErrorUpdateAlias(response, &metadata)
+	}
+	output := &UpdateAliasOutput{}
+	out.Result = output
+
+	if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+		return out, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("failed to discard response body, %w", err),
+		}
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson11_deserializeOpErrorUpdateAlias(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	code := response.Header.Get("X-Amzn-ErrorType")
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	code, message, err := restjson.GetErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	if len(code) != 0 {
+		errorCode = restjson.SanitizeErrorCode(code)
+	}
+	if len(message) != 0 {
+		errorMessage = message
+	}
+
+	switch {
+	case strings.EqualFold("DependencyTimeoutException", errorCode):
+		return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody)
+
+	case strings.EqualFold("KMSInternalException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody)
+
+	case strings.EqualFold("KMSInvalidStateException", errorCode):
+		return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson11_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("NotFoundException", errorCode):
+		return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
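The `io.TeeReader` plus `smithyio.NewRingBuffer` pairing that recurs in every deserializer above is worth seeing in isolation: whatever the JSON decoder consumes is mirrored into a fixed-size ring buffer, so a decode failure can report a snapshot of the bytes that were actually read. A standalone sketch of the same pattern (the malformed JSON literal is made up):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"strings"

	smithyio "github.com/aws/smithy-go/io"
)

func main() {
	// Malformed JSON standing in for a bad response body.
	body := strings.NewReader(`{"KeyId": "alias/example", truncated`)

	var buff [1024]byte
	ringBuffer := smithyio.NewRingBuffer(buff[:])

	// Everything the decoder reads is also written into the ring buffer.
	tee := io.TeeReader(body, ringBuffer)
	decoder := json.NewDecoder(tee)
	decoder.UseNumber()

	var shape interface{}
	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
		var snapshot bytes.Buffer
		io.Copy(&snapshot, ringBuffer)
		fmt.Printf("decode failed: %v\nconsumed bytes: %q\n", err, snapshot.Bytes())
	}
}
```

The fixed 1024-byte buffer bounds memory while still giving a useful diagnostic tail in `smithy.DeserializationError.Snapshot`.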
decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateCustomKeyStore(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("CloudHsmClusterInvalidConfigurationException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotActiveException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response, errorBody) + + case strings.EqualFold("CloudHsmClusterNotRelatedException", errorCode): + return awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNameInUseException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response, errorBody) + + case strings.EqualFold("CustomKeyStoreNotFoundException", errorCode): + return awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_deserializeOpUpdateKeyDescription) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdateKeyDescription) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata 
middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response, &metadata) + } + output := &UpdateKeyDescriptionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdateKeyDescription(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_deserializeOpUpdatePrimaryRegion) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpUpdatePrimaryRegion) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", 
out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response, &metadata) + } + output := &UpdatePrimaryRegionOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorUpdatePrimaryRegion(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidArnException", errorCode): + return awsAwsjson11_deserializeErrorInvalidArnException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + case strings.EqualFold("UnsupportedOperationException", errorCode): + return awsAwsjson11_deserializeErrorUnsupportedOperationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsjson11_deserializeOpVerify struct { +} + +func (*awsAwsjson11_deserializeOpVerify) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerify) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerify(response, &metadata) + } + output := &VerifyOutput{} + 
out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerify(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DependencyTimeoutException", errorCode): + return awsAwsjson11_deserializeErrorDependencyTimeoutException(response, errorBody) + + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case strings.EqualFold("KMSInvalidSignatureException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type 
awsAwsjson11_deserializeOpVerifyMac struct { +} + +func (*awsAwsjson11_deserializeOpVerifyMac) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson11_deserializeOpVerifyMac) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson11_deserializeOpErrorVerifyMac(response, &metadata) + } + output := &VerifyMacOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson11_deserializeOpDocumentVerifyMacOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson11_deserializeOpErrorVerifyMac(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("DisabledException", errorCode): + return awsAwsjson11_deserializeErrorDisabledException(response, errorBody) + + case strings.EqualFold("InvalidGrantTokenException", errorCode): + return awsAwsjson11_deserializeErrorInvalidGrantTokenException(response, errorBody) + + case strings.EqualFold("InvalidKeyUsageException", errorCode): + return awsAwsjson11_deserializeErrorInvalidKeyUsageException(response, errorBody) + + case strings.EqualFold("KMSInternalException", errorCode): + return awsAwsjson11_deserializeErrorKMSInternalException(response, errorBody) + + case 
strings.EqualFold("KMSInvalidMacException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidMacException(response, errorBody) + + case strings.EqualFold("KMSInvalidStateException", errorCode): + return awsAwsjson11_deserializeErrorKMSInvalidStateException(response, errorBody) + + case strings.EqualFold("KeyUnavailableException", errorCode): + return awsAwsjson11_deserializeErrorKeyUnavailableException(response, errorBody) + + case strings.EqualFold("NotFoundException", errorCode): + return awsAwsjson11_deserializeErrorNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsAwsjson11_deserializeErrorAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.AlreadyExistsException{} + err := awsAwsjson11_deserializeDocumentAlreadyExistsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInUseException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterInvalidConfigurationException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterInvalidConfigurationException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(&output, shape) + 
+ if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotActiveException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotActiveException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotFoundException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCloudHsmClusterNotRelatedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CloudHsmClusterNotRelatedException{} + err := awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreHasCMKsException(response *smithyhttp.Response, errorBody *bytes.Reader) 
error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreHasCMKsException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreInvalidStateException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreNameInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNameInUseException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorCustomKeyStoreNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response 
body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.CustomKeyStoreNotFoundException{} + err := awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDependencyTimeoutException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DependencyTimeoutException{} + err := awsAwsjson11_deserializeDocumentDependencyTimeoutException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorDisabledException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.DisabledException{} + err := awsAwsjson11_deserializeDocumentDisabledException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorExpiredImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.ExpiredImportTokenException{} + err := awsAwsjson11_deserializeDocumentExpiredImportTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + 
+func awsAwsjson11_deserializeErrorIncorrectKeyException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.IncorrectKeyException{}
+	err := awsAwsjson11_deserializeDocumentIncorrectKeyException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorIncorrectKeyMaterialException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.IncorrectKeyMaterialException{}
+	err := awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorIncorrectTrustAnchorException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.IncorrectTrustAnchorException{}
+	err := awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorInvalidAliasNameException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidAliasNameException{} + err := awsAwsjson11_deserializeDocumentInvalidAliasNameException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidArnException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidArnException{} + err := awsAwsjson11_deserializeDocumentInvalidArnException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidCiphertextException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidCiphertextException{} + err := awsAwsjson11_deserializeDocumentInvalidCiphertextException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorInvalidGrantIdException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidGrantIdException{} + err := awsAwsjson11_deserializeDocumentInvalidGrantIdException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } 
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorInvalidGrantTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidGrantTokenException{}
+	err := awsAwsjson11_deserializeDocumentInvalidGrantTokenException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorInvalidImportTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidImportTokenException{}
+	err := awsAwsjson11_deserializeDocumentInvalidImportTokenException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorInvalidKeyUsageException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.InvalidKeyUsageException{}
+	err := awsAwsjson11_deserializeDocumentInvalidKeyUsageException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorInvalidMarkerException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.InvalidMarkerException{} + err := awsAwsjson11_deserializeDocumentInvalidMarkerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKeyUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KeyUnavailableException{} + err := awsAwsjson11_deserializeDocumentKeyUnavailableException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInternalException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInternalException{} + err := awsAwsjson11_deserializeDocumentKMSInternalException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeErrorKMSInvalidMacException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.KMSInvalidMacException{} + err := awsAwsjson11_deserializeDocumentKMSInvalidMacException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: 
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorKMSInvalidSignatureException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.KMSInvalidSignatureException{}
+	err := awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorKMSInvalidStateException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.KMSInvalidStateException{}
+	err := awsAwsjson11_deserializeDocumentKMSInvalidStateException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.LimitExceededException{}
+	err := awsAwsjson11_deserializeDocumentLimitExceededException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorMalformedPolicyDocumentException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.MalformedPolicyDocumentException{}
+	err := awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.NotFoundException{}
+	err := awsAwsjson11_deserializeDocumentNotFoundException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorTagException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.TagException{}
+	err := awsAwsjson11_deserializeDocumentTagException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	errorBody.Seek(0, io.SeekStart)
+	return output
+}
+
+func awsAwsjson11_deserializeErrorUnsupportedOperationException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	output := &types.UnsupportedOperationException{}
+	err := awsAwsjson11_deserializeDocumentUnsupportedOperationException(&output, shape)
+
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
%w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + +func awsAwsjson11_deserializeDocumentAliasList(v *[]types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.AliasListEntry + if *v == nil { + cv = []types.AliasListEntry{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.AliasListEntry + destAddr := &col + if err := awsAwsjson11_deserializeDocumentAliasListEntry(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentAliasListEntry(v **types.AliasListEntry, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AliasListEntry + if *v == nil { + sv = &types.AliasListEntry{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "AliasArn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.AliasArn = ptr.String(jtv) + } + + case "AliasName": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AliasNameType to be of type string, got %T instead", value) + } + sv.AliasName = ptr.String(jtv) + } + + case "CreationDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "LastUpdatedDate": + if value != nil { + switch jtv := value.(type) { + case json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.LastUpdatedDate = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + case "TargetKeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.TargetKeyId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentAlreadyExistsException(v **types.AlreadyExistsException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AlreadyExistsException + if *v == nil { + sv = &types.AlreadyExistsException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentCloudHsmClusterInUseException(v 
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CloudHsmClusterInUseException
+	if *v == nil {
+		sv = &types.CloudHsmClusterInUseException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCloudHsmClusterInvalidConfigurationException(v **types.CloudHsmClusterInvalidConfigurationException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CloudHsmClusterInvalidConfigurationException
+	if *v == nil {
+		sv = &types.CloudHsmClusterInvalidConfigurationException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCloudHsmClusterNotActiveException(v **types.CloudHsmClusterNotActiveException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CloudHsmClusterNotActiveException
+	if *v == nil {
+		sv = &types.CloudHsmClusterNotActiveException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCloudHsmClusterNotFoundException(v **types.CloudHsmClusterNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CloudHsmClusterNotFoundException
+	if *v == nil {
+		sv = &types.CloudHsmClusterNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCloudHsmClusterNotRelatedException(v **types.CloudHsmClusterNotRelatedException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CloudHsmClusterNotRelatedException
+	if *v == nil {
+		sv = &types.CloudHsmClusterNotRelatedException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoreHasCMKsException(v **types.CustomKeyStoreHasCMKsException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CustomKeyStoreHasCMKsException
+	if *v == nil {
+		sv = &types.CustomKeyStoreHasCMKsException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoreInvalidStateException(v **types.CustomKeyStoreInvalidStateException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CustomKeyStoreInvalidStateException
+	if *v == nil {
+		sv = &types.CustomKeyStoreInvalidStateException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoreNameInUseException(v **types.CustomKeyStoreNameInUseException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CustomKeyStoreNameInUseException
+	if *v == nil {
+		sv = &types.CustomKeyStoreNameInUseException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoreNotFoundException(v **types.CustomKeyStoreNotFoundException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CustomKeyStoreNotFoundException
+	if *v == nil {
+		sv = &types.CustomKeyStoreNotFoundException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoresList(v *[]types.CustomKeyStoresListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.CustomKeyStoresListEntry
+	if *v == nil {
+		cv = []types.CustomKeyStoresListEntry{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.CustomKeyStoresListEntry
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentCustomKeyStoresListEntry(v **types.CustomKeyStoresListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.CustomKeyStoresListEntry
+	if *v == nil {
+		sv = &types.CustomKeyStoresListEntry{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CloudHsmClusterId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value)
+				}
+				sv.CloudHsmClusterId = ptr.String(jtv)
+			}
+
+		case "ConnectionErrorCode":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ConnectionErrorCodeType to be of type string, got %T instead", value)
+				}
+				sv.ConnectionErrorCode = types.ConnectionErrorCodeType(jtv)
+			}
+
+		case "ConnectionState":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ConnectionStateType to be of type string, got %T instead", value)
+				}
+				sv.ConnectionState = types.ConnectionStateType(jtv)
+			}
+
+		case "CreationDate":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "CustomKeyStoreId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value)
+				}
+				sv.CustomKeyStoreId = ptr.String(jtv)
+			}
+
+		case "CustomKeyStoreName":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomKeyStoreNameType to be of type string, got %T instead", value)
+				}
+				sv.CustomKeyStoreName = ptr.String(jtv)
+			}
+
+		case "TrustAnchorCertificate":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TrustAnchorCertificateType to be of type string, got %T instead", value)
+				}
+				sv.TrustAnchorCertificate = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentDependencyTimeoutException(v **types.DependencyTimeoutException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.DependencyTimeoutException
+	if *v == nil {
+		sv = &types.DependencyTimeoutException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentDisabledException(v **types.DisabledException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.DisabledException
+	if *v == nil {
+		sv = &types.DisabledException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(v *[]types.EncryptionAlgorithmSpec, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.EncryptionAlgorithmSpec
+	if *v == nil {
+		cv = []types.EncryptionAlgorithmSpec{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.EncryptionAlgorithmSpec
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value)
+			}
+			col = types.EncryptionAlgorithmSpec(jtv)
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentEncryptionContextType(v *map[string]string, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var mv map[string]string
+	if *v == nil {
+		mv = map[string]string{}
+	} else {
+		mv = *v
+	}
+
+	for key, value := range shape {
+		var parsedVal string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected EncryptionContextValue to be of type string, got %T instead", value)
+			}
+			parsedVal = jtv
+		}
+		mv[key] = parsedVal
+
+	}
+	*v = mv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentExpiredImportTokenException(v **types.ExpiredImportTokenException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.ExpiredImportTokenException
+	if *v == nil {
+		sv = &types.ExpiredImportTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentGrantConstraints(v **types.GrantConstraints, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GrantConstraints
+	if *v == nil {
+		sv = &types.GrantConstraints{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "EncryptionContextEquals":
+			if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextEquals, value); err != nil {
+				return err
+			}
+
+		case "EncryptionContextSubset":
+			if err := awsAwsjson11_deserializeDocumentEncryptionContextType(&sv.EncryptionContextSubset, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentGrantList(v *[]types.GrantListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.GrantListEntry
+	if *v == nil {
+		cv = []types.GrantListEntry{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.GrantListEntry
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentGrantListEntry(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentGrantListEntry(v **types.GrantListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.GrantListEntry
+	if *v == nil {
+		sv = &types.GrantListEntry{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Constraints":
+			if err := awsAwsjson11_deserializeDocumentGrantConstraints(&sv.Constraints, value); err != nil {
+				return err
+			}
+
+		case "CreationDate":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "GranteePrincipal":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value)
+				}
+				sv.GranteePrincipal = ptr.String(jtv)
+			}
+
+		case "GrantId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value)
+				}
+				sv.GrantId = ptr.String(jtv)
+			}
+
+		case "IssuingAccount":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value)
fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.IssuingAccount = ptr.String(jtv) + } + + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Name": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantNameType to be of type string, got %T instead", value) + } + sv.Name = ptr.String(jtv) + } + + case "Operations": + if err := awsAwsjson11_deserializeDocumentGrantOperationList(&sv.Operations, value); err != nil { + return err + } + + case "RetiringPrincipal": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PrincipalIdType to be of type string, got %T instead", value) + } + sv.RetiringPrincipal = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentGrantOperationList(v *[]types.GrantOperation, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.GrantOperation + if *v == nil { + cv = []types.GrantOperation{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.GrantOperation + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected GrantOperation to be of type string, got %T instead", value) + } + col = types.GrantOperation(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyException(v **types.IncorrectKeyException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyException + if *v == nil { + sv = &types.IncorrectKeyException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectKeyMaterialException(v **types.IncorrectKeyMaterialException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.IncorrectKeyMaterialException + if *v == nil { + sv = &types.IncorrectKeyMaterialException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentIncorrectTrustAnchorException(v **types.IncorrectTrustAnchorException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + 
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.IncorrectTrustAnchorException
+	if *v == nil {
+		sv = &types.IncorrectTrustAnchorException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidAliasNameException(v **types.InvalidAliasNameException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidAliasNameException
+	if *v == nil {
+		sv = &types.InvalidAliasNameException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidArnException(v **types.InvalidArnException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidArnException
+	if *v == nil {
+		sv = &types.InvalidArnException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidCiphertextException(v **types.InvalidCiphertextException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidCiphertextException
+	if *v == nil {
+		sv = &types.InvalidCiphertextException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidGrantIdException(v **types.InvalidGrantIdException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidGrantIdException
+	if *v == nil {
+		sv = &types.InvalidGrantIdException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidGrantTokenException(v **types.InvalidGrantTokenException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidGrantTokenException
+	if *v == nil {
+		sv = &types.InvalidGrantTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidImportTokenException(v **types.InvalidImportTokenException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidImportTokenException
+	if *v == nil {
+		sv = &types.InvalidImportTokenException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidKeyUsageException(v **types.InvalidKeyUsageException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidKeyUsageException
+	if *v == nil {
+		sv = &types.InvalidKeyUsageException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentInvalidMarkerException(v **types.InvalidMarkerException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.InvalidMarkerException
+	if *v == nil {
+		sv = &types.InvalidMarkerException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentKeyList(v *[]types.KeyListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.KeyListEntry
+	if *v == nil {
+		cv = []types.KeyListEntry{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.KeyListEntry
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentKeyListEntry(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentKeyListEntry(v **types.KeyListEntry, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.KeyListEntry
+	if *v == nil {
+		sv = &types.KeyListEntry{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyArn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ArnType to be of type string, got %T instead", value)
+				}
+				sv.KeyArn = ptr.String(jtv)
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentKeyMetadata(v **types.KeyMetadata, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.KeyMetadata
+	if *v == nil {
+		sv = &types.KeyMetadata{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Arn":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ArnType to be of type string, got %T instead", value)
+				}
+				sv.Arn = ptr.String(jtv)
+			}
+
+		case "AWSAccountId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected AWSAccountIdType to be of type string, got %T instead", value)
+				}
+				sv.AWSAccountId = ptr.String(jtv)
+			}
+
+		case "CloudHsmClusterId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CloudHsmClusterIdType to be of type string, got %T instead", value)
+				}
+				sv.CloudHsmClusterId = ptr.String(jtv)
+			}
+
+		case "CreationDate":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.CreationDate = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "CustomerMasterKeySpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomerMasterKeySpec to be of type string, got %T instead", value)
+				}
+				sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv)
+			}
+
+		case "CustomKeyStoreId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value)
+				}
+				sv.CustomKeyStoreId = ptr.String(jtv)
+			}
+
+		case "DeletionDate":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "Description":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected DescriptionType to be of type string, got %T instead", value)
+				}
+				sv.Description = ptr.String(jtv)
+			}
+
+		case "Enabled":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Enabled = jtv
+			}
+
+		case "EncryptionAlgorithms":
+			if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil {
+				return err
+			}
+
+		case "ExpirationModel":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ExpirationModelType to be of type string, got %T instead", value)
+				}
+				sv.ExpirationModel = types.ExpirationModelType(jtv)
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "KeyManager":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyManagerType to be of type string, got %T instead", value)
+				}
+				sv.KeyManager = types.KeyManagerType(jtv)
+			}
+
+		case "KeySpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value)
+				}
+				sv.KeySpec = types.KeySpec(jtv)
+			}
+
+		case "KeyState":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyState to be of type string, got %T instead", value)
+				}
+				sv.KeyState = types.KeyState(jtv)
+			}
+
+		case "KeyUsage":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value)
+				}
+				sv.KeyUsage = types.KeyUsageType(jtv)
+			}
+
+		case "MacAlgorithms":
+			if err := awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(&sv.MacAlgorithms, value); err != nil {
+				return err
+			}
+
+		case "MultiRegion":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected NullableBooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.MultiRegion = ptr.Bool(jtv)
+			}
+
+		case "MultiRegionConfiguration":
+			if err := awsAwsjson11_deserializeDocumentMultiRegionConfiguration(&sv.MultiRegionConfiguration, value); err != nil {
+				return err
+			}
+
+		case "Origin":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected OriginType to be of type string, got %T instead", value)
+				}
+				sv.Origin = types.OriginType(jtv)
+			}
+
+		case "PendingDeletionWindowInDays":
+			if value != nil {
+				jtv, ok := value.(json.Number)
+				if !ok {
+					return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value)
+				}
+				i64, err := jtv.Int64()
+				if err != nil {
+					return err
+				}
+				sv.PendingDeletionWindowInDays = ptr.Int32(int32(i64))
+			}
+
+		case "SigningAlgorithms":
+			if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil {
+				return err
+			}
+
+		case "ValidTo":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
json.Number: + f64, err := jtv.Float64() + if err != nil { + return err + } + sv.ValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64)) + + default: + return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value) + + } + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKeyUnavailableException(v **types.KeyUnavailableException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KeyUnavailableException + if *v == nil { + sv = &types.KeyUnavailableException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInternalException(v **types.KMSInternalException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInternalException + if *v == nil { + sv = &types.KMSInternalException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidMacException(v **types.KMSInvalidMacException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidMacException + if *v == nil { + sv = &types.KMSInvalidMacException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentKMSInvalidSignatureException(v **types.KMSInvalidSignatureException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidSignatureException + if *v == nil { + sv = &types.KMSInvalidSignatureException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func 
awsAwsjson11_deserializeDocumentKMSInvalidStateException(v **types.KMSInvalidStateException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.KMSInvalidStateException + if *v == nil { + sv = &types.KMSInvalidStateException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.LimitExceededException + if *v == nil { + sv = &types.LimitExceededException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMacAlgorithmSpecList(v *[]types.MacAlgorithmSpec, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MacAlgorithmSpec + if *v == nil { + cv = []types.MacAlgorithmSpec{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MacAlgorithmSpec + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + col = types.MacAlgorithmSpec(jtv) + } + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentMalformedPolicyDocumentException(v **types.MalformedPolicyDocumentException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MalformedPolicyDocumentException + if *v == nil { + sv = &types.MalformedPolicyDocumentException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionConfiguration(v **types.MultiRegionConfiguration, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", 
value) + } + + var sv *types.MultiRegionConfiguration + if *v == nil { + sv = &types.MultiRegionConfiguration{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "MultiRegionKeyType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MultiRegionKeyType to be of type string, got %T instead", value) + } + sv.MultiRegionKeyType = types.MultiRegionKeyType(jtv) + } + + case "PrimaryKey": + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&sv.PrimaryKey, value); err != nil { + return err + } + + case "ReplicaKeys": + if err := awsAwsjson11_deserializeDocumentMultiRegionKeyList(&sv.ReplicaKeys, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKey(v **types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.MultiRegionKey + if *v == nil { + sv = &types.MultiRegionKey{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Arn": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ArnType to be of type string, got %T instead", value) + } + sv.Arn = ptr.String(jtv) + } + + case "Region": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RegionType to be of type string, got %T instead", value) + } + sv.Region = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentMultiRegionKeyList(v *[]types.MultiRegionKey, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []types.MultiRegionKey + if *v == nil { + cv = []types.MultiRegionKey{} + } else { + cv = *v + } + + for _, value := range shape { + var col types.MultiRegionKey + destAddr := &col + if err := awsAwsjson11_deserializeDocumentMultiRegionKey(&destAddr, value); err != nil { + return err + } + col = *destAddr + cv = append(cv, col) + + } + *v = cv + return nil +} + +func awsAwsjson11_deserializeDocumentNotFoundException(v **types.NotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.NotFoundException + if *v == nil { + sv = &types.NotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeDocumentPolicyNameList(v *[]string, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.([]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var cv []string + 
+	if *v == nil {
+		cv = []string{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col string
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected PolicyNameType to be of type string, got %T instead", value)
+			}
+			col = jtv
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(v *[]types.SigningAlgorithmSpec, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.SigningAlgorithmSpec
+	if *v == nil {
+		cv = []types.SigningAlgorithmSpec{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.SigningAlgorithmSpec
+		if value != nil {
+			jtv, ok := value.(string)
+			if !ok {
+				return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value)
+			}
+			col = types.SigningAlgorithmSpec(jtv)
+		}
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTag(v **types.Tag, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.Tag
+	if *v == nil {
+		sv = &types.Tag{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "TagKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TagKeyType to be of type string, got %T instead", value)
+				}
+				sv.TagKey = ptr.String(jtv)
+			}
+
+		case "TagValue":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected TagValueType to be of type string, got %T instead", value)
+				}
+				sv.TagValue = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTagException(v **types.TagException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.TagException
+	if *v == nil {
+		sv = &types.TagException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.([]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var cv []types.Tag
+	if *v == nil {
+		cv = []types.Tag{}
+	} else {
+		cv = *v
+	}
+
+	for _, value := range shape {
+		var col types.Tag
+		destAddr := &col
+		if err := awsAwsjson11_deserializeDocumentTag(&destAddr, value); err != nil {
+			return err
+		}
+		col = *destAddr
+		cv = append(cv, col)
+
+	}
+	*v = cv
+	return nil
+}
+
+func awsAwsjson11_deserializeDocumentUnsupportedOperationException(v **types.UnsupportedOperationException, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *types.UnsupportedOperationException
+	if *v == nil {
+		sv = &types.UnsupportedOperationException{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "message":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected ErrorMessageType to be of type string, got %T instead", value)
+				}
+				sv.Message = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCancelKeyDeletionOutput(v **CancelKeyDeletionOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CancelKeyDeletionOutput
+	if *v == nil {
+		sv = &CancelKeyDeletionOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentConnectCustomKeyStoreOutput(v **ConnectCustomKeyStoreOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ConnectCustomKeyStoreOutput
+	if *v == nil {
+		sv = &ConnectCustomKeyStoreOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCreateCustomKeyStoreOutput(v **CreateCustomKeyStoreOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateCustomKeyStoreOutput
+	if *v == nil {
+		sv = &CreateCustomKeyStoreOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CustomKeyStoreId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomKeyStoreIdType to be of type string, got %T instead", value)
+				}
+				sv.CustomKeyStoreId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCreateGrantOutput(v **CreateGrantOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateGrantOutput
+	if *v == nil {
+		sv = &CreateGrantOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "GrantId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected GrantIdType to be of type string, got %T instead", value)
+				}
+				sv.GrantId = ptr.String(jtv)
+			}
+
+		case "GrantToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected GrantTokenType to be of type string, got %T instead", value)
+				}
+				sv.GrantToken = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentCreateKeyOutput(v **CreateKeyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *CreateKeyOutput
+	if *v == nil {
+		sv = &CreateKeyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyMetadata":
+			if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDecryptOutput(v **DecryptOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DecryptOutput
+	if *v == nil {
+		sv = &DecryptOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "EncryptionAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value)
+				}
+				sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv)
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "Plaintext":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PlaintextType, %w", err)
+				}
+				sv.Plaintext = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDeleteCustomKeyStoreOutput(v **DeleteCustomKeyStoreOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DeleteCustomKeyStoreOutput
+	if *v == nil {
+		sv = &DeleteCustomKeyStoreOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeCustomKeyStoresOutput(v **DescribeCustomKeyStoresOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeCustomKeyStoresOutput
+	if *v == nil {
+		sv = &DescribeCustomKeyStoresOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CustomKeyStores":
+			if err := awsAwsjson11_deserializeDocumentCustomKeyStoresList(&sv.CustomKeyStores, value); err != nil {
+				return err
+			}
+
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDescribeKeyOutput(v **DescribeKeyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DescribeKeyOutput
+	if *v == nil {
+		sv = &DescribeKeyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyMetadata":
+			if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.KeyMetadata, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentDisconnectCustomKeyStoreOutput(v **DisconnectCustomKeyStoreOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *DisconnectCustomKeyStoreOutput
+	if *v == nil {
+		sv = &DisconnectCustomKeyStoreOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentEncryptOutput(v **EncryptOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *EncryptOutput
+	if *v == nil {
+		sv = &EncryptOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.CiphertextBlob = dv
+			}
+
+		case "EncryptionAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value)
+				}
+				sv.EncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv)
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateDataKeyOutput(v **GenerateDataKeyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateDataKeyOutput
+	if *v == nil {
+		sv = &GenerateDataKeyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.CiphertextBlob = dv
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "Plaintext":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PlaintextType, %w", err)
+				}
+				sv.Plaintext = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairOutput(v **GenerateDataKeyPairOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateDataKeyPairOutput
+	if *v == nil {
+		sv = &GenerateDataKeyPairOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "KeyPairSpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value)
+				}
+				sv.KeyPairSpec = types.DataKeyPairSpec(jtv)
+			}
+
+		case "PrivateKeyCiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.PrivateKeyCiphertextBlob = dv
+			}
+
+		case "PrivateKeyPlaintext":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PlaintextType, %w", err)
+				}
+				sv.PrivateKeyPlaintext = dv
+			}
+
+		case "PublicKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err)
+				}
+				sv.PublicKey = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateDataKeyPairWithoutPlaintextOutput(v **GenerateDataKeyPairWithoutPlaintextOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateDataKeyPairWithoutPlaintextOutput
+	if *v == nil {
+		sv = &GenerateDataKeyPairWithoutPlaintextOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "KeyPairSpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected DataKeyPairSpec to be of type string, got %T instead", value)
+				}
+				sv.KeyPairSpec = types.DataKeyPairSpec(jtv)
+			}
+
+		case "PrivateKeyCiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.PrivateKeyCiphertextBlob = dv
+			}
+
+		case "PublicKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err)
+				}
+				sv.PublicKey = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateDataKeyWithoutPlaintextOutput(v **GenerateDataKeyWithoutPlaintextOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateDataKeyWithoutPlaintextOutput
+	if *v == nil {
+		sv = &GenerateDataKeyWithoutPlaintextOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.CiphertextBlob = dv
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateMacOutput(v **GenerateMacOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateMacOutput
+	if *v == nil {
+		sv = &GenerateMacOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "Mac":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.Mac = dv
+			}
+
+		case "MacAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value)
+				}
+				sv.MacAlgorithm = types.MacAlgorithmSpec(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGenerateRandomOutput(v **GenerateRandomOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GenerateRandomOutput
+	if *v == nil {
+		sv = &GenerateRandomOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Plaintext":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PlaintextType, %w", err)
+				}
+				sv.Plaintext = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGetKeyPolicyOutput(v **GetKeyPolicyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GetKeyPolicyOutput
+	if *v == nil {
+		sv = &GetKeyPolicyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Policy":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value)
+				}
+				sv.Policy = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGetKeyRotationStatusOutput(v **GetKeyRotationStatusOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GetKeyRotationStatusOutput
+	if *v == nil {
+		sv = &GetKeyRotationStatusOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "KeyRotationEnabled":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.KeyRotationEnabled = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGetParametersForImportOutput(v **GetParametersForImportOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GetParametersForImportOutput
+	if *v == nil {
+		sv = &GetParametersForImportOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ImportToken":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.ImportToken = dv
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "ParametersValidTo":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.ParametersValidTo = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "PublicKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PlaintextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PlaintextType, %w", err)
+				}
+				sv.PublicKey = dv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentGetPublicKeyOutput(v **GetPublicKeyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *GetPublicKeyOutput
+	if *v == nil {
+		sv = &GetPublicKeyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CustomerMasterKeySpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CustomerMasterKeySpec to be of type string, got %T instead", value)
+				}
+				sv.CustomerMasterKeySpec = types.CustomerMasterKeySpec(jtv)
+			}
+
+		case "EncryptionAlgorithms":
+			if err := awsAwsjson11_deserializeDocumentEncryptionAlgorithmSpecList(&sv.EncryptionAlgorithms, value); err != nil {
+				return err
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "KeySpec":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeySpec to be of type string, got %T instead", value)
+				}
+				sv.KeySpec = types.KeySpec(jtv)
+			}
+
+		case "KeyUsage":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyUsageType to be of type string, got %T instead", value)
+				}
+				sv.KeyUsage = types.KeyUsageType(jtv)
+			}
+
+		case "PublicKey":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PublicKeyType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode PublicKeyType, %w", err)
+				}
+				sv.PublicKey = dv
+			}
+
+		case "SigningAlgorithms":
+			if err := awsAwsjson11_deserializeDocumentSigningAlgorithmSpecList(&sv.SigningAlgorithms, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentImportKeyMaterialOutput(v **ImportKeyMaterialOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ImportKeyMaterialOutput
+	if *v == nil {
+		sv = &ImportKeyMaterialOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListAliasesOutput(v **ListAliasesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListAliasesOutput
+	if *v == nil {
+		sv = &ListAliasesOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Aliases":
+			if err := awsAwsjson11_deserializeDocumentAliasList(&sv.Aliases, value); err != nil {
+				return err
+			}
+
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListGrantsOutput(v **ListGrantsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListGrantsOutput
+	if *v == nil {
+		sv = &ListGrantsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Grants":
+			if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil {
+				return err
+			}
+
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListKeyPoliciesOutput(v **ListKeyPoliciesOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListKeyPoliciesOutput
+	if *v == nil {
+		sv = &ListKeyPoliciesOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "PolicyNames":
+			if err := awsAwsjson11_deserializeDocumentPolicyNameList(&sv.PolicyNames, value); err != nil {
+				return err
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListKeysOutput(v **ListKeysOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListKeysOutput
+	if *v == nil {
+		sv = &ListKeysOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Keys":
+			if err := awsAwsjson11_deserializeDocumentKeyList(&sv.Keys, value); err != nil {
+				return err
+			}
+
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListResourceTagsOutput(v **ListResourceTagsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListResourceTagsOutput
+	if *v == nil {
+		sv = &ListResourceTagsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Tags":
+			if err := awsAwsjson11_deserializeDocumentTagList(&sv.Tags, value); err != nil {
+				return err
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentListRetirableGrantsOutput(v **ListRetirableGrantsOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ListRetirableGrantsOutput
+	if *v == nil {
+		sv = &ListRetirableGrantsOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "Grants":
+			if err := awsAwsjson11_deserializeDocumentGrantList(&sv.Grants, value); err != nil {
+				return err
+			}
+
+		case "NextMarker":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected MarkerType to be of type string, got %T instead", value)
+				}
+				sv.NextMarker = ptr.String(jtv)
+			}
+
+		case "Truncated":
+			if value != nil {
+				jtv, ok := value.(bool)
+				if !ok {
+					return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value)
+				}
+				sv.Truncated = jtv
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentReEncryptOutput(v **ReEncryptOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ReEncryptOutput
+	if *v == nil {
+		sv = &ReEncryptOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "CiphertextBlob":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value)
+				}
+				dv, err := base64.StdEncoding.DecodeString(jtv)
+				if err != nil {
+					return fmt.Errorf("failed to base64 decode CiphertextType, %w", err)
+				}
+				sv.CiphertextBlob = dv
+			}
+
+		case "DestinationEncryptionAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value)
+				}
+				sv.DestinationEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv)
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.KeyId = ptr.String(jtv)
+			}
+
+		case "SourceEncryptionAlgorithm":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected EncryptionAlgorithmSpec to be of type string, got %T instead", value)
+				}
+				sv.SourceEncryptionAlgorithm = types.EncryptionAlgorithmSpec(jtv)
+			}
+
+		case "SourceKeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value)
+				}
+				sv.SourceKeyId = ptr.String(jtv)
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentReplicateKeyOutput(v **ReplicateKeyOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ReplicateKeyOutput
+	if *v == nil {
+		sv = &ReplicateKeyOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "ReplicaKeyMetadata":
+			if err := awsAwsjson11_deserializeDocumentKeyMetadata(&sv.ReplicaKeyMetadata, value); err != nil {
+				return err
+			}
+
+		case "ReplicaPolicy":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+					return fmt.Errorf("expected PolicyType to be of type string, got %T instead", value)
+				}
+				sv.ReplicaPolicy = ptr.String(jtv)
+			}
+
+		case "ReplicaTags":
+			if err := awsAwsjson11_deserializeDocumentTagList(&sv.ReplicaTags, value); err != nil {
+				return err
+			}
+
+		default:
+			_, _ = key, value
+
+		}
+	}
+	*v = sv
+	return nil
+}
+
+func awsAwsjson11_deserializeOpDocumentScheduleKeyDeletionOutput(v **ScheduleKeyDeletionOutput, value interface{}) error {
+	if v == nil {
+		return fmt.Errorf("unexpected nil of type %T", v)
+	}
+	if value == nil {
+		return nil
+	}
+
+	shape, ok := value.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("unexpected JSON type %v", value)
+	}
+
+	var sv *ScheduleKeyDeletionOutput
+	if *v == nil {
+		sv = &ScheduleKeyDeletionOutput{}
+	} else {
+		sv = *v
+	}
+
+	for key, value := range shape {
+		switch key {
+		case "DeletionDate":
+			if value != nil {
+				switch jtv := value.(type) {
+				case json.Number:
+					f64, err := jtv.Float64()
+					if err != nil {
+						return err
+					}
+					sv.DeletionDate = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+				default:
+					return fmt.Errorf("expected DateType to be a JSON Number, got %T instead", value)
+
+				}
+			}
+
+		case "KeyId":
+			if value != nil {
+				jtv, ok := value.(string)
+				if !ok {
+ return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "KeyState": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyState to be of type string, got %T instead", value) + } + sv.KeyState = types.KeyState(jtv) + } + + case "PendingWindowInDays": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected PendingWindowInDaysType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.PendingWindowInDays = ptr.Int32(int32(i64)) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentSignOutput(v **SignOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *SignOutput + if *v == nil { + sv = &SignOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "Signature": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected CiphertextType to be []byte, got %T instead", value) + } + dv, err := base64.StdEncoding.DecodeString(jtv) + if err != nil { + return fmt.Errorf("failed to base64 decode CiphertextType, %w", err) + } + sv.Signature = dv + } + + case "SigningAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentUpdateCustomKeyStoreOutput(v **UpdateCustomKeyStoreOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *UpdateCustomKeyStoreOutput + if *v == nil { + sv = &UpdateCustomKeyStoreOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyMacOutput(v **VerifyMacOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyMacOutput + if *v == nil { + sv = &VerifyMacOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "MacAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected MacAlgorithmSpec to be of type string, got %T instead", value) + } + sv.MacAlgorithm = types.MacAlgorithmSpec(jtv) + } + + case "MacValid": + if value 
!= nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.MacValid = jtv + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsAwsjson11_deserializeOpDocumentVerifyOutput(v **VerifyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *VerifyOutput + if *v == nil { + sv = &VerifyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "KeyId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected KeyIdType to be of type string, got %T instead", value) + } + sv.KeyId = ptr.String(jtv) + } + + case "SignatureValid": + if value != nil { + jtv, ok := value.(bool) + if !ok { + return fmt.Errorf("expected BooleanType to be of type *bool, got %T instead", value) + } + sv.SignatureValid = jtv + } + + case "SigningAlgorithm": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SigningAlgorithmSpec to be of type string, got %T instead", value) + } + sv.SigningAlgorithm = types.SigningAlgorithmSpec(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go new file mode 100644 index 0000000000..9840b2b978 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/doc.go @@ -0,0 +1,78 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package kms provides the API client, operations, and parameter types for AWS Key +// Management Service. +// +// Key Management Service Key Management Service (KMS) is an encryption and key +// management web service. This guide describes the KMS operations that you can +// call programmatically. For general information about KMS, see the Key +// Management Service Developer Guide +// (https://docs.aws.amazon.com/kms/latest/developerguide/). KMS is replacing the +// term customer master key (CMK) with KMS key and KMS key. The concept has not +// changed. To prevent breaking changes, KMS is keeping some variations of this +// term. Amazon Web Services provides SDKs that consist of libraries and sample +// code for various programming languages and platforms (Java, Ruby, .Net, macOS, +// Android, etc.). The SDKs provide a convenient way to create programmatic access +// to KMS and other Amazon Web Services services. For example, the SDKs take care +// of tasks such as signing requests (see below), managing errors, and retrying +// requests automatically. For more information about the Amazon Web Services SDKs, +// including how to download and install them, see Tools for Amazon Web Services +// (http://aws.amazon.com/tools/). We recommend that you use the Amazon Web +// Services SDKs to make programmatic API calls to KMS. If you need to use FIPS +// 140-2 validated cryptographic modules when communicating with Amazon Web +// Services, use the FIPS endpoint in your preferred Amazon Web Services Region. +// For more information about the available FIPS endpoints, see Service endpoints +// (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the Key +// Management Service topic of the Amazon Web Services General Reference. 
All KMS +// API calls must be signed and be transmitted using Transport Layer Security +// (TLS). KMS recommends you always use the latest supported TLS version. Clients +// must also support cipher suites with Perfect Forward Secrecy (PFS) such as +// Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral Diffie-Hellman +// (ECDHE). Most modern systems such as Java 7 and later support these modes. +// Signing Requests Requests must be signed by using an access key ID and a secret +// access key. We strongly recommend that you do not use your Amazon Web Services +// account (root) access key ID and secret key for everyday work with KMS. Instead, +// use the access key ID and secret access key for an IAM user. You can also use +// the Amazon Web Services Security Token Service to generate temporary security +// credentials that you can use to sign requests. All KMS operations require +// Signature Version 4 +// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// Logging API Requests KMS supports CloudTrail, a service that logs Amazon Web +// Services API calls and related events for your Amazon Web Services account and +// delivers them to an Amazon S3 bucket that you specify. By using the information +// collected by CloudTrail, you can determine what requests were made to KMS, who +// made the request, when it was made, and so on. To learn more about CloudTrail, +// including how to turn it on and find your log files, see the CloudTrail User +// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/). Additional +// Resources For more information about credentials and request signing, see the +// following: +// +// * Amazon Web Services Security Credentials +// (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) - +// This topic provides general information about the types of credentials used to +// access Amazon Web Services. +// +// * Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) - +// This section of the IAM User Guide describes how to create and use temporary +// security credentials. +// +// * Signature Version 4 Signing Process +// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) - This +// set of topics walks you through the process of signing a request using an access +// key ID and a secret access key. +// +// Commonly Used API Operations Of the API +// operations discussed in this guide, the following will prove the most useful for +// most applications. You will likely perform operations other than these, such as +// creating keys and assigning policies, by using the console. +// +// * Encrypt +// +// * +// Decrypt +// +// * GenerateDataKey +// +// * GenerateDataKeyWithoutPlaintext +package kms diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go new file mode 100644 index 0000000000..43b65e982c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
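The doc.go above singles out Encrypt, Decrypt, GenerateDataKey, and GenerateDataKeyWithoutPlaintext as the operations most applications call. As a minimal sketch of how this vendored v2 client is typically driven (an assumption for illustration, not part of this change: it presumes the default credential chain is configured, and "alias/my-key" is a placeholder for a real key ID, ARN, or alias), an encrypt/decrypt round trip looks like:

	package main

	import (
		"context"
		"fmt"

		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/kms"
	)

	func main() {
		ctx := context.Background()

		// Resolve region and credentials from the environment, shared config, or IMDS.
		cfg, err := config.LoadDefaultConfig(ctx)
		if err != nil {
			panic(err)
		}
		client := kms.NewFromConfig(cfg)

		keyID := "alias/my-key" // placeholder: key ID, key ARN, or alias
		enc, err := client.Encrypt(ctx, &kms.EncryptInput{
			KeyId:     &keyID,
			Plaintext: []byte("hello"),
		})
		if err != nil {
			panic(err)
		}

		// The Decrypt response body is unmarshaled by the generated
		// awsAwsjson11_deserializeOpDocumentDecryptOutput above, which
		// base64-decodes the Plaintext field into []byte.
		dec, err := client.Decrypt(ctx, &kms.DecryptInput{
			CiphertextBlob: enc.CiphertextBlob,
		})
		if err != nil {
			panic(err)
		}
		fmt.Printf("decrypted: %s\n", dec.Plaintext)
	}

The endpoints.go added next is what lets such a client be pointed at a non-default endpoint (for example a FIPS endpoint, as the package documentation recommends) via its EndpointResolver options.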
+ +package kms + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints. +type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "kms" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + }, "OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. 
+// If awsResolver returns an aws.EndpointNotFoundError, the resolver will use the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json new file mode 100644 index 0000000000..68a551b7b6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/generated.json @@ -0,0 +1,77 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CancelKeyDeletion.go", + "api_op_ConnectCustomKeyStore.go", + "api_op_CreateAlias.go", + "api_op_CreateCustomKeyStore.go", + "api_op_CreateGrant.go", + "api_op_CreateKey.go", + "api_op_Decrypt.go", + "api_op_DeleteAlias.go", + "api_op_DeleteCustomKeyStore.go", + "api_op_DeleteImportedKeyMaterial.go", + "api_op_DescribeCustomKeyStores.go", + "api_op_DescribeKey.go", + "api_op_DisableKey.go", + "api_op_DisableKeyRotation.go", + "api_op_DisconnectCustomKeyStore.go", + "api_op_EnableKey.go", + "api_op_EnableKeyRotation.go", + "api_op_Encrypt.go", + "api_op_GenerateDataKey.go", + "api_op_GenerateDataKeyPair.go", + "api_op_GenerateDataKeyPairWithoutPlaintext.go", + "api_op_GenerateDataKeyWithoutPlaintext.go", + "api_op_GenerateMac.go", + "api_op_GenerateRandom.go", + "api_op_GetKeyPolicy.go", + "api_op_GetKeyRotationStatus.go", + "api_op_GetParametersForImport.go", + "api_op_GetPublicKey.go", + "api_op_ImportKeyMaterial.go", + "api_op_ListAliases.go", + "api_op_ListGrants.go", + "api_op_ListKeyPolicies.go", + "api_op_ListKeys.go", + "api_op_ListResourceTags.go", + "api_op_ListRetirableGrants.go", + "api_op_PutKeyPolicy.go", + "api_op_ReEncrypt.go", + "api_op_ReplicateKey.go", + "api_op_RetireGrant.go", + "api_op_RevokeGrant.go", + "api_op_ScheduleKeyDeletion.go", + "api_op_Sign.go", + "api_op_TagResource.go", + "api_op_UntagResource.go", + "api_op_UpdateAlias.go", + "api_op_UpdateCustomKeyStore.go", + "api_op_UpdateKeyDescription.go", + "api_op_UpdatePrimaryRegion.go", + "api_op_Verify.go", +
"api_op_VerifyMac.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + "internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/enums.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/kms", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go new file mode 100644 index 0000000000..5235d17a88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package kms + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.18.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go new file mode 100644 index 0000000000..58dc054960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/internal/endpoints/endpoints.go @@ -0,0 +1,797 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver KMS endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "af-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "af-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "af-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "af-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + Deprecated: 
aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-central-1", + Variant: endpoints.FIPSVariant, + }: { + 
Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-north-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-north-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-west-3", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "eu-west-3-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-central-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: 
"me-south-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-south-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "me-south-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "sa-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "sa-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-east-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-east-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-west-2", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-west-2-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + 
Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{}, + }, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "kms.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | 
endpoints.DualStackVariant, + }: { + Hostname: "kms-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "kms.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ProdFips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-east-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1-fips", + }: endpoints.Endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: aws.TrueTernary, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go new file mode 100644 index 0000000000..893e4bf05d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/serializers.go @@ -0,0 +1,4067 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
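Every operation serializer in this generated file repeats one awsjson1.1 recipe: resolve the request path against "/", use POST, set Content-Type to application/x-amz-json-1.1, name the operation in an X-Amz-Target header on the TrentService target, and attach the JSON document as the body stream. A hypothetical helper distilling that repeated body, purely to make the pattern easier to review (it is not part of the generated code, which deliberately inlines this logic per operation):

package kms

import (
	"bytes"
	"path"

	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// serializeAwsJson11 is a hypothetical distillation of the per-operation
// serializer bodies below; the generated code additionally re-appends a
// trailing slash when joining onto a non-root prefix path, which this
// sketch omits.
func serializeAwsJson11(request *smithyhttp.Request, target string, body []byte) (*smithyhttp.Request, error) {
	// Every awsjson1.1 operation posts to "/" relative to any prefix path.
	operationPath := "/"
	if len(request.Request.URL.Path) == 0 {
		request.Request.URL.Path = operationPath
	} else {
		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
	}
	request.Request.Method = "POST"

	// The protocol headers are identical for every operation; only the
	// target (e.g. "TrentService.CancelKeyDeletion") varies.
	request.Header.Set("Content-Type", "application/x-amz-json-1.1")
	request.Header.Set("X-Amz-Target", target)

	// The marshaled JSON document becomes the request body stream.
	return request.SetStream(bytes.NewReader(body))
}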
+ +package kms + +import ( + "bytes" + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithytime "github.com/aws/smithy-go/time" + smithyhttp "github.com/aws/smithy-go/transport/http" + "path" +) + +type awsAwsjson11_serializeOpCancelKeyDeletion struct { +} + +func (*awsAwsjson11_serializeOpCancelKeyDeletion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCancelKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CancelKeyDeletionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CancelKeyDeletion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpConnectCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpConnectCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpConnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && 
operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ConnectCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateAlias struct { +} + +func (*awsAwsjson11_serializeOpCreateAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpCreateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + 
request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateGrant struct { +} + +func (*awsAwsjson11_serializeOpCreateGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if 
request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpCreateKey struct { +} + +func (*awsAwsjson11_serializeOpCreateKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpCreateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.CreateKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentCreateKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDecrypt struct { +} + +func (*awsAwsjson11_serializeOpDecrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDecrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DecryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + 
httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Decrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDecryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteAlias struct { +} + +func (*awsAwsjson11_serializeOpDeleteAlias) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpDeleteCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDeleteImportedKeyMaterial struct { +} + +func (*awsAwsjson11_serializeOpDeleteImportedKeyMaterial) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDeleteImportedKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DeleteImportedKeyMaterial") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type 
awsAwsjson11_serializeOpDescribeCustomKeyStores struct { +} + +func (*awsAwsjson11_serializeOpDescribeCustomKeyStores) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeCustomKeyStores) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeCustomKeyStoresInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeCustomKeyStores") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDescribeKey struct { +} + +func (*awsAwsjson11_serializeOpDescribeKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDescribeKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + 
httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DescribeKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDescribeKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisableKey struct { +} + +func (*awsAwsjson11_serializeOpDisableKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisableKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDisableKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisableKeyRotation struct { +} + +func (*awsAwsjson11_serializeOpDisableKeyRotation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisableKeyRotationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if 
len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisableKeyRotation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpDisconnectCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpDisconnectCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpDisconnectCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.DisconnectCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEnableKey struct { +} + +func (*awsAwsjson11_serializeOpEnableKey) ID() string { + return 
"OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEnableKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EnableKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEnableKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEnableKeyRotation struct { +} + +func (*awsAwsjson11_serializeOpEnableKeyRotation) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEnableKeyRotation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EnableKeyRotationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.EnableKeyRotation") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(input, 
jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpEncrypt struct { +} + +func (*awsAwsjson11_serializeOpEncrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*EncryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Encrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentEncryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKey struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] 
== '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyPair struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyPair) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyPair) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyPairInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPair") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyPairWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, 
metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyPairWithoutPlaintext") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateDataKeyWithoutPlaintext) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateDataKeyWithoutPlaintext") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(input, jsonEncoder.Value); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateMac struct { +} + +func (*awsAwsjson11_serializeOpGenerateMac) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateMacInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateMac") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateMacInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGenerateRandom struct { +} + +func (*awsAwsjson11_serializeOpGenerateRandom) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGenerateRandom) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GenerateRandomInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path 
+= "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GenerateRandom") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGenerateRandomInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetKeyPolicy struct { +} + +func (*awsAwsjson11_serializeOpGetKeyPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetKeyPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetKeyRotationStatus struct { +} + +func (*awsAwsjson11_serializeOpGetKeyRotationStatus) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetKeyRotationStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, 
metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetKeyRotationStatusInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetKeyRotationStatus") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetParametersForImport struct { +} + +func (*awsAwsjson11_serializeOpGetParametersForImport) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetParametersForImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetParametersForImportInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetParametersForImport") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetParametersForImportInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if 
request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpGetPublicKey struct { +} + +func (*awsAwsjson11_serializeOpGetPublicKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpGetPublicKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetPublicKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.GetPublicKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentGetPublicKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpImportKeyMaterial struct { +} + +func (*awsAwsjson11_serializeOpImportKeyMaterial) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpImportKeyMaterial) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ImportKeyMaterialInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return 
out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ImportKeyMaterial") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListAliases struct { +} + +func (*awsAwsjson11_serializeOpListAliases) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListAliases) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListAliasesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListAliases") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListAliasesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListGrants struct { +} + +func (*awsAwsjson11_serializeOpListGrants) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListGrantsInput) + _ = input + if !ok { + return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListGrants") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListGrantsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListKeyPolicies struct { +} + +func (*awsAwsjson11_serializeOpListKeyPolicies) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListKeyPolicies) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListKeyPoliciesInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeyPolicies") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListKeys struct { +} + +func 
(*awsAwsjson11_serializeOpListKeys) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListKeys) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListKeysInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListKeys") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListKeysInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListResourceTags struct { +} + +func (*awsAwsjson11_serializeOpListResourceTags) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListResourceTags) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListResourceTagsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListResourceTags") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentListResourceTagsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpListRetirableGrants struct { +} + +func (*awsAwsjson11_serializeOpListRetirableGrants) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpListRetirableGrants) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListRetirableGrantsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ListRetirableGrants") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpPutKeyPolicy struct { +} + +func (*awsAwsjson11_serializeOpPutKeyPolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpPutKeyPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutKeyPolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = 
path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.PutKeyPolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpReEncrypt struct { +} + +func (*awsAwsjson11_serializeOpReEncrypt) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpReEncrypt) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ReEncryptInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReEncrypt") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentReEncryptInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpReplicateKey struct { +} + +func (*awsAwsjson11_serializeOpReplicateKey) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpReplicateKey) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ReplicateKeyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ReplicateKey") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentReplicateKeyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRetireGrant struct { +} + +func (*awsAwsjson11_serializeOpRetireGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRetireGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RetireGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RetireGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRetireGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } 
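// Editor's note: every serializer in this generated file is the same template stamped
// out once per operation. Each HandleSerialize asserts the transport is an
// *smithyhttp.Request, joins the operation path "/" onto the request URL, forces the
// method to POST, sets the awsJson1_1 Content-Type and X-Amz-Target headers, marshals
// the typed input with smithyjson, and attaches the encoded payload as the request
// stream. A condensed sketch of that shared shape (hypothetical helper, assuming the
// smithy-go types imported by this file; not part of the vendored code):
//
//	func serializeAwsJson11(req *smithyhttp.Request, target string, payload []byte) (*smithyhttp.Request, error) {
//		req.Request.Method = "POST"                                  // all awsJson1_1 calls are POST to "/"
//		req.Header.Set("Content-Type", "application/x-amz-json-1.1") // protocol content type
//		req.Header.Set("X-Amz-Target", target)                       // e.g. "TrentService.RetireGrant"
//		return req.SetStream(bytes.NewReader(payload))               // JSON body becomes the request stream
//	}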
+ + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpRevokeGrant struct { +} + +func (*awsAwsjson11_serializeOpRevokeGrant) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpRevokeGrant) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RevokeGrantInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.RevokeGrant") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentRevokeGrantInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpScheduleKeyDeletion struct { +} + +func (*awsAwsjson11_serializeOpScheduleKeyDeletion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpScheduleKeyDeletion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ScheduleKeyDeletionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + 
return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.ScheduleKeyDeletion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpSign struct { +} + +func (*awsAwsjson11_serializeOpSign) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpSign) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*SignInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Sign") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentSignInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpTagResource struct { +} + +func (*awsAwsjson11_serializeOpTagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*TagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown 
input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.TagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUntagResource struct { +} + +func (*awsAwsjson11_serializeOpUntagResource) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UntagResourceInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UntagResource") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateAlias struct { +} + +func (*awsAwsjson11_serializeOpUpdateAlias) ID() string { + return 
"OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateAlias) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateAliasInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateAlias") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateAliasInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateCustomKeyStore struct { +} + +func (*awsAwsjson11_serializeOpUpdateCustomKeyStore) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateCustomKeyStore) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateCustomKeyStore") + + jsonEncoder := smithyjson.NewEncoder() + if err := 
awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdateKeyDescription struct { +} + +func (*awsAwsjson11_serializeOpUpdateKeyDescription) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdateKeyDescription) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdateKeyDescription") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpUpdatePrimaryRegion struct { +} + +func (*awsAwsjson11_serializeOpUpdatePrimaryRegion) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpUpdatePrimaryRegion) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } 
else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.UpdatePrimaryRegion") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerify struct { +} + +func (*awsAwsjson11_serializeOpVerify) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpVerify) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.Verify") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + +type awsAwsjson11_serializeOpVerifyMac struct { +} + +func (*awsAwsjson11_serializeOpVerifyMac) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson11_serializeOpVerifyMac) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out 
middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*VerifyMacInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.1") + httpBindingEncoder.SetHeader("X-Amz-Target").String("TrentService.VerifyMac") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson11_serializeOpDocumentVerifyMacInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsAwsjson11_serializeDocumentEncryptionContextType(v map[string]string, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + for key := range v { + om := object.Key(key) + om.String(v[key]) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantConstraints(v *types.GrantConstraints, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContextEquals != nil { + ok := object.Key("EncryptionContextEquals") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextEquals, ok); err != nil { + return err + } + } + + if v.EncryptionContextSubset != nil { + ok := object.Key("EncryptionContextSubset") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContextSubset, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeDocumentGrantOperationList(v []types.GrantOperation, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(string(v[i])) + } + return nil +} + +func awsAwsjson11_serializeDocumentGrantTokenList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.TagKey != nil { + ok := object.Key("TagKey") + ok.String(*v.TagKey) + } + + if v.TagValue != nil { + ok := object.Key("TagValue") + ok.String(*v.TagValue) + } + + return nil +} + +func awsAwsjson11_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() 
+ + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} + +func awsAwsjson11_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + if err := awsAwsjson11_serializeDocumentTag(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsAwsjson11_serializeOpDocumentCancelKeyDeletionInput(v *CancelKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateAliasInput(v *CreateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateCustomKeyStoreInput(v *CreateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.TrustAnchorCertificate != nil { + ok := object.Key("TrustAnchorCertificate") + ok.String(*v.TrustAnchorCertificate) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateGrantInput(v *CreateGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Constraints != nil { + ok := object.Key("Constraints") + if err := awsAwsjson11_serializeDocumentGrantConstraints(v.Constraints, ok); err != nil { + return err + } + } + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Name != nil { + ok := object.Key("Name") + ok.String(*v.Name) + } + + if v.Operations != nil { + ok := object.Key("Operations") + if err := awsAwsjson11_serializeDocumentGrantOperationList(v.Operations, ok); err != nil { + return err + } + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentCreateKeyInput(v *CreateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if len(v.CustomerMasterKeySpec) > 0 { + ok := object.Key("CustomerMasterKeySpec") + ok.String(string(v.CustomerMasterKeySpec)) + } + + if v.CustomKeyStoreId != nil { + ok := 
object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if len(v.KeyUsage) > 0 { + ok := object.Key("KeyUsage") + ok.String(string(v.KeyUsage)) + } + + if v.MultiRegion != nil { + ok := object.Key("MultiRegion") + ok.Boolean(*v.MultiRegion) + } + + if len(v.Origin) > 0 { + ok := object.Key("Origin") + ok.String(string(v.Origin)) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDecryptInput(v *DecryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteAliasInput(v *DeleteAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeCustomKeyStoresInput(v *DescribeCustomKeyStoresInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.CustomKeyStoreName != nil { + ok := object.Key("CustomKeyStoreName") + ok.String(*v.CustomKeyStoreName) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDescribeKeyInput(v *DescribeKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentDisableKeyInput(v *DisableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisableKeyRotationInput(v *DisableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyInput(v *EnableKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEnableKeyRotationInput(v *EnableKeyRotationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentEncryptInput(v *EncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if len(v.EncryptionAlgorithm) > 0 { + ok := object.Key("EncryptionAlgorithm") + ok.String(string(v.EncryptionAlgorithm)) + } + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Plaintext != nil { + ok := object.Key("Plaintext") + ok.Base64EncodeBytes(v.Plaintext) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyInput(v *GenerateDataKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairInput(v *GenerateDataKeyPairInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); 
err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeyPairSpec) > 0 { + ok := object.Key("KeyPairSpec") + ok.String(string(v.KeyPairSpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptionContext != nil { + ok := object.Key("EncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.EncryptionContext, ok); err != nil { + return err + } + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.KeySpec) > 0 { + ok := object.Key("KeySpec") + ok.String(string(v.KeySpec)) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateMacInput(v *GenerateMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGenerateRandomInput(v *GenerateRandomInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CustomKeyStoreId != nil { + ok := object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.NumberOfBytes != nil { + ok := object.Key("NumberOfBytes") + ok.Integer(*v.NumberOfBytes) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyPolicyInput(v *GetKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetKeyRotationStatusInput(v *GetKeyRotationStatusInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func 
awsAwsjson11_serializeOpDocumentGetParametersForImportInput(v *GetParametersForImportInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if len(v.WrappingAlgorithm) > 0 { + ok := object.Key("WrappingAlgorithm") + ok.String(string(v.WrappingAlgorithm)) + } + + if len(v.WrappingKeySpec) > 0 { + ok := object.Key("WrappingKeySpec") + ok.String(string(v.WrappingKeySpec)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentGetPublicKeyInput(v *GetPublicKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentImportKeyMaterialInput(v *ImportKeyMaterialInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.EncryptedKeyMaterial != nil { + ok := object.Key("EncryptedKeyMaterial") + ok.Base64EncodeBytes(v.EncryptedKeyMaterial) + } + + if len(v.ExpirationModel) > 0 { + ok := object.Key("ExpirationModel") + ok.String(string(v.ExpirationModel)) + } + + if v.ImportToken != nil { + ok := object.Key("ImportToken") + ok.Base64EncodeBytes(v.ImportToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.ValidTo != nil { + ok := object.Key("ValidTo") + ok.Double(smithytime.FormatEpochSeconds(*v.ValidTo)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListAliasesInput(v *ListAliasesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListGrantsInput(v *ListGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GranteePrincipal != nil { + ok := object.Key("GranteePrincipal") + ok.String(*v.GranteePrincipal) + } + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeyPoliciesInput(v *ListKeyPoliciesInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListKeysInput(v *ListKeysInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListResourceTagsInput(v *ListResourceTagsInput, 
value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentListRetirableGrantsInput(v *ListRetirableGrantsInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Limit != nil { + ok := object.Key("Limit") + ok.Integer(*v.Limit) + } + + if v.Marker != nil { + ok := object.Key("Marker") + ok.String(*v.Marker) + } + + if v.RetiringPrincipal != nil { + ok := object.Key("RetiringPrincipal") + ok.String(*v.RetiringPrincipal) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentPutKeyPolicyInput(v *PutKeyPolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.PolicyName != nil { + ok := object.Key("PolicyName") + ok.String(*v.PolicyName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReEncryptInput(v *ReEncryptInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CiphertextBlob != nil { + ok := object.Key("CiphertextBlob") + ok.Base64EncodeBytes(v.CiphertextBlob) + } + + if len(v.DestinationEncryptionAlgorithm) > 0 { + ok := object.Key("DestinationEncryptionAlgorithm") + ok.String(string(v.DestinationEncryptionAlgorithm)) + } + + if v.DestinationEncryptionContext != nil { + ok := object.Key("DestinationEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.DestinationEncryptionContext, ok); err != nil { + return err + } + } + + if v.DestinationKeyId != nil { + ok := object.Key("DestinationKeyId") + ok.String(*v.DestinationKeyId) + } + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if len(v.SourceEncryptionAlgorithm) > 0 { + ok := object.Key("SourceEncryptionAlgorithm") + ok.String(string(v.SourceEncryptionAlgorithm)) + } + + if v.SourceEncryptionContext != nil { + ok := object.Key("SourceEncryptionContext") + if err := awsAwsjson11_serializeDocumentEncryptionContextType(v.SourceEncryptionContext, ok); err != nil { + return err + } + } + + if v.SourceKeyId != nil { + ok := object.Key("SourceKeyId") + ok.String(*v.SourceKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentReplicateKeyInput(v *ReplicateKeyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.BypassPolicyLockoutSafetyCheck { + ok := object.Key("BypassPolicyLockoutSafetyCheck") + ok.Boolean(v.BypassPolicyLockoutSafetyCheck) + } + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.ReplicaRegion != nil { + ok := object.Key("ReplicaRegion") + ok.String(*v.ReplicaRegion) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := 
awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRetireGrantInput(v *RetireGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.GrantToken != nil { + ok := object.Key("GrantToken") + ok.String(*v.GrantToken) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentRevokeGrantInput(v *RevokeGrantInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantId != nil { + ok := object.Key("GrantId") + ok.String(*v.GrantId) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PendingWindowInDays != nil { + ok := object.Key("PendingWindowInDays") + ok.Integer(*v.PendingWindowInDays) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentSignInput(v *SignInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Tags != nil { + ok := object.Key("Tags") + if err := awsAwsjson11_serializeDocumentTagList(v.Tags, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.TagKeys != nil { + ok := object.Key("TagKeys") + if err := awsAwsjson11_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil { + return err + } + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateAliasInput(v *UpdateAliasInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.AliasName != nil { + ok := object.Key("AliasName") + ok.String(*v.AliasName) + } + + if v.TargetKeyId != nil { + ok := object.Key("TargetKeyId") + ok.String(*v.TargetKeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateCustomKeyStoreInput(v *UpdateCustomKeyStoreInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.CloudHsmClusterId != nil { + ok := object.Key("CloudHsmClusterId") + ok.String(*v.CloudHsmClusterId) + } + + if v.CustomKeyStoreId != nil { + ok := 
object.Key("CustomKeyStoreId") + ok.String(*v.CustomKeyStoreId) + } + + if v.KeyStorePassword != nil { + ok := object.Key("KeyStorePassword") + ok.String(*v.KeyStorePassword) + } + + if v.NewCustomKeyStoreName != nil { + ok := object.Key("NewCustomKeyStoreName") + ok.String(*v.NewCustomKeyStoreName) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.Description != nil { + ok := object.Key("Description") + ok.String(*v.Description) + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.PrimaryRegion != nil { + ok := object.Key("PrimaryRegion") + ok.String(*v.PrimaryRegion) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyInput(v *VerifyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + if len(v.MessageType) > 0 { + ok := object.Key("MessageType") + ok.String(string(v.MessageType)) + } + + if v.Signature != nil { + ok := object.Key("Signature") + ok.Base64EncodeBytes(v.Signature) + } + + if len(v.SigningAlgorithm) > 0 { + ok := object.Key("SigningAlgorithm") + ok.String(string(v.SigningAlgorithm)) + } + + return nil +} + +func awsAwsjson11_serializeOpDocumentVerifyMacInput(v *VerifyMacInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.GrantTokens != nil { + ok := object.Key("GrantTokens") + if err := awsAwsjson11_serializeDocumentGrantTokenList(v.GrantTokens, ok); err != nil { + return err + } + } + + if v.KeyId != nil { + ok := object.Key("KeyId") + ok.String(*v.KeyId) + } + + if v.Mac != nil { + ok := object.Key("Mac") + ok.Base64EncodeBytes(v.Mac) + } + + if len(v.MacAlgorithm) > 0 { + ok := object.Key("MacAlgorithm") + ok.String(string(v.MacAlgorithm)) + } + + if v.Message != nil { + ok := object.Key("Message") + ok.Base64EncodeBytes(v.Message) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go new file mode 100644 index 0000000000..b1fff41157 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/enums.go @@ -0,0 +1,491 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +type AlgorithmSpec string + +// Enum values for AlgorithmSpec +const ( + AlgorithmSpecRsaesPkcs1V15 AlgorithmSpec = "RSAES_PKCS1_V1_5" + AlgorithmSpecRsaesOaepSha1 AlgorithmSpec = "RSAES_OAEP_SHA_1" + AlgorithmSpecRsaesOaepSha256 AlgorithmSpec = "RSAES_OAEP_SHA_256" +) + +// Values returns all known values for AlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (AlgorithmSpec) Values() []AlgorithmSpec { + return []AlgorithmSpec{ + "RSAES_PKCS1_V1_5", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + } +} + +type ConnectionErrorCodeType string + +// Enum values for ConnectionErrorCodeType +const ( + ConnectionErrorCodeTypeInvalidCredentials ConnectionErrorCodeType = "INVALID_CREDENTIALS" + ConnectionErrorCodeTypeClusterNotFound ConnectionErrorCodeType = "CLUSTER_NOT_FOUND" + ConnectionErrorCodeTypeNetworkErrors ConnectionErrorCodeType = "NETWORK_ERRORS" + ConnectionErrorCodeTypeInternalError ConnectionErrorCodeType = "INTERNAL_ERROR" + ConnectionErrorCodeTypeInsufficientCloudhsmHsms ConnectionErrorCodeType = "INSUFFICIENT_CLOUDHSM_HSMS" + ConnectionErrorCodeTypeUserLockedOut ConnectionErrorCodeType = "USER_LOCKED_OUT" + ConnectionErrorCodeTypeUserNotFound ConnectionErrorCodeType = "USER_NOT_FOUND" + ConnectionErrorCodeTypeUserLoggedIn ConnectionErrorCodeType = "USER_LOGGED_IN" + ConnectionErrorCodeTypeSubnetNotFound ConnectionErrorCodeType = "SUBNET_NOT_FOUND" + ConnectionErrorCodeTypeInsufficientFreeAddressesInSubnet ConnectionErrorCodeType = "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET" +) + +// Values returns all known values for ConnectionErrorCodeType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ConnectionErrorCodeType) Values() []ConnectionErrorCodeType { + return []ConnectionErrorCodeType{ + "INVALID_CREDENTIALS", + "CLUSTER_NOT_FOUND", + "NETWORK_ERRORS", + "INTERNAL_ERROR", + "INSUFFICIENT_CLOUDHSM_HSMS", + "USER_LOCKED_OUT", + "USER_NOT_FOUND", + "USER_LOGGED_IN", + "SUBNET_NOT_FOUND", + "INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET", + } +} + +type ConnectionStateType string + +// Enum values for ConnectionStateType +const ( + ConnectionStateTypeConnected ConnectionStateType = "CONNECTED" + ConnectionStateTypeConnecting ConnectionStateType = "CONNECTING" + ConnectionStateTypeFailed ConnectionStateType = "FAILED" + ConnectionStateTypeDisconnected ConnectionStateType = "DISCONNECTED" + ConnectionStateTypeDisconnecting ConnectionStateType = "DISCONNECTING" +) + +// Values returns all known values for ConnectionStateType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (ConnectionStateType) Values() []ConnectionStateType { + return []ConnectionStateType{ + "CONNECTED", + "CONNECTING", + "FAILED", + "DISCONNECTED", + "DISCONNECTING", + } +} + +type CustomerMasterKeySpec string + +// Enum values for CustomerMasterKeySpec +const ( + CustomerMasterKeySpecRsa2048 CustomerMasterKeySpec = "RSA_2048" + CustomerMasterKeySpecRsa3072 CustomerMasterKeySpec = "RSA_3072" + CustomerMasterKeySpecRsa4096 CustomerMasterKeySpec = "RSA_4096" + CustomerMasterKeySpecEccNistP256 CustomerMasterKeySpec = "ECC_NIST_P256" + CustomerMasterKeySpecEccNistP384 CustomerMasterKeySpec = "ECC_NIST_P384" + CustomerMasterKeySpecEccNistP521 CustomerMasterKeySpec = "ECC_NIST_P521" + CustomerMasterKeySpecEccSecgP256k1 CustomerMasterKeySpec = "ECC_SECG_P256K1" + CustomerMasterKeySpecSymmetricDefault CustomerMasterKeySpec = "SYMMETRIC_DEFAULT" + CustomerMasterKeySpecHmac224 CustomerMasterKeySpec = "HMAC_224" + CustomerMasterKeySpecHmac256 CustomerMasterKeySpec = "HMAC_256" + CustomerMasterKeySpecHmac384 CustomerMasterKeySpec = "HMAC_384" + CustomerMasterKeySpecHmac512 CustomerMasterKeySpec = "HMAC_512" + CustomerMasterKeySpecSm2 CustomerMasterKeySpec = "SM2" +) + +// Values returns all known values for CustomerMasterKeySpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (CustomerMasterKeySpec) Values() []CustomerMasterKeySpec { + return []CustomerMasterKeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type DataKeyPairSpec string + +// Enum values for DataKeyPairSpec +const ( + DataKeyPairSpecRsa2048 DataKeyPairSpec = "RSA_2048" + DataKeyPairSpecRsa3072 DataKeyPairSpec = "RSA_3072" + DataKeyPairSpecRsa4096 DataKeyPairSpec = "RSA_4096" + DataKeyPairSpecEccNistP256 DataKeyPairSpec = "ECC_NIST_P256" + DataKeyPairSpecEccNistP384 DataKeyPairSpec = "ECC_NIST_P384" + DataKeyPairSpecEccNistP521 DataKeyPairSpec = "ECC_NIST_P521" + DataKeyPairSpecEccSecgP256k1 DataKeyPairSpec = "ECC_SECG_P256K1" + DataKeyPairSpecSm2 DataKeyPairSpec = "SM2" +) + +// Values returns all known values for DataKeyPairSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DataKeyPairSpec) Values() []DataKeyPairSpec { + return []DataKeyPairSpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SM2", + } +} + +type DataKeySpec string + +// Enum values for DataKeySpec +const ( + DataKeySpecAes256 DataKeySpec = "AES_256" + DataKeySpecAes128 DataKeySpec = "AES_128" +) + +// Values returns all known values for DataKeySpec. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (DataKeySpec) Values() []DataKeySpec { + return []DataKeySpec{ + "AES_256", + "AES_128", + } +} + +type EncryptionAlgorithmSpec string + +// Enum values for EncryptionAlgorithmSpec +const ( + EncryptionAlgorithmSpecSymmetricDefault EncryptionAlgorithmSpec = "SYMMETRIC_DEFAULT" + EncryptionAlgorithmSpecRsaesOaepSha1 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_1" + EncryptionAlgorithmSpecRsaesOaepSha256 EncryptionAlgorithmSpec = "RSAES_OAEP_SHA_256" + EncryptionAlgorithmSpecSm2pke EncryptionAlgorithmSpec = "SM2PKE" +) + +// Values returns all known values for EncryptionAlgorithmSpec. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (EncryptionAlgorithmSpec) Values() []EncryptionAlgorithmSpec { + return []EncryptionAlgorithmSpec{ + "SYMMETRIC_DEFAULT", + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256", + "SM2PKE", + } +} + +type ExpirationModelType string + +// Enum values for ExpirationModelType +const ( + ExpirationModelTypeKeyMaterialExpires ExpirationModelType = "KEY_MATERIAL_EXPIRES" + ExpirationModelTypeKeyMaterialDoesNotExpire ExpirationModelType = "KEY_MATERIAL_DOES_NOT_EXPIRE" +) + +// Values returns all known values for ExpirationModelType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (ExpirationModelType) Values() []ExpirationModelType { + return []ExpirationModelType{ + "KEY_MATERIAL_EXPIRES", + "KEY_MATERIAL_DOES_NOT_EXPIRE", + } +} + +type GrantOperation string + +// Enum values for GrantOperation +const ( + GrantOperationDecrypt GrantOperation = "Decrypt" + GrantOperationEncrypt GrantOperation = "Encrypt" + GrantOperationGenerateDataKey GrantOperation = "GenerateDataKey" + GrantOperationGenerateDataKeyWithoutPlaintext GrantOperation = "GenerateDataKeyWithoutPlaintext" + GrantOperationReEncryptFrom GrantOperation = "ReEncryptFrom" + GrantOperationReEncryptTo GrantOperation = "ReEncryptTo" + GrantOperationSign GrantOperation = "Sign" + GrantOperationVerify GrantOperation = "Verify" + GrantOperationGetPublicKey GrantOperation = "GetPublicKey" + GrantOperationCreateGrant GrantOperation = "CreateGrant" + GrantOperationRetireGrant GrantOperation = "RetireGrant" + GrantOperationDescribeKey GrantOperation = "DescribeKey" + GrantOperationGenerateDataKeyPair GrantOperation = "GenerateDataKeyPair" + GrantOperationGenerateDataKeyPairWithoutPlaintext GrantOperation = "GenerateDataKeyPairWithoutPlaintext" + GrantOperationGenerateMac GrantOperation = "GenerateMac" + GrantOperationVerifyMac GrantOperation = "VerifyMac" +) + +// Values returns all known values for GrantOperation. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
+func (GrantOperation) Values() []GrantOperation { + return []GrantOperation{ + "Decrypt", + "Encrypt", + "GenerateDataKey", + "GenerateDataKeyWithoutPlaintext", + "ReEncryptFrom", + "ReEncryptTo", + "Sign", + "Verify", + "GetPublicKey", + "CreateGrant", + "RetireGrant", + "DescribeKey", + "GenerateDataKeyPair", + "GenerateDataKeyPairWithoutPlaintext", + "GenerateMac", + "VerifyMac", + } +} + +type KeyManagerType string + +// Enum values for KeyManagerType +const ( + KeyManagerTypeAws KeyManagerType = "AWS" + KeyManagerTypeCustomer KeyManagerType = "CUSTOMER" +) + +// Values returns all known values for KeyManagerType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (KeyManagerType) Values() []KeyManagerType { + return []KeyManagerType{ + "AWS", + "CUSTOMER", + } +} + +type KeySpec string + +// Enum values for KeySpec +const ( + KeySpecRsa2048 KeySpec = "RSA_2048" + KeySpecRsa3072 KeySpec = "RSA_3072" + KeySpecRsa4096 KeySpec = "RSA_4096" + KeySpecEccNistP256 KeySpec = "ECC_NIST_P256" + KeySpecEccNistP384 KeySpec = "ECC_NIST_P384" + KeySpecEccNistP521 KeySpec = "ECC_NIST_P521" + KeySpecEccSecgP256k1 KeySpec = "ECC_SECG_P256K1" + KeySpecSymmetricDefault KeySpec = "SYMMETRIC_DEFAULT" + KeySpecHmac224 KeySpec = "HMAC_224" + KeySpecHmac256 KeySpec = "HMAC_256" + KeySpecHmac384 KeySpec = "HMAC_384" + KeySpecHmac512 KeySpec = "HMAC_512" + KeySpecSm2 KeySpec = "SM2" +) + +// Values returns all known values for KeySpec. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (KeySpec) Values() []KeySpec { + return []KeySpec{ + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", + "SYMMETRIC_DEFAULT", + "HMAC_224", + "HMAC_256", + "HMAC_384", + "HMAC_512", + "SM2", + } +} + +type KeyState string + +// Enum values for KeyState +const ( + KeyStateCreating KeyState = "Creating" + KeyStateEnabled KeyState = "Enabled" + KeyStateDisabled KeyState = "Disabled" + KeyStatePendingDeletion KeyState = "PendingDeletion" + KeyStatePendingImport KeyState = "PendingImport" + KeyStatePendingReplicaDeletion KeyState = "PendingReplicaDeletion" + KeyStateUnavailable KeyState = "Unavailable" + KeyStateUpdating KeyState = "Updating" +) + +// Values returns all known values for KeyState. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this +// slice is not guaranteed to be stable across updates. +func (KeyState) Values() []KeyState { + return []KeyState{ + "Creating", + "Enabled", + "Disabled", + "PendingDeletion", + "PendingImport", + "PendingReplicaDeletion", + "Unavailable", + "Updating", + } +} + +type KeyUsageType string + +// Enum values for KeyUsageType +const ( + KeyUsageTypeSignVerify KeyUsageType = "SIGN_VERIFY" + KeyUsageTypeEncryptDecrypt KeyUsageType = "ENCRYPT_DECRYPT" + KeyUsageTypeGenerateVerifyMac KeyUsageType = "GENERATE_VERIFY_MAC" +) + +// Values returns all known values for KeyUsageType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. 
+func (KeyUsageType) Values() []KeyUsageType { + return []KeyUsageType{ + "SIGN_VERIFY", + "ENCRYPT_DECRYPT", + "GENERATE_VERIFY_MAC", + } +} + +type MacAlgorithmSpec string + +// Enum values for MacAlgorithmSpec +const ( + MacAlgorithmSpecHmacSha224 MacAlgorithmSpec = "HMAC_SHA_224" + MacAlgorithmSpecHmacSha256 MacAlgorithmSpec = "HMAC_SHA_256" + MacAlgorithmSpecHmacSha384 MacAlgorithmSpec = "HMAC_SHA_384" + MacAlgorithmSpecHmacSha512 MacAlgorithmSpec = "HMAC_SHA_512" +) + +// Values returns all known values for MacAlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MacAlgorithmSpec) Values() []MacAlgorithmSpec { + return []MacAlgorithmSpec{ + "HMAC_SHA_224", + "HMAC_SHA_256", + "HMAC_SHA_384", + "HMAC_SHA_512", + } +} + +type MessageType string + +// Enum values for MessageType +const ( + MessageTypeRaw MessageType = "RAW" + MessageTypeDigest MessageType = "DIGEST" +) + +// Values returns all known values for MessageType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (MessageType) Values() []MessageType { + return []MessageType{ + "RAW", + "DIGEST", + } +} + +type MultiRegionKeyType string + +// Enum values for MultiRegionKeyType +const ( + MultiRegionKeyTypePrimary MultiRegionKeyType = "PRIMARY" + MultiRegionKeyTypeReplica MultiRegionKeyType = "REPLICA" +) + +// Values returns all known values for MultiRegionKeyType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (MultiRegionKeyType) Values() []MultiRegionKeyType { + return []MultiRegionKeyType{ + "PRIMARY", + "REPLICA", + } +} + +type OriginType string + +// Enum values for OriginType +const ( + OriginTypeAwsKms OriginType = "AWS_KMS" + OriginTypeExternal OriginType = "EXTERNAL" + OriginTypeAwsCloudhsm OriginType = "AWS_CLOUDHSM" +) + +// Values returns all known values for OriginType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (OriginType) Values() []OriginType { + return []OriginType{ + "AWS_KMS", + "EXTERNAL", + "AWS_CLOUDHSM", + } +} + +type SigningAlgorithmSpec string + +// Enum values for SigningAlgorithmSpec +const ( + SigningAlgorithmSpecRsassaPssSha256 SigningAlgorithmSpec = "RSASSA_PSS_SHA_256" + SigningAlgorithmSpecRsassaPssSha384 SigningAlgorithmSpec = "RSASSA_PSS_SHA_384" + SigningAlgorithmSpecRsassaPssSha512 SigningAlgorithmSpec = "RSASSA_PSS_SHA_512" + SigningAlgorithmSpecRsassaPkcs1V15Sha256 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_256" + SigningAlgorithmSpecRsassaPkcs1V15Sha384 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_384" + SigningAlgorithmSpecRsassaPkcs1V15Sha512 SigningAlgorithmSpec = "RSASSA_PKCS1_V1_5_SHA_512" + SigningAlgorithmSpecEcdsaSha256 SigningAlgorithmSpec = "ECDSA_SHA_256" + SigningAlgorithmSpecEcdsaSha384 SigningAlgorithmSpec = "ECDSA_SHA_384" + SigningAlgorithmSpecEcdsaSha512 SigningAlgorithmSpec = "ECDSA_SHA_512" + SigningAlgorithmSpecSm2dsa SigningAlgorithmSpec = "SM2DSA" +) + +// Values returns all known values for SigningAlgorithmSpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. 
The +// ordering of this slice is not guaranteed to be stable across updates. +func (SigningAlgorithmSpec) Values() []SigningAlgorithmSpec { + return []SigningAlgorithmSpec{ + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "ECDSA_SHA_256", + "ECDSA_SHA_384", + "ECDSA_SHA_512", + "SM2DSA", + } +} + +type WrappingKeySpec string + +// Enum values for WrappingKeySpec +const ( + WrappingKeySpecRsa2048 WrappingKeySpec = "RSA_2048" +) + +// Values returns all known values for WrappingKeySpec. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (WrappingKeySpec) Values() []WrappingKeySpec { + return []WrappingKeySpec{ + "RSA_2048", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go new file mode 100644 index 0000000000..e163526075 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/errors.go @@ -0,0 +1,804 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// The request was rejected because it attempted to create a resource that already +// exists. +type AlreadyExistsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *AlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AlreadyExistsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AlreadyExistsException) ErrorCode() string { return "AlreadyExistsException" } +func (e *AlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster is already +// associated with a custom key store or it shares a backup history with a cluster +// that is associated with a custom key store. Each custom key store must be +// associated with a different CloudHSM cluster. Clusters that share a backup +// history have the same cluster certificate. To view the cluster certificate of a +// cluster, use the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. +type CloudHsmClusterInUseException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterInUseException) ErrorCode() string { return "CloudHsmClusterInUseException" } +func (e *CloudHsmClusterInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the associated CloudHSM cluster did not meet +// the configuration requirements for a custom key store. +// +// * The cluster must be +// configured with private subnets in at least two different Availability Zones in +// the Region. +// +// * The security group for the cluster +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) +// (cloudhsm-cluster--sg) must include inbound rules and outbound rules that allow +// TCP traffic on ports 2223-2225. 
The Source in the inbound rules and the +// Destination in the outbound rules must match the security group ID. These rules +// are set by default when you create the cluster. Do not delete or change them. To +// get information about a particular security group, use the +// DescribeSecurityGroups +// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) +// operation. +// +// * The cluster must contain at least as many HSMs as the operation +// requires. To add HSMs, use the CloudHSM CreateHsm +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) +// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey +// operations, the CloudHSM cluster must have at least two active HSMs, each in a +// different Availability Zone. For the ConnectCustomKeyStore operation, the +// CloudHSM cluster must contain at least one active HSM. +// +// For information about the +// requirements for a CloudHSM cluster that is associated with a custom key store, +// see Assemble the Prerequisites +// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) +// in the Key Management Service Developer Guide. For information about creating a +// private subnet for a CloudHSM cluster, see Create a Private Subnet +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) in +// the CloudHSM User Guide. For information about cluster security groups, see +// Configure a Default Security Group +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) in the +// CloudHSM User Guide. +type CloudHsmClusterInvalidConfigurationException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterInvalidConfigurationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorCode() string { + return "CloudHsmClusterInvalidConfigurationException" +} +func (e *CloudHsmClusterInvalidConfigurationException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the CloudHSM cluster that is associated with +// the custom key store is not active. Initialize and activate the cluster and try +// the command again. For detailed instructions, see Getting Started +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) in +// the CloudHSM User Guide. +type CloudHsmClusterNotActiveException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotActiveException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotActiveException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotActiveException) ErrorCode() string { + return "CloudHsmClusterNotActiveException" +} +func (e *CloudHsmClusterNotActiveException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because KMS cannot find the CloudHSM cluster with the +// specified cluster ID. Retry the request with a different cluster ID.
+type CloudHsmClusterNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotFoundException) ErrorCode() string { + return "CloudHsmClusterNotFoundException" +} +func (e *CloudHsmClusterNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified CloudHSM cluster has a different +// cluster certificate than the original cluster. You cannot use the operation to +// specify an unrelated cluster. Specify a cluster that shares a backup history +// with the original cluster. This includes clusters that were created from a +// backup of the current cluster, and clusters that were created from the same +// backup that produced the current cluster. Clusters that share a backup history +// have the same cluster certificate. To view the cluster certificate of a cluster, +// use the DescribeClusters +// (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) +// operation. +type CloudHsmClusterNotRelatedException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CloudHsmClusterNotRelatedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CloudHsmClusterNotRelatedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CloudHsmClusterNotRelatedException) ErrorCode() string { + return "CloudHsmClusterNotRelatedException" +} +func (e *CloudHsmClusterNotRelatedException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the custom key store contains KMS keys. After +// verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion +// operation to delete the KMS keys. After they are deleted, you can delete the +// custom key store. +type CustomKeyStoreHasCMKsException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreHasCMKsException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreHasCMKsException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreHasCMKsException) ErrorCode() string { return "CustomKeyStoreHasCMKsException" } +func (e *CustomKeyStoreHasCMKsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because of the ConnectionState of the custom key store. +// To get the ConnectionState of a custom key store, use the +// DescribeCustomKeyStores operation. This exception is thrown under the following +// conditions: +// +// * You requested the CreateKey or GenerateRandom operation in a +// custom key store that is not connected. These operations are valid only when the +// custom key store ConnectionState is CONNECTED. +// +// * You requested the +// UpdateCustomKeyStore or DeleteCustomKeyStore operation on a custom key store +// that is not disconnected. This operation is valid only when the custom key store +// ConnectionState is DISCONNECTED. +// +// * You requested the ConnectCustomKeyStore +// operation on a custom key store with a ConnectionState of DISCONNECTING or +// FAILED. 
This operation is valid for all other ConnectionState values. +type CustomKeyStoreInvalidStateException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreInvalidStateException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreInvalidStateException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreInvalidStateException) ErrorCode() string { + return "CustomKeyStoreInvalidStateException" +} +func (e *CustomKeyStoreInvalidStateException) ErrorFault() smithy.ErrorFault { + return smithy.FaultClient +} + +// The request was rejected because the specified custom key store name is already +// assigned to another custom key store in the account. Try again with a custom key +// store name that is unique in the account. +type CustomKeyStoreNameInUseException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreNameInUseException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreNameInUseException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreNameInUseException) ErrorCode() string { + return "CustomKeyStoreNameInUseException" +} +func (e *CustomKeyStoreNameInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because KMS cannot find a custom key store with the +// specified key store name or ID. +type CustomKeyStoreNotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *CustomKeyStoreNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *CustomKeyStoreNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *CustomKeyStoreNotFoundException) ErrorCode() string { + return "CustomKeyStoreNotFoundException" +} +func (e *CustomKeyStoreNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The system timed out while trying to fulfill the request. The request can be +// retried. +type DependencyTimeoutException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *DependencyTimeoutException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DependencyTimeoutException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DependencyTimeoutException) ErrorCode() string { return "DependencyTimeoutException" } +func (e *DependencyTimeoutException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because the specified KMS key is not enabled. +type DisabledException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *DisabledException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DisabledException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DisabledException) ErrorCode() string { return "DisabledException" } +func (e *DisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified import token is expired. 
Use +// GetParametersForImport to get a new import token and public key, use the new +// public key to encrypt the key material, and then try the request again. +type ExpiredImportTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *ExpiredImportTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredImportTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredImportTokenException) ErrorCode() string { return "ExpiredImportTokenException" } +func (e *ExpiredImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified KMS key cannot decrypt the data. +// The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request must +// identify the same KMS key that was used to encrypt the ciphertext. +type IncorrectKeyException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IncorrectKeyException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectKeyException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectKeyException) ErrorCode() string { return "IncorrectKeyException" } +func (e *IncorrectKeyException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the key material in the request is expired, +// invalid, or is not the same key material that was previously imported into this +// KMS key. +type IncorrectKeyMaterialException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IncorrectKeyMaterialException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectKeyMaterialException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectKeyMaterialException) ErrorCode() string { return "IncorrectKeyMaterialException" } +func (e *IncorrectKeyMaterialException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the trust anchor certificate in the request is +// not the trust anchor certificate for the specified CloudHSM cluster. When you +// initialize the cluster +// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), +// you create the trust anchor certificate and save it in the customerCA.crt file. +type IncorrectTrustAnchorException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *IncorrectTrustAnchorException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *IncorrectTrustAnchorException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *IncorrectTrustAnchorException) ErrorCode() string { return "IncorrectTrustAnchorException" } +func (e *IncorrectTrustAnchorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified alias name is not valid.
+type InvalidAliasNameException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidAliasNameException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidAliasNameException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidAliasNameException) ErrorCode() string { return "InvalidAliasNameException" } +func (e *InvalidAliasNameException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a specified ARN, or an ARN in a key policy, is +// not valid. +type InvalidArnException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidArnException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidArnException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidArnException) ErrorCode() string { return "InvalidArnException" } +func (e *InvalidArnException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// From the Decrypt or ReEncrypt operation, the request was rejected because the +// specified ciphertext, or additional authenticated data incorporated into the +// ciphertext, such as the encryption context, is corrupted, missing, or otherwise +// invalid. From the ImportKeyMaterial operation, the request was rejected because +// KMS could not decrypt the encrypted (wrapped) key material. +type InvalidCiphertextException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidCiphertextException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidCiphertextException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidCiphertextException) ErrorCode() string { return "InvalidCiphertextException" } +func (e *InvalidCiphertextException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified GrantId is not valid. +type InvalidGrantIdException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantIdException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantIdException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantIdException) ErrorCode() string { return "InvalidGrantIdException" } +func (e *InvalidGrantIdException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified grant token is not valid. +type InvalidGrantTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantTokenException) ErrorCode() string { return "InvalidGrantTokenException" } +func (e *InvalidGrantTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the provided import token is invalid or is +// associated with a different KMS key. 
+type InvalidImportTokenException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidImportTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidImportTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidImportTokenException) ErrorCode() string { return "InvalidImportTokenException" } +func (e *InvalidImportTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected for one of the following reasons: +// +// * The KeyUsage value +// of the KMS key is incompatible with the API operation. +// +// * The encryption +// algorithm or signing algorithm specified for the operation is incompatible with +// the type of key material in the KMS key (KeySpec). +// +// For encrypting, decrypting, +// re-encrypting, and generating data keys, the KeyUsage must be ENCRYPT_DECRYPT. +// For signing and verifying messages, the KeyUsage must be SIGN_VERIFY. For +// generating and verifying message authentication codes (MACs), the KeyUsage must +// be GENERATE_VERIFY_MAC. To find the KeyUsage of a KMS key, use the DescribeKey +// operation. To find the encryption or signing algorithms supported for a +// particular KMS key, use the DescribeKey operation. +type InvalidKeyUsageException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidKeyUsageException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidKeyUsageException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidKeyUsageException) ErrorCode() string { return "InvalidKeyUsageException" } +func (e *InvalidKeyUsageException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the marker that specifies where pagination +// should next begin is not valid. +type InvalidMarkerException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *InvalidMarkerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidMarkerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidMarkerException) ErrorCode() string { return "InvalidMarkerException" } +func (e *InvalidMarkerException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified KMS key was not available. You +// can retry the request. +type KeyUnavailableException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *KeyUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KeyUnavailableException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KeyUnavailableException) ErrorCode() string { return "KeyUnavailableException" } +func (e *KeyUnavailableException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because an internal exception occurred. The request can +// be retried. 
+type KMSInternalException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *KMSInternalException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInternalException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInternalException) ErrorCode() string { return "KMSInternalException" } +func (e *KMSInternalException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// The request was rejected because the HMAC verification failed. HMAC verification +// fails when the HMAC computed by using the specified message, HMAC KMS key, and +// MAC algorithm does not match the HMAC specified in the request. +type KMSInvalidMacException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidMacException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidMacException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidMacException) ErrorCode() string { return "KMSInvalidMacException" } +func (e *KMSInvalidMacException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the signature verification failed. Signature +// verification fails when it cannot confirm that the signature was produced by +// signing the specified message with the specified KMS key and signing algorithm. +type KMSInvalidSignatureException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidSignatureException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidSignatureException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidSignatureException) ErrorCode() string { return "KMSInvalidSignatureException" } +func (e *KMSInvalidSignatureException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the state of the specified resource is not +// valid for this request. For more information about how key state affects the use +// of a KMS key, see Key states of KMS keys +// (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the +// Key Management Service Developer Guide. +type KMSInvalidStateException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *KMSInvalidStateException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *KMSInvalidStateException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *KMSInvalidStateException) ErrorCode() string { return "KMSInvalidStateException" } +func (e *KMSInvalidStateException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a quota was exceeded. For more information, see +// Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) in +// the Key Management Service Developer Guide.
+type LimitExceededException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *LimitExceededException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *LimitExceededException) ErrorCode() string { return "LimitExceededException" } +func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified policy is not syntactically or +// semantically correct. +type MalformedPolicyDocumentException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *MalformedPolicyDocumentException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MalformedPolicyDocumentException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MalformedPolicyDocumentException) ErrorCode() string { + return "MalformedPolicyDocumentException" +} +func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because the specified entity or resource could not be +// found. +type NotFoundException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *NotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *NotFoundException) ErrorCode() string { return "NotFoundException" } +func (e *NotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because one or more tags are not valid. +type TagException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *TagException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TagException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TagException) ErrorCode() string { return "TagException" } +func (e *TagException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The request was rejected because a specified parameter is not supported or a +// specified resource is not valid for this operation. +type UnsupportedOperationException struct { + Message *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedOperationException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedOperationException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedOperationException) ErrorCode() string { return "UnsupportedOperationException" } +func (e *UnsupportedOperationException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go new file mode 100644 index 0000000000..6dac549dca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/types/types.go @@ -0,0 +1,436 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" + "time" +) + +// Contains information about an alias. +type AliasListEntry struct { + + // String that contains the key ARN. 
+ AliasArn *string + + // String that contains the alias. This value begins with alias/. + AliasName *string + + // Date and time that the alias was most recently created in the account and + // Region. Formatted as Unix time. + CreationDate *time.Time + + // Date and time that the alias was most recently associated with a KMS key in the + // account and Region. Formatted as Unix time. + LastUpdatedDate *time.Time + + // String that contains the key identifier of the KMS key associated with the + // alias. + TargetKeyId *string + + noSmithyDocumentSerde +} + +// Contains information about each custom key store in the custom key store list. +type CustomKeyStoresListEntry struct { + + // A unique identifier for the CloudHSM cluster that is associated with the custom + // key store. + CloudHsmClusterId *string + + // Describes the connection error. This field appears in the response only when the + // ConnectionState is FAILED. For help resolving these errors, see How to Fix a + // Connection Failure + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. Valid values are: + // + // * + // CLUSTER_NOT_FOUND - KMS cannot find the CloudHSM cluster with the specified + // cluster ID. + // + // * INSUFFICIENT_CLOUDHSM_HSMS - The associated CloudHSM cluster does + // not contain any active HSMs. To connect a custom key store to its CloudHSM + // cluster, the cluster must contain at least one active HSM. + // + // * INTERNAL_ERROR - + // KMS could not complete the request due to an internal error. Retry the request. + // For ConnectCustomKeyStore requests, disconnect the custom key store before + // trying to connect again. + // + // * INVALID_CREDENTIALS - KMS does not have the correct + // password for the kmsuser crypto user in the CloudHSM cluster. Before you can + // connect your custom key store to its CloudHSM cluster, you must change the + // kmsuser account password and update the key store password value for the custom + // key store. + // + // * NETWORK_ERRORS - Network errors are preventing KMS from connecting + // to the custom key store. + // + // * SUBNET_NOT_FOUND - A subnet in the CloudHSM cluster + // configuration was deleted. If KMS cannot find all of the subnets in the cluster + // configuration, attempts to connect the custom key store to the CloudHSM cluster + // fail. To fix this error, create a cluster from a recent backup and associate it + // with your custom key store. (This process creates a new cluster configuration + // with a VPC and private subnets.) For details, see How to Fix a Connection + // Failure + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // in the Key Management Service Developer Guide. + // + // * USER_LOCKED_OUT - The kmsuser + // CU account is locked out of the associated CloudHSM cluster due to too many + // failed password attempts. Before you can connect your custom key store to its + // CloudHSM cluster, you must change the kmsuser account password and update the + // key store password value for the custom key store. + // + // * USER_LOGGED_IN - The + // kmsuser CU account is logged into the associated CloudHSM cluster. This + // prevents KMS from rotating the kmsuser account password and logging into the + // cluster. Before you can connect your custom key store to its CloudHSM cluster, + // you must log the kmsuser CU out of the cluster. If you changed the kmsuser + // password to log into the cluster, you must also update the key store + // password value for the custom key store. For help, see How to Log Out and + // Reconnect + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2) + // in the Key Management Service Developer Guide. + // + // * USER_NOT_FOUND - KMS cannot + // find a kmsuser CU account in the associated CloudHSM cluster. Before you can + // connect your custom key store to its CloudHSM cluster, you must create a kmsuser + // CU account in the cluster, and then update the key store password value for the + // custom key store. + ConnectionErrorCode ConnectionErrorCodeType + + // Indicates whether the custom key store is connected to its CloudHSM cluster. You + // can create and use KMS keys in your custom key stores only when its connection + // state is CONNECTED. The value is DISCONNECTED if the key store has never been + // connected or you use the DisconnectCustomKeyStore operation to disconnect it. If + // the value is CONNECTED but you are having trouble using the custom key store, + // make sure that its associated CloudHSM cluster is active and contains at least + // one active HSM. A value of FAILED indicates that an attempt to connect was + // unsuccessful. The ConnectionErrorCode field in the response indicates the cause + // of the failure. For help resolving a connection failure, see Troubleshooting a + // Custom Key Store + // (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) in the + // Key Management Service Developer Guide. + ConnectionState ConnectionStateType + + // The date and time when the custom key store was created. + CreationDate *time.Time + + // A unique identifier for the custom key store. + CustomKeyStoreId *string + + // The user-specified friendly name for the custom key store. + CustomKeyStoreName *string + + // The trust anchor certificate of the associated CloudHSM cluster. When you + // initialize the cluster + // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), + // you create this certificate and save it in the customerCA.crt file. + TrustAnchorCertificate *string + + noSmithyDocumentSerde +} + +// Use this structure to allow cryptographic operations +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// in the grant only when the operation request includes the specified encryption +// context +// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). +// KMS applies the grant constraints only to cryptographic operations that support +// an encryption context, that is, all cryptographic operations with a symmetric +// encryption KMS key +// (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). +// Grant constraints are not applied to operations that do not support an +// encryption context, such as cryptographic operations with HMAC KMS keys or +// asymmetric KMS keys, and management operations, such as DescribeKey or +// RetireGrant. In a cryptographic operation, the encryption context in the +// decryption operation must be an exact, case-sensitive match for the keys and +// values in the encryption context of the encryption operation. Only the order of +// the pairs can vary. However, in a grant constraint, the key in each key-value +// pair is not case sensitive, but the value is case sensitive.
To avoid confusion, +// do not use multiple encryption context pairs that differ only by case. To +// require a fully case-sensitive encryption context, use the +// kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM or key +// policy. For details, see kms:EncryptionContext: +// (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) +// in the Key Management Service Developer Guide . +type GrantConstraints struct { + + // A list of key-value pairs that must match the encryption context in the + // cryptographic operation + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the operation only when the encryption context in the + // request is the same as the encryption context specified in this constraint. + EncryptionContextEquals map[string]string + + // A list of key-value pairs that must be included in the encryption context of the + // cryptographic operation + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the cryptographic operation only when the encryption + // context in the request includes the key-value pairs specified in this + // constraint, although it can include additional key-value pairs. + EncryptionContextSubset map[string]string + + noSmithyDocumentSerde +} + +// Contains information about a grant. +type GrantListEntry struct { + + // A list of key-value pairs that must be present in the encryption context of + // certain subsequent operations that the grant allows. + Constraints *GrantConstraints + + // The date and time when the grant was created. + CreationDate *time.Time + + // The unique identifier for the grant. + GrantId *string + + // The identity that gets the permissions in the grant. The GranteePrincipal field + // in the ListGrants response usually contains the user or role designated as the + // grantee principal in the grant. However, when the grantee principal in the grant + // is an Amazon Web Services service, the GranteePrincipal field contains the + // service principal + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), + // which might represent several different grantee principals. + GranteePrincipal *string + + // The Amazon Web Services account under which the grant was issued. + IssuingAccount *string + + // The unique identifier for the KMS key to which the grant applies. + KeyId *string + + // The friendly name that identifies the grant. If a name was provided in the + // CreateGrant request, that name is returned. Otherwise this value is null. + Name *string + + // The list of operations permitted by the grant. + Operations []GrantOperation + + // The principal that can retire the grant. + RetiringPrincipal *string + + noSmithyDocumentSerde +} + +// Contains information about each entry in the key list. +type KeyListEntry struct { + + // ARN of the key. + KeyArn *string + + // Unique identifier of the key. + KeyId *string + + noSmithyDocumentSerde +} + +// Contains metadata about a KMS key. This data type is used as a response element +// for the CreateKey and DescribeKey operations. +type KeyMetadata struct { + + // The globally unique identifier for the KMS key. + // + // This member is required. + KeyId *string + + // The twelve-digit account ID of the Amazon Web Services account that owns the KMS + // key. 
+ AWSAccountId *string + + // The Amazon Resource Name (ARN) of the KMS key. For examples, see Key Management + // Service (KMS) + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) + // in the Example ARNs section of the Amazon Web Services General Reference. + Arn *string + + // The cluster ID of the CloudHSM cluster that contains the key material for the + // KMS key. When you create a KMS key in a custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), + // KMS creates the key material for the KMS key in the associated CloudHSM cluster. + // This value is present only when the KMS key is created in a custom key store. + CloudHsmClusterId *string + + // The date and time when the KMS key was created. + CreationDate *time.Time + + // A unique identifier for the custom key store + // (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) + // that contains the KMS key. This value is present only when the KMS key is + // created in a custom key store. + CustomKeyStoreId *string + + // Instead, use the KeySpec field. The KeySpec and CustomerMasterKeySpec fields + // have the same value. We recommend that you use the KeySpec field in your code. + // However, to avoid breaking changes, KMS will support both fields. + // + // Deprecated: This field has been deprecated. Instead, use the KeySpec field. + CustomerMasterKeySpec CustomerMasterKeySpec + + // The date and time after which KMS deletes this KMS key. This value is present + // only when the KMS key is scheduled for deletion, that is, when its KeyState is + // PendingDeletion. When the primary key in a multi-Region key is scheduled for + // deletion but still has replica keys, its key state is PendingReplicaDeletion and + // the length of its waiting period is displayed in the PendingDeletionWindowInDays + // field. + DeletionDate *time.Time + + // The description of the KMS key. + Description *string + + // Specifies whether the KMS key is enabled. When KeyState is Enabled this value is + // true, otherwise it is false. + Enabled bool + + // The encryption algorithms that the KMS key supports. You cannot use the KMS key + // with other encryption algorithms within KMS. This value is present only when the + // KeyUsage of the KMS key is ENCRYPT_DECRYPT. + EncryptionAlgorithms []EncryptionAlgorithmSpec + + // Specifies whether the KMS key's key material expires. This value is present only + // when Origin is EXTERNAL, otherwise this value is omitted. + ExpirationModel ExpirationModelType + + // The manager of the KMS key. KMS keys in your Amazon Web Services account are + // either customer managed or Amazon Web Services managed. For more information + // about the difference, see KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) + // in the Key Management Service Developer Guide. + KeyManager KeyManagerType + + // Describes the type of key material in the KMS key. + KeySpec KeySpec + + // The current status of the KMS key. For more information about how key state + // affects the use of a KMS key, see Key states of KMS keys + // (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) in the + // Key Management Service Developer Guide. + KeyState KeyState + + // The cryptographic operations + // (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the KMS key. 
+ KeyUsage KeyUsageType + + // The message authentication code (MAC) algorithm that the HMAC KMS key supports. + // This value is present only when the KeyUsage of the KMS key is + // GENERATE_VERIFY_MAC. + MacAlgorithms []MacAlgorithmSpec + + // Indicates whether the KMS key is a multi-Region (True) or regional (False) key. + // This value is True for multi-Region primary and replica keys and False for + // regional KMS keys. For more information about multi-Region keys, see + // Multi-Region keys in KMS + // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) + // in the Key Management Service Developer Guide. + MultiRegion *bool + + // Lists the primary and replica keys in the same multi-Region key. This field is + // present only when the value of the MultiRegion field is True. For more + // information about any listed KMS key, use the DescribeKey operation. + // + // * + // MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA key. + // + // * + // PrimaryKey displays the key ARN and Region of the primary key. This field + // displays the current KMS key if it is the primary key. + // + // * ReplicaKeys displays + // the key ARNs and Regions of all replica keys. This field includes the current + // KMS key if it is a replica key. + MultiRegionConfiguration *MultiRegionConfiguration + + // The source of the key material for the KMS key. When this value is AWS_KMS, KMS + // created the key material. When this value is EXTERNAL, the key material was + // imported or the KMS key doesn't have any key material. When this value is + // AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated + // with a custom key store. + Origin OriginType + + // The waiting period before the primary key in a multi-Region key is deleted. This + // waiting period begins when the last of its replica keys is deleted. This value + // is present only when the KeyState of the KMS key is PendingReplicaDeletion. That + // indicates that the KMS key is the primary key in a multi-Region key, it is + // scheduled for deletion, and it still has existing replica keys. When a + // single-Region KMS key or a multi-Region replica key is scheduled for deletion, + // its deletion date is displayed in the DeletionDate field. However, when the + // primary key in a multi-Region key is scheduled for deletion, its waiting period + // doesn't begin until all of its replica keys are deleted. This value displays + // that waiting period. When the last replica key in the multi-Region key is + // deleted, the KeyState of the scheduled primary key changes from + // PendingReplicaDeletion to PendingDeletion and the deletion date appears in the + // DeletionDate field. + PendingDeletionWindowInDays *int32 + + // The signing algorithms that the KMS key supports. You cannot use the KMS key + // with other signing algorithms within KMS. This field appears only when the + // KeyUsage of the KMS key is SIGN_VERIFY. + SigningAlgorithms []SigningAlgorithmSpec + + // The time at which the imported key material expires. When the key material + // expires, KMS deletes the key material and the KMS key becomes unusable. This + // value is present only for KMS keys whose Origin is EXTERNAL and whose + // ExpirationModel is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. + ValidTo *time.Time + + noSmithyDocumentSerde +} + +// Describes the configuration of this multi-Region key. This field appears only +// when the KMS key is a primary or replica of a multi-Region key. For more +// information about any listed KMS key, use the DescribeKey operation. +type MultiRegionConfiguration struct { + + // Indicates whether the KMS key is a PRIMARY or REPLICA key. + MultiRegionKeyType MultiRegionKeyType + + // Displays the key ARN and Region of the primary key. This field includes the + // current KMS key if it is the primary key. + PrimaryKey *MultiRegionKey + + // Displays the key ARNs and Regions of all replica keys. This field includes the + // current KMS key if it is a replica key. + ReplicaKeys []MultiRegionKey + + noSmithyDocumentSerde +} + +// Describes the primary or replica key in a multi-Region key. +type MultiRegionKey struct { + + // Displays the key ARN of a primary or replica key of a multi-Region key. + Arn *string + + // Displays the Amazon Web Services Region of a primary or replica key in a + // multi-Region key. + Region *string + + noSmithyDocumentSerde +} + +// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and tag +// values are both required, but tag values can be empty (null) strings. For +// information about the rules that apply to tag keys and tag values, see +// User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// in the Amazon Web Services Billing and Cost Management User Guide. +type Tag struct { + + // The key of the tag. + // + // This member is required. + TagKey *string + + // The value of the tag. + // + // This member is required. + TagValue *string + + noSmithyDocumentSerde +} + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go new file mode 100644 index 0000000000..13cec68649 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/kms/validators.go @@ -0,0 +1,1905 @@ +// Code generated by smithy-go-codegen DO NOT EDIT.
+ +package kms + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCancelKeyDeletion struct { +} + +func (*validateOpCancelKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCancelKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CancelKeyDeletionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCancelKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpConnectCustomKeyStore struct { +} + +func (*validateOpConnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpConnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ConnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpConnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateAlias struct { +} + +func (*validateOpCreateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateCustomKeyStore struct { +} + +func (*validateOpCreateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateGrant struct { +} + +func (*validateOpCreateGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpCreateKey struct { +} + +func (*validateOpCreateKey) ID() 
string { + return "OperationInputValidation" +} + +func (m *validateOpCreateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDecrypt struct { +} + +func (*validateOpDecrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDecrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DecryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDecryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteAlias struct { +} + +func (*validateOpDeleteAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteCustomKeyStore struct { +} + +func (*validateOpDeleteCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDeleteImportedKeyMaterial struct { +} + +func (*validateOpDeleteImportedKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteImportedKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteImportedKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteImportedKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDescribeKey struct { +} + +func (*validateOpDescribeKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*DescribeKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKey struct { +} + +func (*validateOpDisableKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisableKeyRotation struct { +} + +func (*validateOpDisableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpDisconnectCustomKeyStore struct { +} + +func (*validateOpDisconnectCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDisconnectCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DisconnectCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDisconnectCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKey struct { +} + +func (*validateOpEnableKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpEnableKeyRotation struct { +} + +func (*validateOpEnableKeyRotation) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEnableKeyRotation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EnableKeyRotationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEnableKeyRotationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, 
in) +} + +type validateOpEncrypt struct { +} + +func (*validateOpEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*EncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKey struct { +} + +func (*validateOpGenerateDataKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPair struct { +} + +func (*validateOpGenerateDataKeyPair) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPair) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyPairWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyPairWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyPairWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyPairWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyPairWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateDataKeyWithoutPlaintext struct { +} + +func (*validateOpGenerateDataKeyWithoutPlaintext) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGenerateDataKeyWithoutPlaintext) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateDataKeyWithoutPlaintextInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateDataKeyWithoutPlaintextInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGenerateMac struct { +} + +func (*validateOpGenerateMac) ID() string { + return 
"OperationInputValidation" +} + +func (m *validateOpGenerateMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GenerateMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGenerateMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyPolicy struct { +} + +func (*validateOpGetKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetKeyRotationStatus struct { +} + +func (*validateOpGetKeyRotationStatus) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetKeyRotationStatus) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetKeyRotationStatusInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetKeyRotationStatusInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetParametersForImport struct { +} + +func (*validateOpGetParametersForImport) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetParametersForImport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetParametersForImportInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetParametersForImportInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpGetPublicKey struct { +} + +func (*validateOpGetPublicKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetPublicKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetPublicKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetPublicKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpImportKeyMaterial struct { +} + +func (*validateOpImportKeyMaterial) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpImportKeyMaterial) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { 
+ input, ok := in.Parameters.(*ImportKeyMaterialInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpImportKeyMaterialInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListGrants struct { +} + +func (*validateOpListGrants) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListKeyPolicies struct { +} + +func (*validateOpListKeyPolicies) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListKeyPolicies) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListKeyPoliciesInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListKeyPoliciesInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListResourceTags struct { +} + +func (*validateOpListResourceTags) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListResourceTags) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListResourceTagsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListResourceTagsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpListRetirableGrants struct { +} + +func (*validateOpListRetirableGrants) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpListRetirableGrants) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ListRetirableGrantsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpListRetirableGrantsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpPutKeyPolicy struct { +} + +func (*validateOpPutKeyPolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutKeyPolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutKeyPolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutKeyPolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) 
+} + +type validateOpReEncrypt struct { +} + +func (*validateOpReEncrypt) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReEncrypt) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReEncryptInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReEncryptInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpReplicateKey struct { +} + +func (*validateOpReplicateKey) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpReplicateKey) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ReplicateKeyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpReplicateKeyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRevokeGrant struct { +} + +func (*validateOpRevokeGrant) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRevokeGrant) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RevokeGrantInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRevokeGrantInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpScheduleKeyDeletion struct { +} + +func (*validateOpScheduleKeyDeletion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpScheduleKeyDeletion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ScheduleKeyDeletionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpScheduleKeyDeletionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpSign struct { +} + +func (*validateOpSign) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpSign) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*SignInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpSignInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpTagResource struct { +} + +func (*validateOpTagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := 
in.Parameters.(*TagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpTagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUntagResource struct { +} + +func (*validateOpUntagResource) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UntagResourceInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUntagResourceInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateAlias struct { +} + +func (*validateOpUpdateAlias) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateAlias) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateAliasInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateAliasInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateCustomKeyStore struct { +} + +func (*validateOpUpdateCustomKeyStore) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateCustomKeyStore) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateCustomKeyStoreInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateCustomKeyStoreInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdateKeyDescription struct { +} + +func (*validateOpUpdateKeyDescription) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdateKeyDescription) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdateKeyDescriptionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdateKeyDescriptionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpUpdatePrimaryRegion struct { +} + +func (*validateOpUpdatePrimaryRegion) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpUpdatePrimaryRegion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*UpdatePrimaryRegionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpUpdatePrimaryRegionInput(input); err != nil { + return out, metadata, err + } + return 
next.HandleInitialize(ctx, in) +} + +type validateOpVerify struct { +} + +func (*validateOpVerify) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerify) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpVerifyMac struct { +} + +func (*validateOpVerifyMac) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpVerifyMac) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*VerifyMacInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpVerifyMacInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +func addOpCancelKeyDeletionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCancelKeyDeletion{}, middleware.After) +} + +func addOpConnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpConnectCustomKeyStore{}, middleware.After) +} + +func addOpCreateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateAlias{}, middleware.After) +} + +func addOpCreateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateCustomKeyStore{}, middleware.After) +} + +func addOpCreateGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateGrant{}, middleware.After) +} + +func addOpCreateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateKey{}, middleware.After) +} + +func addOpDecryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDecrypt{}, middleware.After) +} + +func addOpDeleteAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteAlias{}, middleware.After) +} + +func addOpDeleteCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteCustomKeyStore{}, middleware.After) +} + +func addOpDeleteImportedKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteImportedKeyMaterial{}, middleware.After) +} + +func addOpDescribeKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeKey{}, middleware.After) +} + +func addOpDisableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKey{}, middleware.After) +} + +func addOpDisableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisableKeyRotation{}, middleware.After) +} + +func addOpDisconnectCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDisconnectCustomKeyStore{}, 
middleware.After) +} + +func addOpEnableKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKey{}, middleware.After) +} + +func addOpEnableKeyRotationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEnableKeyRotation{}, middleware.After) +} + +func addOpEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpEncrypt{}, middleware.After) +} + +func addOpGenerateDataKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKey{}, middleware.After) +} + +func addOpGenerateDataKeyPairValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPair{}, middleware.After) +} + +func addOpGenerateDataKeyPairWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyPairWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateDataKeyWithoutPlaintextValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateDataKeyWithoutPlaintext{}, middleware.After) +} + +func addOpGenerateMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGenerateMac{}, middleware.After) +} + +func addOpGetKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyPolicy{}, middleware.After) +} + +func addOpGetKeyRotationStatusValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetKeyRotationStatus{}, middleware.After) +} + +func addOpGetParametersForImportValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetParametersForImport{}, middleware.After) +} + +func addOpGetPublicKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetPublicKey{}, middleware.After) +} + +func addOpImportKeyMaterialValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpImportKeyMaterial{}, middleware.After) +} + +func addOpListGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListGrants{}, middleware.After) +} + +func addOpListKeyPoliciesValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListKeyPolicies{}, middleware.After) +} + +func addOpListResourceTagsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListResourceTags{}, middleware.After) +} + +func addOpListRetirableGrantsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpListRetirableGrants{}, middleware.After) +} + +func addOpPutKeyPolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutKeyPolicy{}, middleware.After) +} + +func addOpReEncryptValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReEncrypt{}, middleware.After) +} + +func addOpReplicateKeyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpReplicateKey{}, middleware.After) +} + +func addOpRevokeGrantValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRevokeGrant{}, middleware.After) +} + +func addOpScheduleKeyDeletionValidationMiddleware(stack *middleware.Stack) 
error { + return stack.Initialize.Add(&validateOpScheduleKeyDeletion{}, middleware.After) +} + +func addOpSignValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpSign{}, middleware.After) +} + +func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) +} + +func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After) +} + +func addOpUpdateAliasValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateAlias{}, middleware.After) +} + +func addOpUpdateCustomKeyStoreValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateCustomKeyStore{}, middleware.After) +} + +func addOpUpdateKeyDescriptionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdateKeyDescription{}, middleware.After) +} + +func addOpUpdatePrimaryRegionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpUpdatePrimaryRegion{}, middleware.After) +} + +func addOpVerifyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerify{}, middleware.After) +} + +func addOpVerifyMacValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpVerifyMac{}, middleware.After) +} + +func validateTag(v *types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "Tag"} + if v.TagKey == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKey")) + } + if v.TagValue == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagValue")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateTagList(v []types.Tag) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagList"} + for i := range v { + if err := validateTag(&v[i]); err != nil { + invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCancelKeyDeletionInput(v *CancelKeyDeletionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CancelKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpConnectCustomKeyStoreInput(v *ConnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ConnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateAliasInput(v *CreateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateCustomKeyStoreInput(v 
*CreateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateCustomKeyStoreInput"} + if v.CustomKeyStoreName == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateGrantInput(v *CreateGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GranteePrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("GranteePrincipal")) + } + if v.Operations == nil { + invalidParams.Add(smithy.NewErrParamRequired("Operations")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpCreateKeyInput(v *CreateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateKeyInput"} + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDecryptInput(v *DecryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DecryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteAliasInput(v *DeleteAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteCustomKeyStoreInput(v *DeleteCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDeleteImportedKeyMaterialInput(v *DeleteImportedKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteImportedKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDescribeKeyInput(v *DescribeKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyInput(v *DisableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisableKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisableKeyRotationInput(v *DisableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "DisableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpDisconnectCustomKeyStoreInput(v *DisconnectCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DisconnectCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyInput(v *EnableKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEnableKeyRotationInput(v *EnableKeyRotationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EnableKeyRotationInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpEncryptInput(v *EncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "EncryptInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Plaintext == nil { + invalidParams.Add(smithy.NewErrParamRequired("Plaintext")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyInput(v *GenerateDataKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairInput(v *GenerateDataKeyPairInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyPairWithoutPlaintextInput(v *GenerateDataKeyPairWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyPairWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.KeyPairSpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("KeyPairSpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateDataKeyWithoutPlaintextInput(v *GenerateDataKeyWithoutPlaintextInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GenerateDataKeyWithoutPlaintextInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGenerateMacInput(v *GenerateMacInput) error { + if v == nil { + return nil + } 
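+	// Accumulate every missing required field so a single InvalidParamsError
+	// reports all problems at once.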
+ invalidParams := smithy.InvalidParamsError{Context: "GenerateMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyPolicyInput(v *GetKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PolicyName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetKeyRotationStatusInput(v *GetKeyRotationStatusInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetKeyRotationStatusInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetParametersForImportInput(v *GetParametersForImportInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetParametersForImportInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.WrappingAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingAlgorithm")) + } + if len(v.WrappingKeySpec) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("WrappingKeySpec")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpGetPublicKeyInput(v *GetPublicKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetPublicKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpImportKeyMaterialInput(v *ImportKeyMaterialInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ImportKeyMaterialInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ImportToken == nil { + invalidParams.Add(smithy.NewErrParamRequired("ImportToken")) + } + if v.EncryptedKeyMaterial == nil { + invalidParams.Add(smithy.NewErrParamRequired("EncryptedKeyMaterial")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListGrantsInput(v *ListGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListGrantsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListKeyPoliciesInput(v *ListKeyPoliciesInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListKeyPoliciesInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListResourceTagsInput(v *ListResourceTagsInput) error { + if v == nil { + return nil + } + invalidParams := 
smithy.InvalidParamsError{Context: "ListResourceTagsInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpListRetirableGrantsInput(v *ListRetirableGrantsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ListRetirableGrantsInput"} + if v.RetiringPrincipal == nil { + invalidParams.Add(smithy.NewErrParamRequired("RetiringPrincipal")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpPutKeyPolicyInput(v *PutKeyPolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutKeyPolicyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PolicyName == nil { + invalidParams.Add(smithy.NewErrParamRequired("PolicyName")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReEncryptInput(v *ReEncryptInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReEncryptInput"} + if v.CiphertextBlob == nil { + invalidParams.Add(smithy.NewErrParamRequired("CiphertextBlob")) + } + if v.DestinationKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DestinationKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpReplicateKeyInput(v *ReplicateKeyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ReplicateKeyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.ReplicaRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("ReplicaRegion")) + } + if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRevokeGrantInput(v *RevokeGrantInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RevokeGrantInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.GrantId == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpScheduleKeyDeletionInput(v *ScheduleKeyDeletionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ScheduleKeyDeletionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpSignInput(v *SignInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "SignInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpTagResourceInput(v *TagResourceInput) error { + if v == 
nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Tags == nil { + invalidParams.Add(smithy.NewErrParamRequired("Tags")) + } else if v.Tags != nil { + if err := validateTagList(v.Tags); err != nil { + invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUntagResourceInput(v *UntagResourceInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.TagKeys == nil { + invalidParams.Add(smithy.NewErrParamRequired("TagKeys")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateAliasInput(v *UpdateAliasInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateAliasInput"} + if v.AliasName == nil { + invalidParams.Add(smithy.NewErrParamRequired("AliasName")) + } + if v.TargetKeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("TargetKeyId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateCustomKeyStoreInput(v *UpdateCustomKeyStoreInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateCustomKeyStoreInput"} + if v.CustomKeyStoreId == nil { + invalidParams.Add(smithy.NewErrParamRequired("CustomKeyStoreId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdateKeyDescriptionInput(v *UpdateKeyDescriptionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdateKeyDescriptionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Description == nil { + invalidParams.Add(smithy.NewErrParamRequired("Description")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpUpdatePrimaryRegionInput(v *UpdatePrimaryRegionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "UpdatePrimaryRegionInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.PrimaryRegion == nil { + invalidParams.Add(smithy.NewErrParamRequired("PrimaryRegion")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyInput(v *VerifyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyInput"} + if v.KeyId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.Signature == nil { + invalidParams.Add(smithy.NewErrParamRequired("Signature")) + } + if len(v.SigningAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("SigningAlgorithm")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpVerifyMacInput(v *VerifyMacInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "VerifyMacInput"} + if v.Message == nil { + invalidParams.Add(smithy.NewErrParamRequired("Message")) + } + if v.KeyId 
== nil { + invalidParams.Add(smithy.NewErrParamRequired("KeyId")) + } + if len(v.MacAlgorithm) == 0 { + invalidParams.Add(smithy.NewErrParamRequired("MacAlgorithm")) + } + if v.Mac == nil { + invalidParams.Add(smithy.NewErrParamRequired("Mac")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 0bfbcf6d69..d01f604ea8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,76 @@ +# v1.11.21 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.20 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.19 (2022-08-30) + +* **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference. + +# v1.11.18 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.17 (2022-08-15) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) + +# v1.11.16 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.15 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.14 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.13 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.12 (2022-07-11) + +* No change notes available for this release. + +# v1.11.11 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.10 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.9 (2022-06-16) + +* No change notes available for this release. + +# v1.11.8 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.7 (2022-05-26) + +* No change notes available for this release. + +# v1.11.6 (2022-05-25) + +* No change notes available for this release. + +# v1.11.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index 85556599f8..1c2b7499d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -32,7 +32,7 @@ type GetRoleCredentialsInput struct { // The token issued by the CreateToken API call. For more information, see // CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. 
AccessToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index 1923c4a9d6..4fffc77af5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -32,7 +32,7 @@ type ListAccountRolesInput struct { // The token issued by the CreateToken API call. For more information, see // CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. AccessToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index c76f6ca38d..e717a426c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -14,7 +14,8 @@ import ( // Lists all AWS accounts assigned to the user. These AWS accounts are assigned by // the administrator of the account. For more information, see Assign User Access // (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the AWS SSO User Guide. This operation returns a paginated response. +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) { if params == nil { params = &ListAccountsInput{} @@ -35,7 +36,7 @@ type ListAccountsInput struct { // The token issued by the CreateToken API call. For more information, see // CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. AccessToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index cbc72877d9..8b9b44745e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -9,7 +9,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the client- and server-side session that is associated with the user. +// Removes the locally stored SSO tokens from the client-side cache and sends an +// API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. If a user uses IAM Identity +// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is +// used to obtain an IAM session, as specified in the corresponding IAM Identity +// Center permission set. More specifically, IAM Identity Center assumes an IAM +// role in the target account on behalf of the user, and the corresponding +// temporary AWS credentials are returned to the client. After user logout, any +// existing IAM role sessions that were created by using IAM Identity Center +// permission sets continue based on the duration configured in the permission set. 
+// For more information, see User authentications +// (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) in +// the IAM Identity Center User Guide. func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) { if params == nil { params = &LogoutInput{} @@ -30,7 +42,7 @@ type LogoutInput struct { // The token issued by the CreateToken API call. For more information, see // CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // This member is required. AccessToken *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go index c5d03d8e4a..f981b154fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go @@ -3,18 +3,20 @@ // Package sso provides the API client, operations, and parameter types for AWS // Single Sign-On. // -// AWS Single Sign-On Portal is a web service that makes it easy for you to assign -// user access to AWS SSO resources such as the user portal. Users can get AWS -// account applications and roles assigned to them and get federated into the -// application. For general information about AWS SSO, see What is AWS Single -// Sign-On? -// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the -// AWS SSO User Guide. This API reference guide describes the AWS SSO Portal -// operations that you can call programatically and includes detailed information -// on data types and errors. AWS provides SDKs that consist of libraries and sample -// code for various programming languages and platforms, such as Java, Ruby, .Net, -// iOS, or Android. The SDKs provide a convenient way to create programmatic access -// to AWS SSO and other AWS services. For more information about the AWS SDKs, +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity Center +// resources such as the AWS access portal. Users can get AWS account applications +// and roles assigned to them and get federated into the application. Although AWS +// Single Sign-On was renamed, the sso and identitystore API namespaces will +// continue to retain their original name for backward compatibility purposes. For +// more information, see IAM Identity Center rename +// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). +// This reference guide describes the IAM Identity Center Portal operations that +// you can call programmatically and includes detailed information on data types and +// errors. AWS provides SDKs that consist of libraries and sample code for various +// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android. +// The SDKs provide a convenient way to create programmatic access to IAM Identity +// Center and other AWS services. For more information about the AWS SDKs, // including how to download and install them, see Tools for Amazon Web Services // (http://aws.amazon.com/tools/).
package sso diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index a60df06234..6f74de1efe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.3" +const goModuleVersion = "1.11.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index c8d1689927..aeac293ea4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -135,6 +135,14 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.Aws, IsRegionalized: true, Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, endpoints.EndpointKey{ Region: "ap-northeast-1", }: endpoints.Endpoint{ @@ -151,6 +159,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-northeast-2", }, }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{ @@ -199,6 +215,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-north-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ @@ -223,6 +247,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-west-3", }, }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md new file mode 100644 index 0000000000..6eb8e7f31f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -0,0 +1,160 @@ +# v1.13.3 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2022-08-25) + +* **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action. 
+ +# v1.12.14 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.13 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.12 (2022-08-08) + +* **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.11 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.10 (2022-07-11) + +* No change notes available for this release. + +# v1.12.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.6 (2022-05-27) + +* No change notes available for this release. + +# v1.12.5 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.3 (2022-03-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.2 (2022-03-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.1 (2022-03-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2022-03-08) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.0 (2022-02-24) + +* **Feature**: API client updated +* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2022-01-14) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.0 (2022-01-07) + +* **Feature**: API client updated +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.2 (2021-12-02) + +* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514)) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.1 (2021-11-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.8.0 (2021-11-06) + +* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.7.0 (2021-10-21) + +* **Feature**: Updated to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2021-10-11) + +* **Feature**: API client updated +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.0 (2021-09-17) + +* **Feature**: Updated API client and endpoints to latest revision. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2021-08-27) + +* **Feature**: Updated API model to latest revision. +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2021-08-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.2 (2021-08-04) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.1 (2021-07-15) + +* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.0 (2021-06-25) + +* **Feature**: Updated `github.com/aws/smithy-go` to latest version +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2021-05-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2021-05-14) + +* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting. +* **Dependency Update**: Updated to the latest SDK module versions + diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go new file mode 100644 index 0000000000..5e0a85a2c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -0,0 +1,433 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "context" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/defaults" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + smithy "github.com/aws/smithy-go" + smithydocument "github.com/aws/smithy-go/document" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net" + "net/http" + "time" +) + +const ServiceID = "SSO OIDC" +const ServiceAPIVersion = "2019-06-10" + +// Client provides the API client to make operations call for AWS SSO OIDC. +type Client struct { + options Options +} + +// New returns an initialized Client based on the functional options. Provide +// additional functional options to further configure the behavior of the client, +// such as changing the client's endpoint or adding custom middleware behavior. +func New(options Options, optFns ...func(*Options)) *Client { + options = options.Copy() + + resolveDefaultLogger(&options) + + setResolvedDefaultsMode(&options) + + resolveRetryer(&options) + + resolveHTTPClient(&options) + + resolveHTTPSignerV4(&options) + + resolveDefaultEndpointConfiguration(&options) + + for _, fn := range optFns { + fn(&options) + } + + client := &Client{ + options: options, + } + + return client +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + EndpointResolver EndpointResolver + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. If specified in an operation call's functional + // options with a value that is different than the constructed client's Options, + // the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. 
This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. + HTTPClient HTTPClient +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// WithEndpointResolver returns a functional option for setting the Client's +// EndpointResolver option. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// Copy creates a clone where the APIOptions list is deep copied. 
+func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} +func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { + ctx = middleware.ClearStackValues(ctx) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) + options := c.options.Copy() + for _, fn := range optFns { + fn(&options) + } + + finalizeRetryMaxAttemptOptions(&options, *c) + + finalizeClientEndpointResolverOptions(&options) + + for _, fn := range stackFns { + if err := fn(stack, options); err != nil { + return nil, metadata, err + } + } + + for _, fn := range options.APIOptions { + if err := fn(stack); err != nil { + return nil, metadata, err + } + } + + handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) + result, metadata, err = handler.Handle(ctx, params) + if err != nil { + err = &smithy.OperationError{ + ServiceID: ServiceID, + OperationName: opID, + Err: err, + } + } + return result, metadata, err +} + +type noSmithyDocumentSerde = smithydocument.NoSerde + +func resolveDefaultLogger(o *Options) { + if o.Logger != nil { + return + } + o.Logger = logging.Nop{} +} + +func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error { + return middleware.AddSetLoggerMiddleware(stack, o.Logger) +} + +func setResolvedDefaultsMode(o *Options) { + if len(o.resolvedDefaultsMode) > 0 { + return + } + + var mode aws.DefaultsMode + mode.SetFromString(string(o.DefaultsMode)) + + if mode == aws.DefaultsModeAuto { + mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment) + } + + o.resolvedDefaultsMode = mode +} + +// NewFromConfig returns a new client from the provided config. +func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { + opts := Options{ + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + } + resolveAWSRetryerProvider(cfg, &opts) + resolveAWSRetryMaxAttempts(cfg, &opts) + resolveAWSRetryMode(cfg, &opts) + resolveAWSEndpointResolver(cfg, &opts) + resolveUseDualStackEndpoint(cfg, &opts) + resolveUseFIPSEndpoint(cfg, &opts) + return New(opts, optFns...) 
+} + +func resolveHTTPClient(o *Options) { + var buildable *awshttp.BuildableClient + + if o.HTTPClient != nil { + var ok bool + buildable, ok = o.HTTPClient.(*awshttp.BuildableClient) + if !ok { + return + } + } else { + buildable = awshttp.NewBuildableClient() + } + + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) { + if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok { + dialer.Timeout = dialerTimeout + } + }) + + buildable = buildable.WithTransportOptions(func(transport *http.Transport) { + if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok { + transport.TLSHandshakeTimeout = tlsHandshakeTimeout + } + }) + } + + o.HTTPClient = buildable +} + +func resolveRetryer(o *Options) { + if o.Retryer != nil { + return + } + + if len(o.RetryMode) == 0 { + modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode) + if err == nil { + o.RetryMode = modeConfig.RetryMode + } + } + if len(o.RetryMode) == 0 { + o.RetryMode = aws.RetryModeStandard + } + + var standardOptions []func(*retry.StandardOptions) + if v := o.RetryMaxAttempts; v != 0 { + standardOptions = append(standardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = v + }) + } + + switch o.RetryMode { + case aws.RetryModeAdaptive: + var adaptiveOptions []func(*retry.AdaptiveModeOptions) + if len(standardOptions) != 0 { + adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) { + ao.StandardOptions = append(ao.StandardOptions, standardOptions...) + }) + } + o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...) + + default: + o.Retryer = retry.NewStandard(standardOptions...) + } +} + +func resolveAWSRetryerProvider(cfg aws.Config, o *Options) { + if cfg.Retryer == nil { + return + } + o.Retryer = cfg.Retryer() +} + +func resolveAWSRetryMode(cfg aws.Config, o *Options) { + if len(cfg.RetryMode) == 0 { + return + } + o.RetryMode = cfg.RetryMode +} +func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { + if cfg.RetryMaxAttempts == 0 { + return + } + o.RetryMaxAttempts = cfg.RetryMaxAttempts +} + +func finalizeRetryMaxAttemptOptions(o *Options, client Client) { + if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { + if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { + return + } + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) +} + +func addClientUserAgent(stack *middleware.Stack) error { + return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack) +} + +func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: o.Credentials, + Signer: o.HTTPSignerV4, + LogSigning: o.ClientLogMode.IsSigning(), + }) + return stack.Finalize.Add(mw, middleware.After) +} + +type HTTPSignerV4 interface { + SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error +} + +func resolveHTTPSignerV4(o *Options) { + if o.HTTPSignerV4 != nil { + return + } + o.HTTPSignerV4 = newDefaultV4Signer(*o) +} + +func 
newDefaultV4Signer(o Options) *v4.Signer { + return v4.NewSigner(func(so *v4.SignerOptions) { + so.Logger = o.Logger + so.LogSigning = o.ClientLogMode.IsSigning() + }) +} + +func addRetryMiddlewares(stack *middleware.Stack, o Options) error { + mo := retry.AddRetryMiddlewaresOptions{ + Retryer: o.Retryer, + LogRetryAttempts: o.ClientLogMode.IsRetries(), + } + return retry.AddRetryMiddlewares(stack, mo) +} + +// resolves dual-stack endpoint configuration +func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseDualStackEndpoint = value + } + return nil +} + +// resolves FIPS endpoint configuration +func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.EndpointOptions.UseFIPSEndpoint = value + } + return nil +} + +func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error { + return awsmiddleware.AddRequestIDRetrieverMiddleware(stack) +} + +func addResponseErrorMiddleware(stack *middleware.Stack) error { + return awshttp.AddResponseErrorMiddleware(stack) +} + +func addRequestResponseLogging(stack *middleware.Stack, o Options) error { + return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ + LogRequest: o.ClientLogMode.IsRequest(), + LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(), + LogResponse: o.ClientLogMode.IsResponse(), + LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), + }, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go new file mode 100644 index 0000000000..c6e64a13dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -0,0 +1,162 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates and returns an access token for the authorized client. The access token +// issued will be used to fetch short-term credentials for the assigned roles in +// the AWS account. +func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) { + if params == nil { + params = &CreateTokenInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateToken", params, optFns, c.addOperationCreateTokenMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateTokenOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateTokenInput struct { + + // The unique identifier string for each client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientId *string + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // This member is required. + ClientSecret *string + + // Supports grant types for authorization code, refresh token, and device code + // request. 
+ // + // This member is required. + GrantType *string + + // The authorization code received from the authorization service. This parameter + // is required to perform an authorization grant request to get access to a token. + Code *string + + // Used only when calling this API for the device code grant type. This short-term + // code is used to identify this authentication attempt. This should come from an + // in-memory reference to the result of the StartDeviceAuthorization API. + DeviceCode *string + + // The location of the application that will receive the authorization code. Users + // authorize the service to send the request to this location. + RedirectUri *string + + // The token used to obtain an access token in the event that the access token is + // invalid or expired. This token is not issued by the service. + RefreshToken *string + + // The list of scopes that is defined by the client. Upon authorization, this list + // is used to restrict permissions when granting an access token. + Scope []string + + noSmithyDocumentSerde +} + +type CreateTokenOutput struct { + + // An opaque token to access AWS SSO resources assigned to a user. + AccessToken *string + + // Indicates the time in seconds when an access token will expire. + ExpiresIn int32 + + // The identifier of the user associated with the access token, if present. + IdToken *string + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + RefreshToken *string + + // Used to notify the client that the returned token is an access token. The + // supported type is BearerToken. + TokenType *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateToken{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpCreateTokenValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateToken(region string)
*awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateToken", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go new file mode 100644 index 0000000000..096b35df28 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go @@ -0,0 +1,141 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Registers a client with AWS SSO. This allows clients to initiate device +// authorization. The output should be persisted for reuse through many +// authentication requests. +func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) { + if params == nil { + params = &RegisterClientInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RegisterClient", params, optFns, c.addOperationRegisterClientMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RegisterClientOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RegisterClientInput struct { + + // The friendly name of the client. + // + // This member is required. + ClientName *string + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // This member is required. + ClientType *string + + // The list of scopes that are defined by the client. Upon authorization, this list + // is used to restrict permissions when granting an access token. + Scopes []string + + noSmithyDocumentSerde +} + +type RegisterClientOutput struct { + + // The endpoint where the client can request authorization. + AuthorizationEndpoint *string + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt int64 + + // A secret string generated for the client. The client will use this string to get + // authenticated by the service in subsequent calls. + ClientSecret *string + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt int64 + + // The endpoint where the client can get an access token. + TokenEndpoint *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpRegisterClient{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpRegisterClientValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRegisterClient(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RegisterClient", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go new file mode 100644 index 0000000000..0d893b4319 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go @@ -0,0 +1,150 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Initiates device authorization by requesting a pair of verification codes from +// the authorization service. +func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDeviceAuthorizationInput, optFns ...func(*Options)) (*StartDeviceAuthorizationOutput, error) { + if params == nil { + params = &StartDeviceAuthorizationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StartDeviceAuthorization", params, optFns, c.addOperationStartDeviceAuthorizationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StartDeviceAuthorizationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StartDeviceAuthorizationInput struct { + + // The unique identifier string for the client that is registered with AWS SSO. + // This value should come from the persisted result of the RegisterClient API + // operation. 
+ // + // This member is required. + ClientId *string + + // A secret string that is generated for the client. This value should come from + // the persisted result of the RegisterClient API operation. + // + // This member is required. + ClientSecret *string + + // The URL for the AWS SSO user portal. For more information, see Using the User + // Portal + // (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the AWS Single Sign-On User Guide. + // + // This member is required. + StartUrl *string + + noSmithyDocumentSerde +} + +type StartDeviceAuthorizationOutput struct { + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn int32 + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval int32 + + // A one-time user verification code. This is needed to authorize an in-use device. + UserCode *string + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string + + // An alternate URL that the client can use to automatically launch a browser. This + // process skips the manual step in which the user visits the verification page and + // enters their code. + VerificationUriComplete *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) { + err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStartDeviceAuthorization{}, middleware.After) + if err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStartDeviceAuthorization(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: 
"StartDeviceAuthorization", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go new file mode 100644 index 0000000000..e9939aff0d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go @@ -0,0 +1,1689 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson" + "github.com/aws/aws-sdk-go-v2/service/ssooidc/types" + smithy "github.com/aws/smithy-go" + smithyio "github.com/aws/smithy-go/io" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" + "io" + "strings" +) + +type awsRestjson1_deserializeOpCreateToken struct { +} + +func (*awsRestjson1_deserializeOpCreateToken) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorCreateToken(response, &metadata) + } + output := &CreateTokenOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentCreateTokenOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), 
+ Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("AuthorizationPendingException", errorCode): + return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody) + + case strings.EqualFold("ExpiredTokenException", errorCode): + return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidGrantException", errorCode): + return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + case strings.EqualFold("UnsupportedGrantTypeException", errorCode): + return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *CreateTokenOutput + if *v == nil { + sv = &CreateTokenOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "accessToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value) + } + sv.AccessToken = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "idToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected IdToken to be of type string, got %T instead", value) + } + sv.IdToken = ptr.String(jtv) + } + + case "refreshToken": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value) + } + sv.RefreshToken = ptr.String(jtv) + } + + case "tokenType": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected TokenType to be of type string, got %T instead", 
value) + } + sv.TokenType = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpRegisterClient struct { +} + +func (*awsRestjson1_deserializeOpRegisterClient) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorRegisterClient(response, &metadata) + } + output := &RegisterClientOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentRegisterClientOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientMetadataException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, 
errorBody) + + case strings.EqualFold("InvalidScopeException", errorCode): + return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentRegisterClientOutput(v **RegisterClientOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *RegisterClientOutput + if *v == nil { + sv = &RegisterClientOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "authorizationEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.AuthorizationEndpoint = ptr.String(jtv) + } + + case "clientId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientId to be of type string, got %T instead", value) + } + sv.ClientId = ptr.String(jtv) + } + + case "clientIdIssuedAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientIdIssuedAt = i64 + } + + case "clientSecret": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ClientSecret to be of type string, got %T instead", value) + } + sv.ClientSecret = ptr.String(jtv) + } + + case "clientSecretExpiresAt": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected LongTimeStampType to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ClientSecretExpiresAt = i64 + } + + case "tokenEndpoint": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.TokenEndpoint = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +type awsRestjson1_deserializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_deserializeOpStartDeviceAuthorization) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response, &metadata) + } + output := &StartDeviceAuthorizationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot 
bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + code := response.Header.Get("X-Amzn-ErrorType") + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + code, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(code) != 0 { + errorCode = restjson.SanitizeErrorCode(code) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("InternalServerException", errorCode): + return awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("InvalidClientException", errorCode): + return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody) + + case strings.EqualFold("InvalidRequestException", errorCode): + return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody) + + case strings.EqualFold("SlowDownException", errorCode): + return awsRestjson1_deserializeErrorSlowDownException(response, errorBody) + + case strings.EqualFold("UnauthorizedClientException", errorCode): + return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStartDeviceAuthorizationOutput(v **StartDeviceAuthorizationOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StartDeviceAuthorizationOutput + if *v == nil { + sv = &StartDeviceAuthorizationOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "deviceCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeviceCode to be of type string, got %T instead", value) + } + sv.DeviceCode = ptr.String(jtv) + } + + case "expiresIn": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected ExpirationInSeconds 
to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.ExpiresIn = int32(i64) + } + + case "interval": + if value != nil { + jtv, ok := value.(json.Number) + if !ok { + return fmt.Errorf("expected IntervalInSeconds to be json.Number, got %T instead", value) + } + i64, err := jtv.Int64() + if err != nil { + return err + } + sv.Interval = int32(i64) + } + + case "userCode": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected UserCode to be of type string, got %T instead", value) + } + sv.UserCode = ptr.String(jtv) + } + + case "verificationUri": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUri = ptr.String(jtv) + } + + case "verificationUriComplete": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected URI to be of type string, got %T instead", value) + } + sv.VerificationUriComplete = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeErrorAccessDeniedException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AccessDeniedException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAccessDeniedException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorAuthorizationPendingException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.AuthorizationPendingException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentAuthorizationPendingException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorExpiredTokenException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ExpiredTokenException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err 
:= decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentExpiredTokenException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInternalServerException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InternalServerException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInternalServerException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidClientMetadataException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidClientMetadataException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidClientMetadataException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: 
fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidGrantException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidGrantException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequestException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidRequestException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidScopeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentInvalidScopeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorSlowDownException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SlowDownException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape 
interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentSlowDownException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnauthorizedClientException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnauthorizedClientException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnauthorizedClientException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedGrantTypeException{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + err := awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + + return output +} + +func awsRestjson1_deserializeDocumentAccessDeniedException(v **types.AccessDeniedException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AccessDeniedException + if *v == nil { + sv = &types.AccessDeniedException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return 
fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.AuthorizationPendingException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.AuthorizationPendingException + if *v == nil { + sv = &types.AuthorizationPendingException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.ExpiredTokenException + if *v == nil { + sv = &types.ExpiredTokenException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInternalServerException(v **types.InternalServerException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InternalServerException + if *v == nil { + sv = &types.InternalServerException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientException(v **types.InvalidClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := 
value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientException + if *v == nil { + sv = &types.InvalidClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidClientMetadataException(v **types.InvalidClientMetadataException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidClientMetadataException + if *v == nil { + sv = &types.InvalidClientMetadataException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGrantException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidGrantException + if *v == nil { + sv = &types.InvalidGrantException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidRequestException + if *v == nil { + sv = &types.InvalidRequestException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } 
+ + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.InvalidScopeException + if *v == nil { + sv = &types.InvalidScopeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.SlowDownException + if *v == nil { + sv = &types.SlowDownException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnauthorizedClientException(v **types.UnauthorizedClientException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnauthorizedClientException + if *v == nil { + sv = &types.UnauthorizedClientException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + +func awsRestjson1_deserializeDocumentUnsupportedGrantTypeException(v **types.UnsupportedGrantTypeException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", 
v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.UnsupportedGrantTypeException + if *v == nil { + sv = &types.UnsupportedGrantTypeException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "error": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected Error to be of type string, got %T instead", value) + } + sv.Error_ = ptr.String(jtv) + } + + case "error_description": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value) + } + sv.Error_description = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go new file mode 100644 index 0000000000..79c458291a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go @@ -0,0 +1,22 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +// Package ssooidc provides the API client, operations, and parameter types for AWS +// SSO OIDC. +// +// AWS Single Sign-On (SSO) OpenID Connect (OIDC) is a web service that enables a +// client (such as AWS CLI or a native application) to register with AWS SSO. The +// service also enables the client to fetch the user’s access token upon successful +// authentication and authorization with AWS SSO. This service conforms with the +// OAuth 2.0 based implementation of the device authorization grant standard +// (https://tools.ietf.org/html/rfc8628). For +// general information about AWS SSO, see What is AWS Single Sign-On? +// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the +// AWS SSO User Guide. This API reference guide describes the AWS SSO OIDC +// operations that you can call programmatically and includes detailed information +// on data types and errors. AWS provides SDKs that consist of libraries and sample +// code for various programming languages and platforms such as Java, Ruby, .Net, +// iOS, and Android. The SDKs provide a convenient way to create programmatic +// access to AWS SSO and other AWS services. For more information about the AWS +// SDKs, including how to download and install them, see Tools for Amazon Web +// Services (http://aws.amazon.com/tools/). +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go new file mode 100644 index 0000000000..35cd21f18c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -0,0 +1,200 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/url" + "strings" +) + +// EndpointResolverOptions is the service endpoint resolver options +type EndpointResolverOptions = internalendpoints.Options + +// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface { + ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error) +} + +var _ EndpointResolver = &internalendpoints.Resolver{} + +// NewDefaultEndpointResolver constructs a new service endpoint resolver +func NewDefaultEndpointResolver() *internalendpoints.Resolver { + return internalendpoints.New() +} + +// EndpointResolverFunc is a helper utility that wraps a function so it satisfies +// the EndpointResolver interface. This is useful when you want to add additional +// endpoint resolving logic, or stub out specific endpoints with custom values. +type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error) + +func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + return fn(region, options) +} + +func resolveDefaultEndpointConfiguration(o *Options) { + if o.EndpointResolver != nil { + return + } + o.EndpointResolver = NewDefaultEndpointResolver() +} + +// EndpointResolverFromURL returns an EndpointResolver configured using the +// provided endpoint url. By default, the resolved endpoint resolver uses the +// client region as signing region, and the endpoint source is set to +// EndpointSourceCustom. You can provide functional options to configure endpoint +// values for the resolved endpoint. +func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver { + e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom} + for _, fn := range optFns { + fn(&e) + } + + return EndpointResolverFunc( + func(region string, options EndpointResolverOptions) (aws.Endpoint, error) { + if len(e.SigningRegion) == 0 { + e.SigningRegion = region + } + return e, nil + }, + ) +} + +type ResolveEndpoint struct { + Resolver EndpointResolver + Options EndpointResolverOptions +} + +func (*ResolveEndpoint) ID() string { + return "ResolveEndpoint" +} + +func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.Resolver == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + eo := m.Options + eo.Logger = middleware.GetLogger(ctx) + + var endpoint aws.Endpoint + endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + req.URL, err = url.Parse(endpoint.URL) + if err != nil { + return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + if len(awsmiddleware.GetSigningName(ctx)) == 0 { + signingName := endpoint.SigningName + if len(signingName) == 0 { + signingName = "awsssooidc" + } + ctx = awsmiddleware.SetSigningName(ctx, signingName) + } + ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source) + ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable) + ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion) + ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID) + return next.HandleSerialize(ctx, in) +} +func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error { + return stack.Serialize.Insert(&ResolveEndpoint{ + Resolver: o.EndpointResolver, + Options: o.EndpointOptions, + },
"OperationSerializer", middleware.Before) +} + +func removeResolveEndpointMiddleware(stack *middleware.Stack) error { + _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID()) + return err +} + +type wrappedEndpointResolver struct { + awsResolver aws.EndpointResolverWithOptions + resolver EndpointResolver +} + +func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { + if w.awsResolver == nil { + goto fallback + } + endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) + if err == nil { + return endpoint, nil + } + + if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { + return endpoint, err + } + +fallback: + if w.resolver == nil { + return endpoint, fmt.Errorf("default endpoint resolver provided was nil") + } + return w.resolver.ResolveEndpoint(region, options) +} + +type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) + +func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) { + return a(service, region) +} + +var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) + +// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided +// fallbackResolver for resolution. +// +// fallbackResolver must not be nil +func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + + if awsResolverWithOptions != nil { + resolver = awsResolverWithOptions + } else if awsResolver != nil { + resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint) + } + + return &wrappedEndpointResolver{ + awsResolver: resolver, + resolver: fallbackResolver, + } +} + +func finalizeClientEndpointResolverOptions(options *Options) { + options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage() + + if len(options.EndpointOptions.ResolvedRegion) == 0 { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(options.Region, fipsInfix) || + strings.Contains(options.Region, fipsPrefix) || + strings.Contains(options.Region, fipsSuffix) { + options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll( + options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "") + options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled + } + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json new file mode 100644 index 0000000000..4afe3223e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -0,0 +1,29 @@ +{ + "dependencies": { + "github.com/aws/aws-sdk-go-v2": "v1.4.0", + "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", + "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", + "github.com/aws/smithy-go": "v1.4.0" + }, + "files": [ + "api_client.go", + "api_client_test.go", + "api_op_CreateToken.go", + "api_op_RegisterClient.go", + "api_op_StartDeviceAuthorization.go", + "deserializers.go", + "doc.go", + "endpoints.go", + "generated.json", + 
"internal/endpoints/endpoints.go", + "internal/endpoints/endpoints_test.go", + "protocol_test.go", + "serializers.go", + "types/errors.go", + "types/types.go", + "validators.go" + ], + "go": "1.15", + "module": "github.com/aws/aws-sdk-go-v2/service/ssooidc", + "unstable": false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go new file mode 100644 index 0000000000..b3d88d9c2a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -0,0 +1,6 @@ +// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT. + +package ssooidc + +// goModuleVersion is the tagged release for this module +const goModuleVersion = "1.13.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go new file mode 100644 index 0000000000..090c04b3d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -0,0 +1,422 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package endpoints + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2" + "github.com/aws/smithy-go/logging" + "regexp" +) + +// Options is the endpoint resolver configuration options +type Options struct { + // Logger is a logging implementation that log events should be sent to. + Logger logging.Logger + + // LogDeprecated indicates that deprecated endpoints should be logged to the + // provided logger. + LogDeprecated bool + + // ResolvedRegion is used to override the region to be resolved, rather then the + // using the value passed to the ResolveEndpoint method. This value is used by the + // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative + // name. You must not set this value directly in your application. + ResolvedRegion string + + // DisableHTTPS informs the resolver to return an endpoint that does not use the + // HTTPS scheme. + DisableHTTPS bool + + // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint. + UseDualStackEndpoint aws.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. 
+ UseFIPSEndpoint aws.FIPSEndpointState +} + +func (o Options) GetResolvedRegion() string { + return o.ResolvedRegion +} + +func (o Options) GetDisableHTTPS() bool { + return o.DisableHTTPS +} + +func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState { + return o.UseDualStackEndpoint +} + +func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState { + return o.UseFIPSEndpoint +} + +func transformToSharedOptions(options Options) endpoints.Options { + return endpoints.Options{ + Logger: options.Logger, + LogDeprecated: options.LogDeprecated, + ResolvedRegion: options.ResolvedRegion, + DisableHTTPS: options.DisableHTTPS, + UseDualStackEndpoint: options.UseDualStackEndpoint, + UseFIPSEndpoint: options.UseFIPSEndpoint, + } +} + +// Resolver SSO OIDC endpoint resolver +type Resolver struct { + partitions endpoints.Partitions +} + +// ResolveEndpoint resolves the service endpoint for the given region and options +func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) { + if len(region) == 0 { + return endpoint, &aws.MissingRegionError{} + } + + opt := transformToSharedOptions(options) + return r.partitions.ResolveEndpoint(region, opt) +} + +// New returns a new Resolver +func New() *Resolver { + return &Resolver{ + partitions: defaultPartitions, + } +} + +var partitionRegexp = struct { + Aws *regexp.Regexp + AwsCn *regexp.Regexp + AwsIso *regexp.Regexp + AwsIsoB *regexp.Regexp + AwsUsGov *regexp.Regexp +}{ + + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), + AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), + AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), +} + +var defaultPartitions = endpoints.Partitions{ + { + ID: "aws", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.Aws, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "ap-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ap-northeast-3", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-northeast-3", + }, + }, + endpoints.EndpointKey{ + Region: "ap-south-1", + }: 
endpoints.Endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-1", + }, + }, + endpoints.EndpointKey{ + Region: "ap-southeast-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-2", + }, + }, + endpoints.EndpointKey{ + Region: "ca-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-1", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-2", + }, + }, + endpoints.EndpointKey{ + Region: "eu-west-3", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-west-3", + }, + }, + endpoints.EndpointKey{ + Region: "me-south-1", + }: endpoints.Endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-south-1", + }, + }, + endpoints.EndpointKey{ + Region: "sa-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "sa-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-east-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-east-2", + }, + }, + endpoints.EndpointKey{ + Region: "us-west-2", + }: endpoints.Endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + { + ID: "aws-cn", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, 
+ SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsCn, + IsRegionalized: true, + }, + { + ID: "aws-iso", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIso, + IsRegionalized: true, + }, + { + ID: "aws-iso-b", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoB, + IsRegionalized: true, + }, + { + ID: "aws-us-gov", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.DualStackVariant, + }: { + Hostname: "oidc.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "oidc-fips.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "oidc-fips.{region}.api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "oidc.{region}.amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsUsGov, + IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-gov-east-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-east-1", + }, + }, + endpoints.EndpointKey{ + Region: "us-gov-west-1", + }: endpoints.Endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go new file mode 100644 index 0000000000..a8cfd7b46c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go @@ -0,0 +1,288 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package ssooidc + +import ( + "bytes" + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + smithyjson "github.com/aws/smithy-go/encoding/json" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +type awsRestjson1_serializeOpCreateToken struct { +} + +func (*awsRestjson1_serializeOpCreateToken) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateTokenInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/token") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentCreateTokenInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.Code != nil { + ok := object.Key("code") + ok.String(*v.Code) + } + + if v.DeviceCode != nil { + ok := object.Key("deviceCode") + ok.String(*v.DeviceCode) + } + + if v.GrantType != nil { + ok := object.Key("grantType") + ok.String(*v.GrantType) + } + + if v.RedirectUri != nil { + ok := object.Key("redirectUri") + ok.String(*v.RedirectUri) + } + + if v.RefreshToken != nil { + ok := object.Key("refreshToken") + ok.String(*v.RefreshToken) + } + + if v.Scope != nil { + ok := object.Key("scope") + if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpRegisterClient struct { +} + +func (*awsRestjson1_serializeOpRegisterClient) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RegisterClientInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/client/register") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentRegisterClientInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientName != nil { + ok := object.Key("clientName") + ok.String(*v.ClientName) + } + + if v.ClientType != nil { + ok := object.Key("clientType") + ok.String(*v.ClientType) + } + + if v.Scopes != nil { + ok := object.Key("scopes") + if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil { + return err + } + } + + return nil +} + +type awsRestjson1_serializeOpStartDeviceAuthorization struct { +} + +func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/device_authorization") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + restEncoder.SetHeader("Content-Type").String("application/json") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + return nil +} + +func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ClientId != nil { + ok := object.Key("clientId") + ok.String(*v.ClientId) + } + + if v.ClientSecret != nil { + ok := object.Key("clientSecret") + ok.String(*v.ClientSecret) + } + + if v.StartUrl != nil { + ok := object.Key("startUrl") + ok.String(*v.StartUrl) + } + + return nil +} + +func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error { + array := value.Array() + defer array.Close() + + for i := range v { + av := array.Value() + av.String(v[i]) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go new file mode 100644 index 0000000000..beef5aaa37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go @@ -0,0 +1,282 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + "fmt" + smithy "github.com/aws/smithy-go" +) + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AccessDeniedException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AccessDeniedException) ErrorCode() string { return "AccessDeniedException" } +func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request to authorize a client with an access user session token +// is pending. +type AuthorizationPendingException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *AuthorizationPendingException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *AuthorizationPendingException) ErrorCode() string { return "AuthorizationPendingException" } +func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the token issued by the service is expired and is no longer +// valid. 
+type ExpiredTokenException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *ExpiredTokenException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" } +func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that an error from the service occurred while trying to process a +// request. +type InternalServerException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InternalServerException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InternalServerException) ErrorCode() string { return "InternalServerException" } +func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer } + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientException) ErrorCode() string { return "InvalidClientException" } +func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client information sent in the request during registration is +// invalid. +type InvalidClientMetadataException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidClientMetadataException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidClientMetadataException) ErrorCode() string { return "InvalidClientMetadataException" } +func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidGrantException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidGrantException) ErrorCode() string { return "InvalidGrantException" } +func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that something is wrong with the input to the request. For example, a +// required parameter might be missing or out of range. 
+type InvalidRequestException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequestException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequestException) ErrorCode() string { return "InvalidRequestException" } +func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidScopeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidScopeException) ErrorCode() string { return "InvalidScopeException" } +func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is making requests more frequently than the service +// can handle. +type SlowDownException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *SlowDownException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *SlowDownException) ErrorCode() string { return "SlowDownException" } +func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the client is not currently authorized to make the request. This +// can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnauthorizedClientException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnauthorizedClientException) ErrorCode() string { return "UnauthorizedClientException" } +func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Indicates that the grant type in the request is not supported by the service.
+type UnsupportedGrantTypeException struct { + Message *string + + Error_ *string + Error_description *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedGrantTypeException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedGrantTypeException) ErrorCode() string { return "UnsupportedGrantTypeException" } +func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go new file mode 100644 index 0000000000..0ec0789f8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go @@ -0,0 +1,9 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package types + +import ( + smithydocument "github.com/aws/smithy-go/document" +) + +type noSmithyDocumentSerde = smithydocument.NoSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go new file mode 100644 index 0000000000..5a309484e0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go @@ -0,0 +1,142 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package ssooidc + +import ( + "context" + "fmt" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +type validateOpCreateToken struct { +} + +func (*validateOpCreateToken) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateTokenInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateTokenInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpRegisterClient struct { +} + +func (*validateOpRegisterClient) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRegisterClient) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RegisterClientInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRegisterClientInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + +type validateOpStartDeviceAuthorization struct { +} + +func (*validateOpStartDeviceAuthorization) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStartDeviceAuthorization) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StartDeviceAuthorizationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStartDeviceAuthorizationInput(input); err != nil { + return out, metadata, err + } + return 
next.HandleInitialize(ctx, in) +} + +func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After) +} + +func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After) +} + +func addOpStartDeviceAuthorizationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStartDeviceAuthorization{}, middleware.After) +} + +func validateOpCreateTokenInput(v *CreateTokenInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateTokenInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.GrantType == nil { + invalidParams.Add(smithy.NewErrParamRequired("GrantType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpRegisterClientInput(v *RegisterClientInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RegisterClientInput"} + if v.ClientName == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientName")) + } + if v.ClientType == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientType")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + +func validateOpStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StartDeviceAuthorizationInput"} + if v.ClientId == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientId")) + } + if v.ClientSecret == nil { + invalidParams.Add(smithy.NewErrParamRequired("ClientSecret")) + } + if v.StartUrl == nil { + invalidParams.Add(smithy.NewErrParamRequired("StartUrl")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 6ad12851a2..6f4bc94df9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,59 @@ +# v1.16.17 (2022-09-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.16 (2022-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2022-08-30) + +* No change notes available for this release. 
+ +# v1.16.14 (2022-08-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2022-08-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2022-08-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2022-08-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2022-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2022-07-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2022-06-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2022-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2022-05-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2022-05-16) + +* **Documentation**: Documentation updates for AWS Security Token Service. + +# v1.16.4 (2022-04-25) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.3 (2022-03-30) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 4bff1dfe22..3041fc467e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -512,6 +512,9 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op if err != nil { return err } + if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { + return err + } // convert request to a GET request err = query.AddAsGetRequestMiddleware(stack) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go index b292f208a0..bfde51689d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go @@ -26,6 +26,11 @@ import ( // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) // and Comparing the Amazon Web Services STS API operations // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. No permissions are required for users to perform this +// operation. The purpose of the sts:GetSessionToken operation is to authenticate +// the user using MFA. You cannot use policies to control authentication +// operations. For more information, see Permissions for GetSessionToken +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. Session Duration The GetSessionToken operation must be // called by using the long-term Amazon Web Services security credentials of the // Amazon Web Services account root user or an IAM user. 
Credentials that are diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index c81e72b70c..2475aa8710 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.3" +const goModuleVersion = "1.16.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go index 28ed441bf8..d061a4e992 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go @@ -191,6 +191,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-west-3", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 8d65ca1d64..cad3b9a488 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -31,12 +31,12 @@ func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { // allow you to get a list of the partitions in the order the endpoints // will be resolved in. // -// resolver, err := endpoints.DecodeModel(reader) +// resolver, err := endpoints.DecodeModel(reader) // -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { var opts DecodeModelOptions opts.Set(optFns...) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 8e41aa0ea4..0d2ea81515 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -33,6 +33,7 @@ const ( EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). 
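The expanded GetSessionToken documentation above notes that the operation exists to authenticate a user with MFA and that no IAM permissions are required to call it. A minimal sketch of invoking it through aws-sdk-go-v2, assuming the default credential chain; the MFA serial ARN and token code below are hypothetical placeholders, not values from this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()
	// Load long-term credentials from the default chain (env vars, shared config, ...).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := sts.NewFromConfig(cfg)
	// Exchange long-term credentials plus an MFA code for temporary credentials.
	out, err := client.GetSessionToken(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int32(3600),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // hypothetical MFA device
		TokenCode:       aws.String("123456"),                                     // hypothetical one-time code
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.ToString(out.Credentials.AccessKeyId))
}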
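The decode.go hunk above only reflows the DecodeModel doc comment for Go 1.19's gofmt; the enumeration pattern it documents is unchanged. A short sketch of that same pattern, run against the built-in model via DefaultResolver instead of a decoded reader, which would list the new me-central-1 region added to the aws partition in defaults.go:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// The default resolver implements EnumPartitions, exactly as the
	// DecodeModel doc comment describes for decoded models.
	resolver := endpoints.DefaultResolver()
	partitions := resolver.(endpoints.EnumPartitions).Partitions()
	for _, p := range partitions {
		fmt.Println("partition:", p.ID())
		for id, r := range p.Regions() {
			fmt.Printf("  %s (%s)\n", id, r.Description())
		}
	}
}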
@@ -186,6 +187,9 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "me-central-1": region{ + Description: "Middle East (UAE)", + }, "me-south-1": region{ Description: "Middle East (Bahrain)", }, @@ -314,6 +318,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -437,6 +444,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -548,6 +558,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -797,18 +810,33 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -1280,6 +1308,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -1503,6 +1539,42 @@ var awsPartition = partition{ }, }, }, + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.iotwireless": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -1623,6 +1695,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -1891,9 +1966,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -1912,6 +1996,54 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: 
"apigateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1921,22 +2053,52 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + }, }, }, "app-integrations": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1960,7 +2122,7 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "appconfigdata": service{ + "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -1986,6 +2148,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2007,6 +2172,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2027,17 +2195,23 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "appflow": service{ + "appconfigdata": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -2053,6 +2227,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2062,6 +2242,9 @@ var 
awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -2079,28 +2262,17 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, + "appflow": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -2110,21 +2282,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -2134,9 +2297,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -2154,7 +2314,12 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "applicationinsights": service{ + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2180,6 +2345,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2201,6 +2369,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2221,7 +2392,7 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "appmesh": service{ + "applicationinsights": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2235,6 +2406,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -2285,61 +2459,263 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "apprunner": service{ + "appmesh": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", + Region: "af-south-1", }: endpoint{}, endpointKey{ - Region: "fips-us-east-1", + Region: "af-south-1", + Variant: dualStackVariant, }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, + Hostname: "appmesh.af-south-1.api.aws", }, endpointKey{ - Region: "fips-us-east-2", + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, + Hostname: "appmesh.ap-east-1.api.aws", }, endpointKey{ - Region: "fips-us-west-2", + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, }: endpoint{ - Hostname: "apprunner-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, + Hostname: "appmesh.ap-northeast-1.api.aws", }, endpointKey{ - Region: "us-east-1", + Region: "ap-northeast-2", }: endpoint{}, endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, + Region: "ap-northeast-2", + Variant: dualStackVariant, }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", + Hostname: "appmesh.ap-northeast-2.api.aws", }, endpointKey{ - Region: "us-east-2", + Region: "ap-northeast-3", }: endpoint{}, endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, + Region: "ap-northeast-3", + Variant: dualStackVariant, }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", + Hostname: "appmesh.ap-northeast-3.api.aws", }, endpointKey{ - Region: "us-west-2", + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-3.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + 
Hostname: "appmesh.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, + }, + }, + "apprunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", }: endpoint{}, endpointKey{ Region: "us-west-2", @@ -2413,6 +2789,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -2522,6 +2901,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -2559,6 +2941,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -2755,6 +3140,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -2920,15 +3308,7 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.{region}.{dnsSuffix}", - }, - }, + "backup-gateway": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -2975,42 +3355,6 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3020,19 +3364,149 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - }, endpointKey{ Region: "us-east-2", }: endpoint{}, endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + 
}: endpoint{ Hostname: "fips.batch.us-east-2.amazonaws.com", }, endpointKey{ @@ -3099,6 +3573,101 @@ var awsPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "catalog.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "ce": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -3426,6 +3995,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3523,9 +4095,6 @@ var awsPartition = partition{ }, "cloudhsm": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -3564,6 +4133,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3725,6 +4297,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -3838,6 +4413,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -4140,6 +4718,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -4896,6 +5477,22 @@ var awsPartition = partition{ }, "compute-optimizer": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: 
"compute-optimizer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -4912,6 +5509,14 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, endpointKey{ Region: "ap-south-1", }: endpoint{ @@ -4960,6 +5565,14 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -4984,6 +5597,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "compute-optimizer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -5112,6 +5733,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -5190,6 +5814,22 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "connect-campaigns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "contact-lens": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -5218,6 +5858,115 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "cur": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -5574,21 +6323,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + }, }, }, "dataexchange": service{ @@ -5842,60 +6651,35 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "directconnect": service{ + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, 
endpointKey{ Region: "eu-west-1", }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -5904,36 +6688,21 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, Deprecated: boxedTrue, }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -5941,7 +6710,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -5950,16 +6719,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -5968,7 +6728,140 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", }, }, }, @@ -5997,6 +6890,76 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "dms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -6074,6 +7037,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6272,30 +7238,66 @@ var awsPartition = partition{ }, "drs": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + 
endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -6327,6 +7329,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6522,6 +7527,9 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6776,7 +7784,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.ap-south-1.aws", + Hostname: "ec2.ap-south-1.api.aws", }, endpointKey{ Region: "ap-southeast-1", @@ -6812,7 +7820,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.eu-west-1.aws", + Hostname: "ec2.eu-west-1.api.aws", }, endpointKey{ Region: "eu-west-2", @@ -6865,6 +7873,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6875,7 +7886,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.sa-east-1.aws", + Hostname: "ec2.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -6884,7 +7895,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-east-1.aws", + Hostname: "ec2.us-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -6899,7 +7910,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-east-2.aws", + Hostname: "ec2.us-east-2.api.aws", }, endpointKey{ Region: "us-east-2", @@ -6923,7 +7934,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-west-2.aws", + Hostname: "ec2.us-west-2.api.aws", }, endpointKey{ Region: "us-west-2", @@ -7019,6 +8030,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7063,14 +8077,36 @@ var awsPartition = partition{ }, }, }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ Hostname: "fips.eks.{region}.{dnsSuffix}", Protocols: []string{"http", "https"}, }, @@ -7100,6 +8136,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7260,6 +8299,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7366,6 +8408,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -7958,6 +9003,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8111,6 +9159,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8188,24 +9239,96 @@ var awsPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + }, }, }, "emr-containers": service{ @@ -8335,6 +9458,52 @@ var awsPartition = partition{ }, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"emr-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, "entitlement.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -8408,6 +9577,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8574,6 +9746,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -8731,6 +9906,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -9720,6 +10898,37 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "glacier": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -9896,6 +11105,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10135,6 +11347,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -10346,7 +11561,23 @@ var awsPartition = partition{ }, }, "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, endpointKey{ Region: "fips-us-east-2", }: endpoint{ @@ -10462,6 +11693,9 @@ var awsPartition = partition{ }, "identity-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -10888,67 +12122,147 @@ var awsPartition = partition{ Region: "ap-southeast-2", }: endpoint{}, endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", + 
Region: "ca-central-1", }: endpoint{}, endpointKey{ - Region: "eu-west-2", - }: endpoint{}, + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + }, endpointKey{ - Region: "us-east-1", + Region: "eu-central-1", }: endpoint{}, endpointKey{ - Region: "us-east-2", + Region: "eu-west-1", }: endpoint{}, endpointKey{ - Region: "us-west-2", + Region: "eu-west-2", }: endpoint{}, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ endpointKey{ - Region: "ap-northeast-1", + Region: "fips-ca-central-1", }: endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "ap-northeast-2", + Region: "fips-us-east-1", }: endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + Hostname: "iotevents-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "ap-south-1", + Region: "fips-us-east-2", }: endpoint{ - Hostname: "data.iotevents.ap-south-1.amazonaws.com", + Hostname: "iotevents-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "ap-southeast-1", + Region: "fips-us-west-2", }: endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + Hostname: "iotevents-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "ap-southeast-2", + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", }: endpoint{ Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + 
Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -10973,6 +12287,42 @@ var awsPartition = partition{ Region: "eu-west-2", }, }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -10981,6 +12331,15 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, endpointKey{ Region: "us-east-2", }: endpoint{ @@ -10989,6 +12348,15 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, endpointKey{ Region: "us-west-2", }: endpoint{ @@ -10997,6 +12365,15 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "iotsecuredtunneling": service{ @@ -11157,12 +12534,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11172,6 +12567,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -11190,6 +12594,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"iotsitewise-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11321,6 +12734,19 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "kafka": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -11596,6 +13022,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -11666,6 +13095,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -11709,6 +13141,9 @@ var awsPartition = partition{ }, "kinesisvideo": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -12046,6 +13481,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12465,6 +13918,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -12571,6 +14027,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12672,53 +14131,7 @@ var awsPartition = partition{ }, }, }, - "lightsail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "logs": service{ + "license-manager-user-subscriptions": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -12744,9 +14157,6 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -12771,7 +14181,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", 
CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -12780,7 +14190,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -12789,7 +14199,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -12798,7 +14208,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, @@ -12817,7 +14227,186 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -12917,6 +14506,31 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "machinelearning": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13123,6 +14737,52 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, "mediaconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13568,24 +15228,87 @@ var awsPartition = partition{ }, }, }, - "messaging-chime": service{ + "memory-db": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-east-1", + Region: "ap-east-1", }: endpoint{}, endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - }, + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ - Region: "us-east-1-fips", - }: 
endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, Deprecated: boxedTrue, }, }, @@ -13647,6 +15370,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -13718,6 +15444,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -13992,6 +15721,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14485,6 +16217,9 @@ var awsPartition = partition{ }, "nimble": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, @@ -14504,6 +16239,14 @@ var awsPartition = partition{ }, "oidc": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -14520,6 +16263,14 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, endpointKey{ Region: "ap-south-1", }: endpoint{ @@ -14568,6 +16319,14 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -14592,6 +16351,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: 
"oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -14764,6 +16531,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14943,6 +16713,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -14964,6 +16737,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15197,6 +16973,14 @@ var awsPartition = partition{ }, "portal.sso": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -15213,6 +16997,14 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, endpointKey{ Region: "ap-south-1", }: endpoint{ @@ -15261,6 +17053,14 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -15285,6 +17085,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -15378,6 +17186,25 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "proton": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "qldb": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -15695,6 +17522,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15713,6 +17546,54 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, 
+ endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -15722,15 +17603,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + }, }, }, "rds": service{ @@ -15798,6 +17703,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16016,23 +17924,14 @@ var awsPartition = partition{ }, }, }, - "redshift": service{ + "rds-data": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -16042,29 +17941,144 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - }, endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ - Region: "eu-north-1", + Region: "eu-west-1", }: endpoint{}, endpointKey{ - Region: "eu-south-1", + Region: "eu-west-2", }: endpoint{}, endpointKey{ - Region: "eu-west-1", + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", @@ -16117,6 +18131,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -16161,6 +18178,43 @@ var awsPartition = partition{ }, }, }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "rekognition": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -16414,6 +18468,70 @@ var awsPartition = partition{ }, }, }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -16569,43 +18687,104 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "rolesanywhere": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Region: "ap-east-1", + }: endpoint{}, endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Region: "ap-northeast-1", + }: endpoint{}, endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53-recovery-control-config": service{ - Endpoints: serviceEndpoints{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ - Region: "aws-global", - }: endpoint{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53-recovery-control-config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", @@ -16651,6 +18830,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ 
Region: "ca-central-1", }: endpoint{}, @@ -16870,6 +19052,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -17223,6 +19408,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.me-central-1.amazonaws.com", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -18144,6 +20338,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -18250,6 +20447,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18476,6 +20676,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -18736,6 +20939,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18745,6 +20951,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19636,6 +21845,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19772,6 +21984,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -19920,6 +22135,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20016,6 +22234,67 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20102,6 +22381,9 
@@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20367,6 +22649,9 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20446,6 +22731,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20539,6 +22827,19 @@ var awsPartition = partition{ }, }, }, + "supportapp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "swf": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20625,6 +22926,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20719,6 +23023,45 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -20728,15 +23071,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, }, }, "tagging": service{ @@ -20789,6 +23156,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -21432,127 +23802,723 @@ var awsPartition = partition{ Hostname: "translate-fips.us-east-2.amazonaws.com", }, endpointKey{ - Region: "us-east-2-fips", + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: 
"waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", }: endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + Hostname: "waf-regional.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "sa-east-1", }, - Deprecated: boxedTrue, }, 
endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", + Region: "sa-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "sa-east-1", }, - Deprecated: boxedTrue, }, - }, - }, - "voiceid": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, endpointKey{ Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws", }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "aws", + Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-fips.amazonaws.com", + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "aws-fips", + Region: "us-east-2", }: endpoint{ - Hostname: "waf-fips.amazonaws.com", + Hostname: "waf-regional.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "aws-global", + Region: "us-east-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "waf.amazonaws.com", + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, }, endpointKey{ - Region: "aws-global", + Region: "us-west-1", + }: endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-fips.amazonaws.com", + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, }, endpointKey{ - Region: "aws-global-fips", + Region: "us-west-2", }: endpoint{ - Hostname: "waf-fips.amazonaws.com", + Hostname: "waf-regional.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, - Deprecated: boxedTrue, }, }, }, - "waf-regional": service{ + "wafv2": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", }: endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", + Hostname: "wafv2.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, @@ -21561,7 +24527,7 @@ var awsPartition = partition{ Region: "af-south-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + Hostname: 
"wafv2-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, @@ -21569,7 +24535,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-east-1", }: endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", + Hostname: "wafv2.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, @@ -21578,7 +24544,7 @@ var awsPartition = partition{ Region: "ap-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, @@ -21586,7 +24552,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + Hostname: "wafv2.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, @@ -21595,7 +24561,7 @@ var awsPartition = partition{ Region: "ap-northeast-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, @@ -21603,7 +24569,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + Hostname: "wafv2.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, @@ -21612,7 +24578,7 @@ var awsPartition = partition{ Region: "ap-northeast-2", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, @@ -21620,7 +24586,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-3", }: endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + Hostname: "wafv2.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, @@ -21629,7 +24595,7 @@ var awsPartition = partition{ Region: "ap-northeast-3", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, @@ -21637,7 +24603,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", + Hostname: "wafv2.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, @@ -21646,7 +24612,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, @@ -21654,7 +24620,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-1", }: endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + Hostname: "wafv2.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, @@ -21663,7 +24629,7 @@ var awsPartition = partition{ Region: "ap-southeast-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, @@ -21671,7 +24637,7 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: 
endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + Hostname: "wafv2.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, @@ -21680,15 +24646,32 @@ var awsPartition = partition{ Region: "ap-southeast-2", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", + Hostname: "wafv2.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, @@ -21697,7 +24680,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, @@ -21705,7 +24688,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", + Hostname: "wafv2.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, @@ -21714,7 +24697,7 @@ var awsPartition = partition{ Region: "eu-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, @@ -21722,7 +24705,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-north-1", }: endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", + Hostname: "wafv2.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, @@ -21731,7 +24714,7 @@ var awsPartition = partition{ Region: "eu-north-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, @@ -21739,7 +24722,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", + Hostname: "wafv2.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, @@ -21748,7 +24731,7 @@ var awsPartition = partition{ Region: "eu-south-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, @@ -21756,7 +24739,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", + Hostname: "wafv2.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, @@ -21765,7 +24748,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, @@ -21773,7 
+24756,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", + Hostname: "wafv2.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, @@ -21782,7 +24765,7 @@ var awsPartition = partition{ Region: "eu-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, @@ -21790,7 +24773,7 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", + Hostname: "wafv2.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, @@ -21799,7 +24782,7 @@ var awsPartition = partition{ Region: "eu-west-3", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, @@ -21807,7 +24790,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-af-south-1", }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + Hostname: "wafv2-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, @@ -21816,7 +24799,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-east-1", }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, @@ -21825,7 +24808,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-northeast-1", }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, @@ -21834,7 +24817,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-northeast-2", }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, @@ -21843,7 +24826,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-northeast-3", }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, @@ -21852,7 +24835,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-south-1", }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, @@ -21861,7 +24844,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-southeast-1", }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, @@ -21870,16 +24853,25 @@ var awsPartition = partition{ endpointKey{ Region: "fips-ap-southeast-2", }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: 
boxedTrue, + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, @@ -21888,7 +24880,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-central-1", }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, @@ -21897,7 +24889,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-north-1", }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, @@ -21906,7 +24898,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-south-1", }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, @@ -21915,7 +24907,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-west-1", }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, @@ -21924,7 +24916,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-west-2", }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, @@ -21933,7 +24925,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-eu-west-3", }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, @@ -21942,7 +24934,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-me-south-1", }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + Hostname: "wafv2-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, @@ -21951,7 +24943,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-sa-east-1", }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, @@ -21960,7 +24952,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-1", }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + Hostname: "wafv2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -21969,7 +24961,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-east-2", }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + Hostname: "wafv2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -21978,7 +24970,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-1", }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + Hostname: "wafv2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -21987,7 +24979,7 @@ var awsPartition = partition{ endpointKey{ Region: "fips-us-west-2", }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + Hostname: "wafv2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, @@ -21996,7 +24988,7 @@ var 
awsPartition = partition{ endpointKey{ Region: "me-south-1", }: endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", + Hostname: "wafv2.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, @@ -22005,7 +24997,7 @@ var awsPartition = partition{ Region: "me-south-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + Hostname: "wafv2-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, @@ -22013,7 +25005,7 @@ var awsPartition = partition{ endpointKey{ Region: "sa-east-1", }: endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", + Hostname: "wafv2.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, @@ -22022,7 +25014,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, @@ -22030,7 +25022,7 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", + Hostname: "wafv2.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -22039,7 +25031,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + Hostname: "wafv2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, @@ -22047,7 +25039,7 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", + Hostname: "wafv2.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -22056,7 +25048,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + Hostname: "wafv2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, @@ -22064,7 +25056,7 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", + Hostname: "wafv2.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -22073,7 +25065,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + Hostname: "wafv2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -22081,20 +25073,78 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", + Hostname: "wafv2.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", 
+ }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, + Region: "us-west-2", + }: endpoint{}, }, }, "wisdom": service{ @@ -22261,9 +25311,30 @@ var awsPartition = partition{ }, "workspaces-web": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -22358,6 +25429,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -22544,6 +25618,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "appconfigdata": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -22584,9 +25668,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "appsync": service{ @@ -22673,6 +25769,16 @@ var awscnPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "ce": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -22874,6 +25980,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "dms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -23096,9 +26212,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "fms": service{ @@ -23191,13 +26319,23 @@ var awscnPartition = partition{ }, }, "health": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, Endpoints: serviceEndpoints{ endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "iam": service{ @@ -23378,6 +26516,16 @@ var awscnPartition = partition{ }, }, }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "monitoring": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -23974,6 +27122,62 @@ var awscnPartition = partition{ }, }, }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "workspaces": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24426,6 +27630,46 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + }, + }, + }, "appconfigdata": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24609,6 +27853,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + 
"backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -24657,6 +27911,26 @@ var awsusgovPartition = partition{ }, }, }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "cloudcontrolapi": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -25242,6 +28516,16 @@ var awsusgovPartition = partition{ }, }, }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "dms": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -26440,17 +29724,50 @@ var awsusgovPartition = partition{ }, "iotevents": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + }, }, }, "ioteventsdata": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{ - Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -26640,6 +29957,15 @@ var awsusgovPartition = partition{ }, "lakeformation": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -26649,6 +29975,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -26683,6 +30018,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -26692,6 +30033,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", 
}: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -27412,6 +30759,13 @@ var awsusgovPartition = partition{ }, }, }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -27913,42 +31267,12 @@ var awsusgovPartition = partition{ }, }, Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", - }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", - }, }, }, "servicediscovery": service{ @@ -28277,6 +31601,26 @@ var awsusgovPartition = partition{ }, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28531,12 +31875,42 @@ var awsusgovPartition = partition{ }, "synthetics": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + }, }, }, "tagging": service{ @@ -28640,6 +32014,16 @@ var awsusgovPartition = partition{ }, }, }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "transfer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28763,6 +32147,72 @@ var awsusgovPartition = partition{ }, }, }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: 
"wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "workspaces": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -28901,6 +32351,26 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29191,6 +32661,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "events": service{ @@ -29500,6 +32973,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sts": service{ @@ -29638,6 +33114,20 @@ var awsisobPartition = partition{ }, }, }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -29818,6 +33308,28 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, "elasticloadbalancing": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29935,6 +33447,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + 
defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "monitoring": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -29942,6 +33468,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "rds": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -30085,5 +33618,12 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index 84316b92c0..66dec6bebf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -9,7 +9,7 @@ // AWS GovCloud (US) (aws-us-gov). // . // -// Enumerating Regions and Endpoint Metadata +// # Enumerating Regions and Endpoint Metadata // // Casting the Resolver returned by DefaultResolver to a EnumPartitions interface // will allow you to get access to the list of underlying Partitions with the @@ -17,22 +17,22 @@ // resolving to a single partition, or enumerate regions, services, and endpoints // in the partition. // -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() // -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } // -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } // -// Using Custom Endpoints +// # Using Custom Endpoints // // The endpoints package also gives you the ability to use your own logic how // endpoints are resolved. This is a great way to define a custom endpoint @@ -47,20 +47,19 @@ // of Resolver.EndpointFor, converting it to a type that satisfies the // Resolver interface. // +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } // -// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { -// if service == endpoints.S3ServiceID { -// return endpoints.ResolvedEndpoint{ -// URL: "s3.custom.endpoint.com", -// SigningRegion: "custom-signing-region", -// }, nil -// } +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } // -// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) 
-// } -// -// sess := session.Must(session.NewSession(&aws.Config{ -// Region: aws.String("us-west-2"), -// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), -// })) +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 880986157d..a686a48fa2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -353,10 +353,12 @@ type EnumPartitions interface { // as the second parameter. // // This example shows how to get the regions for DynamoDB in the AWS partition. -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) // // This is equivalent to using the partition directly. -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +// +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { for _, p := range ps { if p.ID() != partitionID { @@ -423,8 +425,8 @@ func (p Partition) ID() string { return p.id } // of new regions and services expansions. // // Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError +// - UnknownServiceError +// - UnknownEndpointError func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return p.p.EndpointFor(service, region, opts...) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index e819ab6c0e..9556332b65 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -330,6 +330,9 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { // WithSetRequestHeaders updates the operation request's HTTP header to contain // the header key value pairs provided. If the header key already exists in the // request's HTTP header set, the existing value(s) will be replaced. +// +// Header keys added will be added as canonical format with title casing +// applied via http.Header.Set method. 
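(Note on the handlers.go hunk here: the doc comment above and the body change below swap direct map assignment for http.Header.Set, whose observable effect is MIME canonicalization of the header key. A minimal standard-library sketch of that difference; the header name is only an illustration:)

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        h := http.Header{}

        // The old code path, h[k] = []string{v}, stores the key verbatim,
        // so a canonicalizing lookup via Get does not find it.
        h["x-amz-target"] = []string{"old"}
        fmt.Printf("%q\n", h.Get("x-amz-target")) // "" (Get looks up "X-Amz-Target")

        // The new code path uses Set, which canonicalizes the key to
        // "X-Amz-Target" before storing, matching the doc comment above.
        h.Set("x-amz-target", "new")
        fmt.Printf("%q\n", h.Get("x-amz-target")) // "new"
    }

(This is why the added doc sentence calls out title casing: callers that previously relied on exact-case keys will now see canonical keys on the wire.)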
func WithSetRequestHeaders(h map[string]string) Option { return withRequestHeader(h).SetRequestHeaders } @@ -338,6 +341,6 @@ type withRequestHeader map[string]string func (h withRequestHeader) SetRequestHeaders(r *Request) { for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} + r.HTTPRequest.Header.Set(k, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 49893b3941..551c8882a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.43.45" +const SDKVersion = "1.44.93" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 831b0110c5..2c0cbba909 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -3,6 +3,7 @@ package query import ( "encoding/xml" "fmt" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -62,7 +63,7 @@ func UnmarshalError(r *request.Request) { } r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), + awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil), r.HTTPResponse.StatusCode, reqID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 41051ed43f..253cebaa84 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -32,14 +32,13 @@ const opBatchExecuteStatement = "BatchExecuteStatement" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchExecuteStatementRequest method. +// req, resp := client.BatchExecuteStatementRequest(params) // -// // Example sending a request using the BatchExecuteStatementRequest method. -// req, resp := client.BatchExecuteStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) { @@ -61,11 +60,18 @@ func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInpu // BatchExecuteStatement API operation for Amazon DynamoDB. // // This operation allows you to perform batch reads or writes on data stored -// in DynamoDB, using PartiQL. +// in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement +// must specify an equality condition on all key attributes. This enforces that +// each SELECT statement in a batch returns at most a single item. // // The entire batch must consist of either read statements or write statements, // you cannot mix both in one batch. // +// A HTTP 200 response does not mean that all statements in the BatchExecuteStatement +// succeeded. 
Error details for individual statements can be found under the +// Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error) +// field of the BatchStatementResponse for each statement. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -74,13 +80,14 @@ func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInpu // API operation BatchExecuteStatement for usage and error information. // // Returned Error Types: -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. // -// * InternalServerError -// An error occurred on the server side. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) { @@ -120,14 +127,13 @@ const opBatchGetItem = "BatchGetItem" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) // -// // Example sending a request using the BatchGetItemRequest method. -// req, resp := client.BatchGetItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { @@ -237,25 +243,26 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // API operation BatchGetItem for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. 
+// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { @@ -287,15 +294,14 @@ func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemI // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a BatchGetItem operation. -// pageNum := 0 -// err := client.BatchGetItemPages(params, -// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a BatchGetItem operation. +// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error { return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -347,14 +353,13 @@ const opBatchWriteItem = "BatchWriteItem" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchWriteItemRequest method. +// req, resp := client.BatchWriteItemRequest(params) // -// // Example sending a request using the BatchWriteItemRequest method. -// req, resp := client.BatchWriteItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { @@ -455,24 +460,24 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // If one or more of the following is true, DynamoDB rejects the entire batch // write operation: // -// * One or more tables specified in the BatchWriteItem request does not -// exist. +// - One or more tables specified in the BatchWriteItem request does not +// exist. // -// * Primary key attributes specified on an item in the request do not match -// those in the corresponding table's primary key schema. 
+// - Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. // -// * You try to perform multiple operations on the same item in the same -// BatchWriteItem request. For example, you cannot put and delete the same -// item in the same BatchWriteItem request. +// - You try to perform multiple operations on the same item in the same +// BatchWriteItem request. For example, you cannot put and delete the same +// item in the same BatchWriteItem request. // -// * Your request contains at least two items with identical hash and range -// keys (which essentially is two put operations). +// - Your request contains at least two items with identical hash and range +// keys (which essentially is two put operations). // -// * There are more than 25 requests in the batch. +// - There are more than 25 requests in the batch. // -// * Any individual item in a batch exceeds 400 KB. +// - Any individual item in a batch exceeds 400 KB. // -// * The total request size exceeds 16 MB. +// - The total request size exceeds 16 MB. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -482,29 +487,30 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // API operation BatchWriteItem for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. 
+// +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { @@ -544,14 +550,13 @@ const opCreateBackup = "CreateBackup" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateBackupRequest method. +// req, resp := client.CreateBackupRequest(params) // -// // Example sending a request using the CreateBackupRequest method. -// req, resp := client.CreateBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { @@ -619,13 +624,13 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // // Along with data, the following are also included on the backups: // -// * Global secondary indexes (GSIs) +// - Global secondary indexes (GSIs) // -// * Local secondary indexes (LSIs) +// - Local secondary indexes (LSIs) // -// * Streams +// - Streams // -// * Provisioned read and write capacity +// - Provisioned read and write capacity // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -635,36 +640,38 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // API operation CreateBackup for usage and error information. // // Returned Error Types: -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. // -// * TableInUseException -// A target table with the specified name is either being created or deleted. +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - TableInUseException +// A target table with the specified name is either being created or deleted. // -// * ContinuousBackupsUnavailableException -// Backups have not yet been enabled for this table. +// - ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. // -// * BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. 
// -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) { @@ -704,14 +711,13 @@ const opCreateGlobalTable = "CreateGlobalTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateGlobalTableRequest method. +// req, resp := client.CreateGlobalTableRequest(params) // -// // Example sending a request using the CreateGlobalTableRequest method. -// req, resp := client.CreateGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) { @@ -767,30 +773,30 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // If you want to add a new replica table to a global table, each of the following // conditions must be true: // -// * The table must have the same primary key as all of the other replicas. +// - The table must have the same primary key as all of the other replicas. // -// * The table must have the same name as all of the other replicas. +// - The table must have the same name as all of the other replicas. // -// * The table must have DynamoDB Streams enabled, with the stream containing -// both the new and the old images of the item. +// - The table must have DynamoDB Streams enabled, with the stream containing +// both the new and the old images of the item. // -// * None of the replica tables in the global table can contain any data. +// - None of the replica tables in the global table can contain any data. 
// // If global secondary indexes are specified, then the following conditions // must also be met: // -// * The global secondary indexes must have the same name. +// - The global secondary indexes must have the same name. // -// * The global secondary indexes must have the same hash key and sort key -// (if present). +// - The global secondary indexes must have the same hash key and sort key +// (if present). // // If local secondary indexes are specified, then the following conditions must // also be met: // -// * The local secondary indexes must have the same name. +// - The local secondary indexes must have the same name. // -// * The local secondary indexes must have the same hash key and sort key -// (if present). +// - The local secondary indexes must have the same hash key and sort key +// (if present). // // Write capacity settings should be set consistently across your replica tables // and secondary indexes. DynamoDB strongly recommends enabling auto scaling @@ -810,29 +816,31 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // API operation CreateGlobalTable for usage and error information. // // Returned Error Types: -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // -// * GlobalTableAlreadyExistsException -// The specified global table already exists. +// - GlobalTableAlreadyExistsException +// The specified global table already exists. // -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) { @@ -872,14 +880,13 @@ const opCreateTable = "CreateTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // +// // Example sending a request using the CreateTableRequest method. +// req, resp := client.CreateTableRequest(params) // -// // Example sending a request using the CreateTableRequest method. -// req, resp := client.CreateTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { @@ -950,27 +957,28 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // API operation CreateTable for usage and error information. // // Returned Error Types: -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { @@ -1010,14 +1018,13 @@ const opDeleteBackup = "DeleteBackup" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteBackupRequest method. +// req, resp := client.DeleteBackupRequest(params) // -// // Example sending a request using the DeleteBackupRequest method. 
-// req, resp := client.DeleteBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { @@ -1075,29 +1082,30 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R // API operation DeleteBackup for usage and error information. // // Returned Error Types: -// * BackupNotFoundException -// Backup not found for the given BackupARN. // -// * BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) { @@ -1137,14 +1145,13 @@ const opDeleteItem = "DeleteItem" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteItemRequest method. +// req, resp := client.DeleteItemRequest(params) // -// // Example sending a request using the DeleteItemRequest method. 
-// req, resp := client.DeleteItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { @@ -1213,35 +1220,36 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque // API operation DeleteItem for usage and error information. // // Returned Error Types: -// * ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. // -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. // -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { @@ -1281,14 +1289,13 @@ const opDeleteTable = "DeleteTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteTableRequest method. +// req, resp := client.DeleteTableRequest(params) // -// // Example sending a request using the DeleteTableRequest method. -// req, resp := client.DeleteTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { @@ -1361,31 +1368,32 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req // API operation DeleteTable for usage and error information. // // Returned Error Types: -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) { @@ -1425,14 +1433,13 @@ const opDescribeBackup = "DescribeBackup" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeBackupRequest method. +// req, resp := client.DescribeBackupRequest(params) // -// // Example sending a request using the DescribeBackupRequest method. -// req, resp := client.DescribeBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) { @@ -1490,11 +1497,12 @@ func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *reque // API operation DescribeBackup for usage and error information. // // Returned Error Types: -// * BackupNotFoundException -// Backup not found for the given BackupARN. // -// * InternalServerError -// An error occurred on the server side. +// - BackupNotFoundException +// Backup not found for the given BackupARN. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) { @@ -1534,14 +1542,13 @@ const opDescribeContinuousBackups = "DescribeContinuousBackups" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeContinuousBackupsRequest method. +// req, resp := client.DescribeContinuousBackupsRequest(params) // -// // Example sending a request using the DescribeContinuousBackupsRequest method. -// req, resp := client.DescribeContinuousBackupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) { @@ -1609,12 +1616,14 @@ func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBac // API operation DescribeContinuousBackups for usage and error information. // // Returned Error Types: -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. // -// * InternalServerError -// An error occurred on the server side. +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. +// +// - InternalServerError +// An error occurred on the server side. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) { @@ -1654,14 +1663,13 @@ const opDescribeContributorInsights = "DescribeContributorInsights" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeContributorInsightsRequest method. +// req, resp := client.DescribeContributorInsightsRequest(params) // -// // Example sending a request using the DescribeContributorInsightsRequest method. -// req, resp := client.DescribeContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributorInsightsInput) (req *request.Request, output *DescribeContributorInsightsOutput) { @@ -1693,12 +1701,13 @@ func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributor // API operation DescribeContributorInsights for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights func (c *DynamoDB) DescribeContributorInsights(input *DescribeContributorInsightsInput) (*DescribeContributorInsightsOutput, error) { @@ -1738,14 +1747,13 @@ const opDescribeEndpoints = "DescribeEndpoints" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeEndpointsRequest method. +// req, resp := client.DescribeEndpointsRequest(params) // -// // Example sending a request using the DescribeEndpointsRequest method. -// req, resp := client.DescribeEndpointsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { @@ -1884,14 +1892,13 @@ const opDescribeExport = "DescribeExport" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeExportRequest method. +// req, resp := client.DescribeExportRequest(params) // -// // Example sending a request using the DescribeExportRequest method. 
-// req, resp := client.DescribeExportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) { @@ -1922,25 +1929,26 @@ func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *reque // API operation DescribeExport for usage and error information. // // Returned Error Types: -// * ExportNotFoundException -// The specified export was not found. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - ExportNotFoundException +// The specified export was not found. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// There is a soft account quota of 256 tables. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// * InternalServerError -// An error occurred on the server side. +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) { @@ -1980,14 +1988,13 @@ const opDescribeGlobalTable = "DescribeGlobalTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeGlobalTableRequest method. +// req, resp := client.DescribeGlobalTableRequest(params) // -// // Example sending a request using the DescribeGlobalTableRequest method. 
-// req, resp := client.DescribeGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) { @@ -2048,11 +2055,12 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( // API operation DescribeGlobalTable for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. // -// * GlobalTableNotFoundException -// The specified global table does not exist. +// - InternalServerError +// An error occurred on the server side. +// +// - GlobalTableNotFoundException +// The specified global table does not exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) { @@ -2092,14 +2100,13 @@ const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeGlobalTableSettingsRequest method. +// req, resp := client.DescribeGlobalTableSettingsRequest(params) // -// // Example sending a request using the DescribeGlobalTableSettingsRequest method. -// req, resp := client.DescribeGlobalTableSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) { @@ -2158,11 +2165,12 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable // API operation DescribeGlobalTableSettings for usage and error information. // // Returned Error Types: -// * GlobalTableNotFoundException -// The specified global table does not exist. // -// * InternalServerError -// An error occurred on the server side. +// - GlobalTableNotFoundException +// The specified global table does not exist. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) { @@ -2186,6 +2194,84 @@ func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeImport = "DescribeImport" + +// DescribeImportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImport for more information on using the DescribeImport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DescribeImportRequest method. +// req, resp := client.DescribeImportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImportRequest(input *DescribeImportInput) (req *request.Request, output *DescribeImportOutput) { + op := &request.Operation{ + Name: opDescribeImport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeImportInput{} + } + + output = &DescribeImportOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImport API operation for Amazon DynamoDB. +// +// Represents the properties of the import. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeImport for usage and error information. +// +// Returned Error Types: +// - ImportNotFoundException +// The specified import was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport +func (c *DynamoDB) DescribeImport(input *DescribeImportInput) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + return out, req.Send() +} + +// DescribeImportWithContext is the same as DescribeImport with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeImportWithContext(ctx aws.Context, input *DescribeImportInput, opts ...request.Option) (*DescribeImportOutput, error) { + req, out := c.DescribeImportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination" // DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the @@ -2202,14 +2288,13 @@ const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestinati // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. +// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) // -// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. 
-// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) { @@ -2265,12 +2350,13 @@ func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKin // API operation DescribeKinesisStreamingDestination for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) { @@ -2310,14 +2396,13 @@ const opDescribeLimits = "DescribeLimits" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) // -// // Example sending a request using the DescribeLimitsRequest method. -// req, resp := client.DescribeLimitsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { @@ -2395,14 +2480,14 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // // For each table name listed by ListTables, do the following: // -// * Call DescribeTable with the table name. +// - Call DescribeTable with the table name. // -// * Use the data returned by DescribeTable to add the read capacity units -// and write capacity units provisioned for the table itself to your variables. +// - Use the data returned by DescribeTable to add the read capacity units +// and write capacity units provisioned for the table itself to your variables. // -// * If the table has one or more global secondary indexes (GSIs), loop over -// these GSIs and add their provisioned capacity values to your variables -// as well. +// - If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables +// as well. // // Report the account quotas for that Region returned by DescribeLimits, along // with the total current provisioned capacity levels you have calculated. 
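The capacity-audit recipe above maps directly onto the client methods in this file. A minimal sketch of that loop, assuming an initialized session and eliding per-table error handling:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	func main() {
		svc := dynamodb.New(session.Must(session.NewSession()))

		// Account-level quotas for the current Region.
		limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
		if err != nil {
			panic(err)
		}

		var read, write int64
		err = svc.ListTablesPages(&dynamodb.ListTablesInput{},
			func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
				for _, name := range page.TableNames {
					out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
					if err != nil {
						continue // e.g. table deleted mid-scan; skip it
					}
					// Table-level provisioned capacity.
					if pt := out.Table.ProvisionedThroughput; pt != nil {
						read += aws.Int64Value(pt.ReadCapacityUnits)
						write += aws.Int64Value(pt.WriteCapacityUnits)
					}
					// Add the provisioned capacity of every GSI as well.
					for _, gsi := range out.Table.GlobalSecondaryIndexes {
						if pt := gsi.ProvisionedThroughput; pt != nil {
							read += aws.Int64Value(pt.ReadCapacityUnits)
							write += aws.Int64Value(pt.WriteCapacityUnits)
						}
					}
				}
				return true // keep paging through all table names
			})
		if err != nil {
			panic(err)
		}

		fmt.Printf("provisioned %d RCU / %d WCU against account quota %d / %d\n",
			read, write,
			aws.Int64Value(limits.AccountMaxReadCapacityUnits),
			aws.Int64Value(limits.AccountMaxWriteCapacityUnits))
	}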
@@ -2432,8 +2517,8 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // API operation DescribeLimits for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { @@ -2473,14 +2558,13 @@ const opDescribeTable = "DescribeTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeTableRequest method. +// req, resp := client.DescribeTableRequest(params) // -// // Example sending a request using the DescribeTableRequest method. -// req, resp := client.DescribeTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { @@ -2544,12 +2628,13 @@ func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request // API operation DescribeTable for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { @@ -2589,14 +2674,13 @@ const opDescribeTableReplicaAutoScaling = "DescribeTableReplicaAutoScaling" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method. +// req, resp := client.DescribeTableReplicaAutoScalingRequest(params) // -// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method. -// req, resp := client.DescribeTableReplicaAutoScalingRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableReplicaAutoScalingInput) (req *request.Request, output *DescribeTableReplicaAutoScalingOutput) { @@ -2630,12 +2714,13 @@ func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableRe // API operation DescribeTableReplicaAutoScaling for usage and error information. 
// // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling func (c *DynamoDB) DescribeTableReplicaAutoScaling(input *DescribeTableReplicaAutoScalingInput) (*DescribeTableReplicaAutoScalingOutput, error) { @@ -2675,14 +2760,13 @@ const opDescribeTimeToLive = "DescribeTimeToLive" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DescribeTimeToLiveRequest method. +// req, resp := client.DescribeTimeToLiveRequest(params) // -// // Example sending a request using the DescribeTimeToLiveRequest method. -// req, resp := client.DescribeTimeToLiveRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) { @@ -2738,12 +2822,13 @@ func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (re // API operation DescribeTimeToLive for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) { @@ -2783,14 +2868,13 @@ const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. +// req, resp := client.DisableKinesisStreamingDestinationRequest(params) // -// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. 
-// req, resp := client.DisableKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) { @@ -2847,31 +2931,32 @@ func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKines // API operation DisableKinesisStreamingDestination for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - InternalServerError +// An error occurred on the server side. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// There is a soft account quota of 256 tables. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. +// There is a soft account quota of 2,500 tables. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) { @@ -2911,14 +2996,13 @@ const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // +// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. +// req, resp := client.EnableKinesisStreamingDestinationRequest(params) // -// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. -// req, resp := client.EnableKinesisStreamingDestinationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) { @@ -2977,31 +3061,32 @@ func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesis // API operation EnableKinesisStreamingDestination for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - InternalServerError +// An error occurred on the server side. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// There is a soft account quota of 256 tables. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. +// There is a soft account quota of 2,500 tables. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. 
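The error types listed above surface through the awserr.Error interface these docs reference; a minimal sketch of dispatching on the generated error-code constants (the wrapper function name is hypothetical):

	package example

	import (
		"log"

		"github.com/aws/aws-sdk-go/aws/awserr"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// enableStreaming is a hypothetical wrapper showing the error dispatch.
	func enableStreaming(svc *dynamodb.DynamoDB, in *dynamodb.EnableKinesisStreamingDestinationInput) {
		if _, err := svc.EnableKinesisStreamingDestination(in); err != nil {
			if aerr, ok := err.(awserr.Error); ok {
				switch aerr.Code() {
				case dynamodb.ErrCodeResourceNotFoundException:
					log.Println("table or index missing, or not ACTIVE:", aerr.Message())
				case dynamodb.ErrCodeResourceInUseException:
					log.Println("conflicting table operation in flight:", aerr.Message())
				case dynamodb.ErrCodeLimitExceededException:
					log.Println("too many simultaneous table operations:", aerr.Message())
				default:
					log.Println(aerr.Code(), aerr.Message())
				}
			}
		}
	}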
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) { @@ -3041,14 +3126,13 @@ const opExecuteStatement = "ExecuteStatement" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ExecuteStatementRequest method. +// req, resp := client.ExecuteStatementRequest(params) // -// // Example sending a request using the ExecuteStatementRequest method. -// req, resp := client.ExecuteStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) { @@ -3091,39 +3175,40 @@ func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *r // API operation ExecuteStatement for usage and error information. // // Returned Error Types: -// * ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. // -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. 
This exception is only returned for tables +// that have one or more local secondary indexes. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. // -// * InternalServerError -// An error occurred on the server side. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. // -// * DuplicateItemException -// There was an attempt to insert an item with the same primary key as an item -// that already exists in the DynamoDB table. +// - InternalServerError +// An error occurred on the server side. +// +// - DuplicateItemException +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) { @@ -3163,14 +3248,13 @@ const opExecuteTransaction = "ExecuteTransaction" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ExecuteTransactionRequest method. +// req, resp := client.ExecuteTransactionRequest(params) // -// // Example sending a request using the ExecuteTransactionRequest method. -// req, resp := client.ExecuteTransactionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) { @@ -3208,119 +3292,120 @@ func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (re // API operation ExecuteTransaction for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * TransactionCanceledException -// The entire transaction request was canceled. // -// DynamoDB cancels a TransactWriteItems request under the following circumstances: -// -// * A condition in one of the condition expressions is not met. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * A table in the TransactWriteItems request is in a different account -// or region. +// - TransactionCanceledException +// The entire transaction request was canceled. // -// * More than one action in the TransactWriteItems operation targets the -// same item. +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - A condition in one of the condition expressions is not met. 
// -// * An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. +// - A table in the TransactWriteItems request is in a different account +// or region. // -// * There is a user error, such as an invalid data format. +// - More than one action in the TransactWriteItems operation targets the +// same item. // -// DynamoDB cancels a TransactGetItems request under the following circumstances: +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// * A table in the TransactGetItems request is in a different account or -// region. +// - There is a user error, such as an invalid data format. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// DynamoDB cancels a TransactGetItems request under the following circumstances: // -// * There is a user error, such as an invalid data format. +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. // -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. This property is not set for other languages. Transaction cancellation -// reasons are ordered in the order of requested items, if an item has no error -// it will have NONE code and Null message. +// - A table in the TransactGetItems request is in a different account or +// region. // -// Cancellation reason codes and possible error messages: +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * No Errors: Code: NONE Message: null +// - There is a user error, such as an invalid data format. // -// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items, if an item has no error +// it will have None code and Null message. // -// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. +// Cancellation reason codes and possible error messages: // -// * Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. +// - No Errors: Code: None Message: null // -// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. 
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
//
-// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
//
-// * Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
//
-// * TransactionInProgressException
-// The transaction with the given request token is already in progress.
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
//
-// * IdempotentParameterMismatchException
-// DynamoDB rejected the request because you retried a request with a different
-// payload but with an idempotent token that was already used.
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
//
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
//
-// * RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// * InternalServerError
-// An error occurred on the server side.
+// - TransactionInProgressException
+// The transaction with the given request token is already in progress.
+//
+// - IdempotentParameterMismatchException
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+//
+// - ProvisionedThroughputExceededException
+// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
+// automatically retry requests that receive this exception. Your request is
+// eventually successful, unless your retry queue is too large to finish. Reduce
+// the frequency of requests and use exponential backoff. For more information,
+// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+// in the Amazon DynamoDB Developer Guide.
+//
+// - RequestLimitExceeded
+// Throughput exceeds the current throughput quota for your account. Please
+// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
+// a quota increase.
+//
+// - InternalServerError
+// An error occurred on the server side.
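Although the text above calls out Java's CancellationReasons property, recent v1 Go SDKs also expose a CancellationReasons field on the generated TransactionCanceledException type; a sketch under that assumption (the wrapper function name is hypothetical):

	package example

	import (
		"errors"
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// runTransaction is hypothetical; it surfaces per-item cancellation reasons.
	func runTransaction(svc *dynamodb.DynamoDB, in *dynamodb.ExecuteTransactionInput) error {
		_, err := svc.ExecuteTransaction(in)
		var tce *dynamodb.TransactionCanceledException
		if errors.As(err, &tce) {
			// Reasons are ordered like the requested items; Code "None"
			// marks items that did not cause the cancellation.
			for i, r := range tce.CancellationReasons {
				fmt.Printf("item %d: %s (%s)\n", i,
					aws.StringValue(r.Code), aws.StringValue(r.Message))
			}
		}
		return err
	}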
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) { @@ -3360,14 +3445,13 @@ const opExportTableToPointInTime = "ExportTableToPointInTime" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) // -// // Example sending a request using the ExportTableToPointInTimeRequest method. -// req, resp := client.ExportTableToPointInTimeRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { @@ -3400,35 +3484,37 @@ func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTi // API operation ExportTableToPointInTime for usage and error information. // // Returned Error Types: -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. // -// * PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// There is a soft account quota of 256 tables. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// * InvalidExportTimeException -// The specified ExportTime is outside of the point in time recovery window. +// There is a soft account quota of 2,500 tables. // -// * ExportConflictException -// There was a conflict when writing to the specified S3 bucket. 
+// - InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. // -// * InternalServerError -// An error occurred on the server side. +// - ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { @@ -3468,14 +3554,13 @@ const opGetItem = "GetItem" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetItemRequest method. +// req, resp := client.GetItemRequest(params) // -// // Example sending a request using the GetItemRequest method. -// req, resp := client.GetItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { @@ -3538,25 +3623,26 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // API operation GetItem for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. 
Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { @@ -3580,6 +3666,106 @@ func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts return out, req.Send() } +const opImportTable = "ImportTable" + +// ImportTableRequest generates a "aws/request.Request" representing the +// client's request for the ImportTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportTable for more information on using the ImportTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ImportTableRequest method. +// req, resp := client.ImportTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Request, output *ImportTableOutput) { + op := &request.Operation{ + Name: opImportTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportTableInput{} + } + + output = &ImportTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportTable API operation for Amazon DynamoDB. +// +// Imports table data from an S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ImportTable for usage and error information. +// +// Returned Error Types: +// +// - ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 2,500 tables. +// +// - ImportConflictException +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. 
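A minimal end-to-end sketch of the new import surface, pairing ImportTable with DescribeImport polling; the bucket, prefix, key schema, and table name are hypothetical placeholders:

	package example

	import (
		"fmt"
		"time"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// importAndWait is hypothetical; it starts an S3 import and polls its status.
	func importAndWait(svc *dynamodb.DynamoDB) error {
		out, err := svc.ImportTable(&dynamodb.ImportTableInput{
			InputFormat: aws.String(dynamodb.InputFormatDynamodbJson),
			S3BucketSource: &dynamodb.S3BucketSource{
				S3Bucket:    aws.String("example-bucket"),
				S3KeyPrefix: aws.String("exports/my-table/"),
			},
			TableCreationParameters: &dynamodb.TableCreationParameters{
				TableName:   aws.String("ImportedTable"),
				BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
				AttributeDefinitions: []*dynamodb.AttributeDefinition{{
					AttributeName: aws.String("pk"),
					AttributeType: aws.String(dynamodb.ScalarAttributeTypeS),
				}},
				KeySchema: []*dynamodb.KeySchemaElement{{
					AttributeName: aws.String("pk"),
					KeyType:       aws.String(dynamodb.KeyTypeHash),
				}},
			},
		})
		if err != nil {
			return err
		}

		// Poll the new DescribeImport operation until the import settles.
		arn := out.ImportTableDescription.ImportArn
		for {
			d, err := svc.DescribeImport(&dynamodb.DescribeImportInput{ImportArn: arn})
			if err != nil {
				return err
			}
			status := aws.StringValue(d.ImportTableDescription.ImportStatus)
			fmt.Println("import status:", status)
			if status != dynamodb.ImportStatusInProgress {
				return nil
			}
			time.Sleep(30 * time.Second)
		}
	}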
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable +func (c *DynamoDB) ImportTable(input *ImportTableInput) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + return out, req.Send() +} + +// ImportTableWithContext is the same as ImportTable with the addition of +// the ability to pass a context and additional request options. +// +// See ImportTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ImportTableWithContext(ctx aws.Context, input *ImportTableInput, opts ...request.Option) (*ImportTableOutput, error) { + req, out := c.ImportTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBackups = "ListBackups" // ListBackupsRequest generates a "aws/request.Request" representing the @@ -3596,14 +3782,13 @@ const opListBackups = "ListBackups" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListBackupsRequest method. +// req, resp := client.ListBackupsRequest(params) // -// // Example sending a request using the ListBackupsRequest method. -// req, resp := client.ListBackupsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { @@ -3667,8 +3852,8 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // API operation ListBackups for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { @@ -3708,14 +3893,13 @@ const opListContributorInsights = "ListContributorInsights" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListContributorInsightsRequest method. +// req, resp := client.ListContributorInsightsRequest(params) // -// // Example sending a request using the ListContributorInsightsRequest method. 
-// req, resp := client.ListContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { @@ -3753,12 +3937,13 @@ func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsights // API operation ListContributorInsights for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { @@ -3790,15 +3975,14 @@ func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *Li // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListContributorInsights operation. -// pageNum := 0 -// err := client.ListContributorInsightsPages(params, -// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListContributorInsights operation. +// pageNum := 0 +// err := client.ListContributorInsightsPages(params, +// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -3850,14 +4034,13 @@ const opListExports = "ListExports" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListExportsRequest method. +// req, resp := client.ListExportsRequest(params) // -// // Example sending a request using the ListExportsRequest method. -// req, resp := client.ListExportsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { @@ -3894,22 +4077,23 @@ func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Req // API operation ListExports for usage and error information. 
// // Returned Error Types: -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. // -// There is a soft account quota of 256 tables. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// * InternalServerError -// An error occurred on the server side. +// There is a soft account quota of 2,500 tables. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { @@ -3941,15 +4125,14 @@ func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInp // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListExports operation. -// pageNum := 0 -// err := client.ListExportsPages(params, -// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -4001,14 +4184,13 @@ const opListGlobalTables = "ListGlobalTables" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) // -// // Example sending a request using the ListGlobalTablesRequest method. 
-// req, resp := client.ListGlobalTablesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { @@ -4067,8 +4249,8 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r // API operation ListGlobalTables for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { @@ -4092,155 +4274,138 @@ func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGloba return out, req.Send() } -const opListTables = "ListTables" +const opListImports = "ListImports" -// ListTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListTables operation. The "output" return +// ListImportsRequest generates a "aws/request.Request" representing the +// client's request for the ListImports operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTables for more information on using the ListTables +// See ListImports for more information on using the ListImports // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListImportsRequest method. +// req, resp := client.ListImportsRequest(params) // -// // Example sending a request using the ListTablesRequest method. 
-// req, resp := client.ListTablesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Request, output *ListImportsOutput) { op := &request.Operation{ - Name: opListTables, + Name: opListImports, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartTableName"}, - OutputTokens: []string{"LastEvaluatedTableName"}, - LimitToken: "Limit", + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "PageSize", TruncationToken: "", }, } if input == nil { - input = &ListTablesInput{} + input = &ListImportsInput{} } - output = &ListTablesOutput{} + output = &ListImportsOutput{} req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } return } -// ListTables API operation for Amazon DynamoDB. +// ListImports API operation for Amazon DynamoDB. // -// Returns an array of table names associated with the current account and endpoint. -// The output from ListTables is paginated, with each page returning a maximum -// of 100 table names. +// Lists completed imports within the past 90 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListTables for usage and error information. +// API operation ListImports for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 500 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 2,500 tables. 
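Because the ListImports paginator above is wired to NextToken and PageSize, the generated ListImportsPages helper added just below can drain every page; a brief sketch:

	package example

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
	)

	// listAllImports prints every import summary; PageSize feeds the LimitToken.
	func listAllImports(svc *dynamodb.DynamoDB) error {
		in := &dynamodb.ListImportsInput{PageSize: aws.Int64(25)}
		return svc.ListImportsPages(in,
			func(page *dynamodb.ListImportsOutput, lastPage bool) bool {
				for _, s := range page.ImportSummaryList {
					fmt.Printf("%s\t%s\n",
						aws.StringValue(s.ImportArn),
						aws.StringValue(s.ImportStatus))
				}
				return true // continue until NextToken is exhausted
			})
	}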
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports +func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) return out, req.Send() } -// ListTablesWithContext is the same as ListTables with the addition of +// ListImportsWithContext is the same as ListImports with the addition of // the ability to pass a context and additional request options. // -// See ListTables for details on how to use this API operation. +// See ListImports for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) +func (c *DynamoDB) ListImportsWithContext(ctx aws.Context, input *ListImportsInput, opts ...request.Option) (*ListImportsOutput, error) { + req, out := c.ListImportsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTablesPages iterates over the pages of a ListTables operation, +// ListImportsPages iterates over the pages of a ListImports operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListTables method for more information on how to use this operation. +// See ListImports method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTables operation. -// pageNum := 0 -// err := client.ListTablesPages(params, -// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { - return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +// // Example iterating over at most 3 pages of a ListImports operation. +// pageNum := 0 +// err := client.ListImportsPages(params, +// func(page *dynamodb.ListImportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListImportsPages(input *ListImportsInput, fn func(*ListImportsOutput, bool) bool) error { + return c.ListImportsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListTablesPagesWithContext same as ListTablesPages except +// ListImportsPagesWithContext same as ListImportsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
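
ListImportsPagesWithContext composes the paginator with the context semantics documented above: one deadline bounds every page request, and a nil context panics. A sketch under the same setup assumptions as before:

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // One deadline for the whole pagination loop; each page request
        // created by the paginator inherits ctx.
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        err := svc.ListImportsPagesWithContext(ctx, &dynamodb.ListImportsInput{},
            func(page *dynamodb.ListImportsOutput, lastPage bool) bool {
                for _, s := range page.ImportSummaryList {
                    fmt.Println(aws.StringValue(s.ImportArn))
                }
                return true // return false to stop paging early
            })
        if err != nil {
            log.Fatal(err)
        }
    }
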
-func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { +func (c *DynamoDB) ListImportsPagesWithContext(ctx aws.Context, input *ListImportsInput, fn func(*ListImportsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListTablesInput + var inCpy *ListImportsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListTablesRequest(inCpy) + req, _ := c.ListImportsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -4248,7 +4413,7 @@ func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTables } for p.Next() { - if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) { break } } @@ -4256,30 +4421,191 @@ func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTables return p.Err() } -const opListTagsOfResource = "ListTagsOfResource" +const opListTables = "ListTables" -// ListTagsOfResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsOfResource operation. The "output" return +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsOfResource for more information on using the ListTagsOfResource +// See ListTables for more information on using the ListTables // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + output = &ListTablesOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// ListTables API operation for Amazon DynamoDB. 
+// +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTables for usage and error information. +// +// Returned Error Types: +// - InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + return out, req.Send() +} + +// ListTablesWithContext is the same as ListTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. +// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { + return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTablesPagesWithContext same as ListTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsOfResource = "ListTagsOfResource" + +// ListTagsOfResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsOfResource operation. 
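
The endpoint discovery block retained in ListTablesRequest above only runs when the client opts in and no custom endpoint is configured. A sketch of what opting in looks like from the caller's side (the table limit is an illustrative value):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // With EnableEndpointDiscovery set, ListTablesRequest pushes the
        // "crr.endpointdiscovery" build handler shown above; configuring a
        // custom Endpoint instead skips that branch entirely.
        svc := dynamodb.New(sess, &aws.Config{EnableEndpointDiscovery: aws.Bool(true)})

        out, err := svc.ListTables(&dynamodb.ListTablesInput{Limit: aws.Int64(10)})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValueSlice(out.TableNames))
    }
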
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsOfResource for more information on using the ListTagsOfResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListTagsOfResourceRequest method. -// req, resp := client.ListTagsOfResourceRequest(params) +// // Example sending a request using the ListTagsOfResourceRequest method. +// req, resp := client.ListTagsOfResourceRequest(params) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { @@ -4339,12 +4665,13 @@ func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (re // API operation ListTagsOfResource for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { @@ -4384,14 +4711,13 @@ const opPutItem = "PutItem" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) // -// // Example sending a request using the PutItemRequest method. -// req, resp := client.PutItemRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { @@ -4445,29 +4771,6 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // values. You can return the item's attribute values in the same operation, // using the ReturnValues parameter. // -// This topic provides general information about the PutItem API. 
-// -// For information on how to call the PutItem API using the Amazon Web Services -// SDK in specific languages, see the following: -// -// * PutItem in the Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for Python (Boto) (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) -// // When you add an item, the primary key attributes are the only required attributes. // Attribute values cannot be null. // @@ -4496,35 +4799,36 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // API operation PutItem for usage and error information. // // Returned Error Types: -// * ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. // -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. 
This exception is only returned for tables +// that have one or more local secondary indexes. // -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { @@ -4564,14 +4868,13 @@ const opQuery = "Query" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) // -// // Example sending a request using the QueryRequest method. -// req, resp := client.QueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { @@ -4681,25 +4984,26 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // API operation Query for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. 
For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { @@ -4731,15 +5035,14 @@ func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ... // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a Query operation. -// pageNum := 0 -// err := client.QueryPages(params, -// func(page *dynamodb.QueryOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a Query operation. +// pageNum := 0 +// err := client.QueryPages(params, +// func(page *dynamodb.QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -4791,14 +5094,13 @@ const opRestoreTableFromBackup = "RestoreTableFromBackup" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the RestoreTableFromBackupRequest method. +// req, resp := client.RestoreTableFromBackupRequest(params) // -// // Example sending a request using the RestoreTableFromBackupRequest method. -// req, resp := client.RestoreTableFromBackupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { @@ -4851,17 +5153,17 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // // You must manually set up the following on the restored table: // -// * Auto scaling policies +// - Auto scaling policies // -// * IAM policies +// - IAM policies // -// * Amazon CloudWatch metrics and alarms +// - Amazon CloudWatch metrics and alarms // -// * Tags +// - Tags // -// * Stream settings +// - Stream settings // -// * Time to Live (TTL) settings +// - Time to Live (TTL) settings // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4871,35 +5173,36 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // API operation RestoreTableFromBackup for usage and error information. 
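
Before the error list below, a minimal RestoreTableFromBackup sketch (the backup ARN and table name are placeholders); per the manual-setup list above, auto scaling policies, IAM policies, alarms, tags, stream and TTL settings must be re-created on the restored table by hand:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
            BackupArn:       aws.String("arn:aws:dynamodb:...:backup/..."), // placeholder ARN
            TargetTableName: aws.String("MusicRestored"),                   // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println(aws.StringValue(out.TableDescription.TableStatus)) // typically CREATING
    }
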
//
// Returned Error Types:
-// * TableAlreadyExistsException
-// A target table with the specified name already exists.
//
-// * TableInUseException
-// A target table with the specified name is either being created or deleted.
+// - TableAlreadyExistsException
+// A target table with the specified name already exists.
+//
+// - TableInUseException
+// A target table with the specified name is either being created or deleted.
//
-// * BackupNotFoundException
-// Backup not found for the given BackupARN.
+// - BackupNotFoundException
+// Backup not found for the given BackupARN.
//
-// * BackupInUseException
-// There is another ongoing conflicting backup control plane operation on the
-// table. The backup is either being created, deleted or restored to a table.
+// - BackupInUseException
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// The only exception is when you are creating a table with one or more secondary
-// indexes. You can have up to 25 such requests running at a time; however,
-// if the table or index specifications are complex, DynamoDB might temporarily
-// reduce the number of concurrent operations.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// There is a soft account quota of 256 tables.
+// There is a soft account quota of 2,500 tables.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - InternalServerError
+// An error occurred on the server side.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) {
@@ -4939,14 +5242,13 @@ const opRestoreTableToPointInTime = "RestoreTableToPointInTime"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the RestoreTableToPointInTimeRequest method.
+// req, resp := client.RestoreTableToPointInTimeRequest(params)
//
-// // Example sending a request using the RestoreTableToPointInTimeRequest method.
-// req, resp := client.RestoreTableToPointInTimeRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) {
@@ -5004,30 +5306,30 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn
// Along with data, the following are also included on the new restored table
// using point in time recovery:
//
-// * Global secondary indexes (GSIs)
+// - Global secondary indexes (GSIs)
//
-// * Local secondary indexes (LSIs)
+// - Local secondary indexes (LSIs)
//
-// * Provisioned read and write capacity
+// - Provisioned read and write capacity
//
-// * Encryption settings All these settings come from the current settings
-// of the source table at the time of restore.
+// - Encryption settings All these settings come from the current settings
+// of the source table at the time of restore.
//
// You must manually set up the following on the restored table:
//
-// * Auto scaling policies
+// - Auto scaling policies
//
-// * IAM policies
+// - IAM policies
//
-// * Amazon CloudWatch metrics and alarms
+// - Amazon CloudWatch metrics and alarms
//
-// * Tags
+// - Tags
//
-// * Stream settings
+// - Stream settings
//
-// * Time to Live (TTL) settings
+// - Time to Live (TTL) settings
//
-// * Point in time recovery settings
+// - Point in time recovery settings
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -5037,39 +5339,41 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn
// API operation RestoreTableToPointInTime for usage and error information.
//
// Returned Error Types:
-// * TableAlreadyExistsException
-// A target table with the specified name already exists.
//
-// * TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account.
+// - TableAlreadyExistsException
+// A target table with the specified name already exists.
+//
+// - TableNotFoundException
+// A source table with the name TableName does not currently exist within the
+// subscriber's account or the subscriber is operating in the wrong Amazon Web
+// Services Region.
//
-// * TableInUseException
-// A target table with the specified name is either being created or deleted.
+// - TableInUseException
+// A target table with the specified name is either being created or deleted.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
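
Most of the error types in this list surface as awserr.Error values, as the doc comments keep noting. A hedged sketch of restoring to a point in time and switching on the documented codes (source and target table names are placeholders):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        _, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
            SourceTableName:         aws.String("Music"),         // placeholder
            TargetTableName:         aws.String("MusicRestored"), // placeholder
            UseLatestRestorableTime: aws.Bool(true),
        })
        if aerr, ok := err.(awserr.Error); ok {
            // The codes below are among those documented in this error list.
            switch aerr.Code() {
            case dynamodb.ErrCodeTableAlreadyExistsException,
                dynamodb.ErrCodeLimitExceededException,
                dynamodb.ErrCodePointInTimeRecoveryUnavailableException:
                log.Printf("restore rejected: %s", aerr.Message())
            default:
                log.Fatal(aerr)
            }
        } else if err != nil {
            log.Fatal(err)
        }
    }
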
// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InvalidRestoreTimeException -// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime -// and LatestRestorableDateTime. +// - InvalidRestoreTimeException +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. // -// * PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. +// - PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { @@ -5109,14 +5413,13 @@ const opScan = "Scan" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ScanRequest method. +// req, resp := client.ScanRequest(params) // -// // Example sending a request using the ScanRequest method. -// req, resp := client.ScanRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { @@ -5205,25 +5508,26 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // API operation Scan for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. 
Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. +// +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { @@ -5255,15 +5559,14 @@ func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...re // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a Scan operation. -// pageNum := 0 -// err := client.ScanPages(params, -// func(page *dynamodb.ScanOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -5315,14 +5618,13 @@ const opTagResource = "TagResource" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { @@ -5385,31 +5687,32 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req // API operation TagResource for usage and error information. // // Returned Error Types: -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. 
These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// The only exception is when you are creating a table with one or more secondary
-// indexes. You can have up to 25 such requests running at a time; however,
-// if the table or index specifications are complex, DynamoDB might temporarily
-// reduce the number of concurrent operations.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// There is a soft account quota of 256 tables.
+// There is a soft account quota of 2,500 tables.
//
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - InternalServerError
+// An error occurred on the server side.
//
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
@@ -5449,14 +5752,13 @@ const opTransactGetItems = "TransactGetItems"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the TransactGetItemsRequest method.
+// req, resp := client.TransactGetItemsRequest(params)
//
-// // Example sending a request using the TransactGetItemsRequest method.
-// req, resp := client.TransactGetItemsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) {
@@ -5514,15 +5816,15 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r
// DynamoDB rejects the entire TransactGetItems request if any of the following
// is true:
//
-// * A conflicting operation is in the process of updating an item to be
-// read.
// -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * There is a user error, such as an invalid data format. +// - There is a user error, such as an invalid data format. // -// * The aggregate size of the items in the transaction cannot exceed 4 MB. +// - The aggregate size of the items in the transaction cannot exceed 4 MB. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5532,112 +5834,113 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // API operation TransactGetItems for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * TransactionCanceledException -// The entire transaction request was canceled. // -// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - TransactionCanceledException +// The entire transaction request was canceled. // -// * A condition in one of the condition expressions is not met. +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// * A table in the TransactWriteItems request is in a different account -// or region. +// - A condition in one of the condition expressions is not met. // -// * More than one action in the TransactWriteItems operation targets the -// same item. +// - A table in the TransactWriteItems request is in a different account +// or region. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - More than one action in the TransactWriteItems operation targets the +// same item. // -// * An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * There is a user error, such as an invalid data format. +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// DynamoDB cancels a TransactGetItems request under the following circumstances: +// - There is a user error, such as an invalid data format. // -// * There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// DynamoDB cancels a TransactGetItems request under the following circumstances: // -// * A table in the TransactGetItems request is in a different account or -// region. +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. 
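
A hedged sketch of a TransactGetItems call to which the rejection and cancellation semantics above apply (table names and keys are invented):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Both reads succeed or the whole request is rejected (conflicting
        // writes, insufficient capacity, >4 MB aggregate, user error).
        out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
            TransactItems: []*dynamodb.TransactGetItem{
                {Get: &dynamodb.Get{
                    TableName: aws.String("Music"), // placeholder
                    Key: map[string]*dynamodb.AttributeValue{
                        "Artist": {S: aws.String("No One You Know")},
                    },
                }},
                {Get: &dynamodb.Get{
                    TableName: aws.String("Playlists"), // placeholder
                    Key: map[string]*dynamodb.AttributeValue{
                        "Id": {S: aws.String("42")},
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err) // a TransactionCanceledException carries per-item reasons
        }
        for _, r := range out.Responses {
            fmt.Println(r.Item)
        }
    }
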
// -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - A table in the TransactGetItems request is in a different account or +// region. // -// * There is a user error, such as an invalid data format. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. This property is not set for other languages. Transaction cancellation -// reasons are ordered in the order of requested items, if an item has no error -// it will have NONE code and Null message. +// - There is a user error, such as an invalid data format. // -// Cancellation reason codes and possible error messages: +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items, if an item has no error +// it will have None code and Null message. // -// * No Errors: Code: NONE Message: null +// Cancellation reason codes and possible error messages: // -// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. +// - No Errors: Code: None Message: null // -// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. +// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. // -// * Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. +// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded +// Message: Collection size exceeded. // -// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. -// Consider increasing your provisioning level for the under-provisioned -// global secondary indexes with the UpdateTable API. This message is returned -// when provisioned throughput is exceeded is on a provisioned GSI. +// - Transaction Conflict: Code: TransactionConflict Message: Transaction +// is ongoing for the item. // -// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. +// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded +// Messages: The level of configured provisioned throughput for the table +// was exceeded. 
Consider increasing your provisioning level with the UpdateTable
+// API. This message is received when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
//
-// * Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
//
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
//
-// * RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account.
Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * InternalServerError -// An error occurred on the server side. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { @@ -5677,14 +5980,13 @@ const opTransactWriteItems = "TransactWriteItems" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the TransactWriteItemsRequest method. +// req, resp := client.TransactWriteItemsRequest(params) // -// // Example sending a request using the TransactWriteItemsRequest method. -// req, resp := client.TransactWriteItemsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) { @@ -5740,50 +6042,50 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // The actions are completed atomically so that either all of them succeed, // or all of them fail. They are defined by the following objects: // -// * Put — Initiates a PutItem operation to write a new item. This structure -// specifies the primary key of the item to be written, the name of the table -// to write it in, an optional condition expression that must be satisfied -// for the write to succeed, a list of the item's attributes, and a field -// indicating whether to retrieve the item's attributes if the condition -// is not met. -// -// * Update — Initiates an UpdateItem operation to update an existing item. -// This structure specifies the primary key of the item to be updated, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the update to succeed, an expression that defines -// one or more attributes to be updated, and a field indicating whether to -// retrieve the item's attributes if the condition is not met. -// -// * Delete — Initiates a DeleteItem operation to delete an existing item. 
-// This structure specifies the primary key of the item to be deleted, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the deletion to succeed, and a field indicating -// whether to retrieve the item's attributes if the condition is not met. -// -// * ConditionCheck — Applies a condition to an item that is not being -// modified by the transaction. This structure specifies the primary key -// of the item to be checked, the name of the table where it resides, a condition -// expression that must be satisfied for the transaction to succeed, and -// a field indicating whether to retrieve the item's attributes if the condition -// is not met. +// - Put — Initiates a PutItem operation to write a new item. This structure +// specifies the primary key of the item to be written, the name of the table +// to write it in, an optional condition expression that must be satisfied +// for the write to succeed, a list of the item's attributes, and a field +// indicating whether to retrieve the item's attributes if the condition +// is not met. +// +// - Update — Initiates an UpdateItem operation to update an existing item. +// This structure specifies the primary key of the item to be updated, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the update to succeed, an expression that defines +// one or more attributes to be updated, and a field indicating whether to +// retrieve the item's attributes if the condition is not met. +// +// - Delete — Initiates a DeleteItem operation to delete an existing item. +// This structure specifies the primary key of the item to be deleted, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the deletion to succeed, and a field indicating +// whether to retrieve the item's attributes if the condition is not met. +// +// - ConditionCheck — Applies a condition to an item that is not being +// modified by the transaction. This structure specifies the primary key +// of the item to be checked, the name of the table where it resides, a condition +// expression that must be satisfied for the transaction to succeed, and +// a field indicating whether to retrieve the item's attributes if the condition +// is not met. // // DynamoDB rejects the entire TransactWriteItems request if any of the following // is true: // -// * A condition in one of the condition expressions is not met. +// - A condition in one of the condition expressions is not met. // -// * An ongoing operation is in the process of updating the same item. +// - An ongoing operation is in the process of updating the same item. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * An item size becomes too large (bigger than 400 KB), a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. +// - An item size becomes too large (bigger than 400 KB), a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// * The aggregate size of the items in the transaction exceeds 4 MB. +// - The aggregate size of the items in the transaction exceeds 4 MB. // -// * There is a user error, such as an invalid data format. 
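
And the write-side counterpart: a minimal sketch pairing a ConditionCheck with a Put, as described in the action list above, so both take effect atomically or not at all (table names, keys, and attributes are invented):

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        _, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
            TransactItems: []*dynamodb.TransactWriteItem{
                // ConditionCheck: constrain an item the transaction doesn't modify.
                {ConditionCheck: &dynamodb.ConditionCheck{
                    TableName:           aws.String("Accounts"), // placeholder
                    Key:                 map[string]*dynamodb.AttributeValue{"Id": {S: aws.String("A1")}},
                    ConditionExpression: aws.String("attribute_exists(Id)"),
                }},
                // Put: write the new item only if the check above passes.
                {Put: &dynamodb.Put{
                    TableName: aws.String("Orders"), // placeholder
                    Item: map[string]*dynamodb.AttributeValue{
                        "OrderId": {S: aws.String("O-1001")},
                        "Status":  {S: aws.String("PLACED")},
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err) // e.g. TransactionCanceledException with per-action reasons
        }
    }
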
+// - There is a user error, such as an invalid data format. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5793,119 +6095,120 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // API operation TransactWriteItems for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * TransactionCanceledException -// The entire transaction request was canceled. // -// DynamoDB cancels a TransactWriteItems request under the following circumstances: -// -// * A condition in one of the condition expressions is not met. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - TransactionCanceledException +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// * A table in the TransactWriteItems request is in a different account -// or region. +// - A condition in one of the condition expressions is not met. // -// * More than one action in the TransactWriteItems operation targets the -// same item. +// - A table in the TransactWriteItems request is in a different account +// or region. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - More than one action in the TransactWriteItems operation targets the +// same item. // -// * An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * There is a user error, such as an invalid data format. +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// DynamoDB cancels a TransactGetItems request under the following circumstances: +// - There is a user error, such as an invalid data format. // -// * There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// DynamoDB cancels a TransactGetItems request under the following circumstances: // -// * A table in the TransactGetItems request is in a different account or -// region. +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - A table in the TransactGetItems request is in a different account or +// region. // -// * There is a user error, such as an invalid data format. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. 
This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have NONE code and Null message.
+// - There is a user error, such as an invalid data format.
//
-// Cancellation reason codes and possible error messages:
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have None code and Null message.
//
-// * No Errors: Code: NONE Message: null
+// Cancellation reason codes and possible error messages:
//
-// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
+// - No Errors: Code: None Message: null
//
-// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
+// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
//
-// * Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
//
-// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
//
-// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. 
This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
//
-// * Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
//
-// * TransactionInProgressException
-// The transaction with the given request token is already in progress.
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
//
-// * IdempotentParameterMismatchException
-// DynamoDB rejected the request because you retried a request with a different
-// payload but with an idempotent token that was already used.
+// - TransactionInProgressException
+// The transaction with the given request token is already in progress.
//
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide. 
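
(Editor's illustration, not part of the vendored diff.) The cancellation-reason codes documented above surface on the CancellationReasons field of dynamodb.TransactionCanceledException. A minimal sketch with aws-sdk-go v1; the table name "Orders" and its key attribute are hypothetical:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{{
			Put: &dynamodb.Put{
				TableName: aws.String("Orders"), // hypothetical table
				Item: map[string]*dynamodb.AttributeValue{
					"OrderId": {S: aws.String("o-1001")}, // hypothetical key
				},
				// Fail the whole transaction if the item already exists.
				ConditionExpression: aws.String("attribute_not_exists(OrderId)"),
			},
		}},
	})

	var tce *dynamodb.TransactionCanceledException
	if errors.As(err, &tce) {
		// One CancellationReason per requested item, in request order;
		// items without an error carry the code "None" and a null message.
		for i, reason := range tce.CancellationReasons {
			fmt.Printf("item %d: code=%s message=%s\n",
				i, aws.StringValue(reason.Code), aws.StringValue(reason.Message))
		}
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}
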
+// - IdempotentParameterMismatchException +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. -// -// * InternalServerError -// An error occurred on the server side. +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { @@ -5945,14 +6248,13 @@ const opUntagResource = "UntagResource" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { @@ -6013,31 +6315,32 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // API operation UntagResource for usage and error information. // // Returned Error Types: -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// - LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Up to 500 simultaneous table operations are allowed per account. 
These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// There is a soft account quota of 256 tables.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// There is a soft account quota of 2,500 tables.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
//
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
+// - InternalServerError
+// An error occurred on the server side.
+//
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
@@ -6077,14 +6380,13 @@ const opUpdateContinuousBackups = "UpdateContinuousBackups"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the UpdateContinuousBackupsRequest method.
+// req, resp := client.UpdateContinuousBackupsRequest(params)
//
-// // Example sending a request using the UpdateContinuousBackupsRequest method.
-// req, resp := client.UpdateContinuousBackupsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) {
@@ -6150,15 +6452,17 @@ func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackups
// API operation UpdateContinuousBackups for usage and error information.
//
// Returned Error Types:
-// * TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account.
//
-// * ContinuousBackupsUnavailableException
-// Backups have not yet been enabled for this table.
+// - TableNotFoundException
+// A source table with the name TableName does not currently exist within the
+// subscriber's account or the subscriber is operating in the wrong Amazon Web
+// Services Region.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - ContinuousBackupsUnavailableException
+// Backups have not yet been enabled for this table.
+//
+// - InternalServerError
+// An error occurred on the server side. 
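
(Editor's illustration, not part of the vendored diff.) For the UpdateContinuousBackups error list above, a minimal sketch of enabling point-in-time recovery with aws-sdk-go v1; the table name "Music" is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Turn on point-in-time recovery. Per the docs above, a table without
	// continuous backups yields ContinuousBackupsUnavailableException.
	out, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
		TableName: aws.String("Music"), // hypothetical table
		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
			PointInTimeRecoveryEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. TableNotFoundException
	}
	fmt.Println(out.ContinuousBackupsDescription)
}
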
// // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { @@ -6198,14 +6502,13 @@ const opUpdateContributorInsights = "UpdateContributorInsights" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UpdateContributorInsightsRequest method. +// req, resp := client.UpdateContributorInsightsRequest(params) // -// // Example sending a request using the UpdateContributorInsightsRequest method. -// req, resp := client.UpdateContributorInsightsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { @@ -6243,12 +6546,13 @@ func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsi // API operation UpdateContributorInsights for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. // -// * InternalServerError -// An error occurred on the server side. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { @@ -6288,14 +6592,13 @@ const opUpdateGlobalTable = "UpdateGlobalTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UpdateGlobalTableRequest method. +// req, resp := client.UpdateGlobalTableRequest(params) // -// // Example sending a request using the UpdateGlobalTableRequest method. -// req, resp := client.UpdateGlobalTableRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { @@ -6354,13 +6657,13 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // If global secondary indexes are specified, then the following conditions // must also be met: // -// * The global secondary indexes must have the same name. +// - The global secondary indexes must have the same name. // -// * The global secondary indexes must have the same hash key and sort key -// (if present). 
+// - The global secondary indexes must have the same hash key and sort key +// (if present). // -// * The global secondary indexes must have the same provisioned and maximum -// write capacity units. +// - The global secondary indexes must have the same provisioned and maximum +// write capacity units. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6370,21 +6673,23 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req // API operation UpdateGlobalTable for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. // -// * GlobalTableNotFoundException -// The specified global table does not exist. +// - InternalServerError +// An error occurred on the server side. // -// * ReplicaAlreadyExistsException -// The specified replica is already part of the global table. +// - GlobalTableNotFoundException +// The specified global table does not exist. // -// * ReplicaNotFoundException -// The specified replica is no longer part of the global table. +// - ReplicaAlreadyExistsException +// The specified replica is already part of the global table. // -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. +// - ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// - TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { @@ -6424,14 +6729,13 @@ const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UpdateGlobalTableSettingsRequest method. +// req, resp := client.UpdateGlobalTableSettingsRequest(params) // -// // Example sending a request using the UpdateGlobalTableSettingsRequest method. -// req, resp := client.UpdateGlobalTableSettingsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { @@ -6487,36 +6791,37 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // API operation UpdateGlobalTableSettings for usage and error information. // // Returned Error Types: -// * GlobalTableNotFoundException -// The specified global table does not exist. // -// * ReplicaNotFoundException -// The specified replica is no longer part of the global table. +// - GlobalTableNotFoundException +// The specified global table does not exist. // -// * IndexNotFoundException -// The operation tried to access a nonexistent index. 
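
(Editor's illustration, not part of the vendored diff.) To ground the UpdateGlobalTable description above, a minimal sketch of adding a replica to a legacy, 2017-version global table with aws-sdk-go v1; the table name and Region are hypothetical:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Add a replica in us-west-2. Per the docs above, a table with the same
	// name, keys, and matching global secondary indexes must already exist
	// in that Region.
	_, err := svc.UpdateGlobalTable(&dynamodb.UpdateGlobalTableInput{
		GlobalTableName: aws.String("Music"), // hypothetical global table
		ReplicaUpdates: []*dynamodb.ReplicaUpdate{{
			Create: &dynamodb.CreateReplicaAction{RegionName: aws.String("us-west-2")},
		}},
	})
	if err != nil {
		log.Fatal(err) // e.g. GlobalTableNotFoundException, TableNotFoundException
	}
}
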
+// - ReplicaNotFoundException
+// The specified replica is no longer part of the global table.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - IndexNotFoundException
+// The operation tried to access a nonexistent index.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// The only exception is when you are creating a table with one or more secondary
-// indexes. You can have up to 25 such requests running at a time; however,
-// if the table or index specifications are complex, DynamoDB might temporarily
-// reduce the number of concurrent operations.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// There is a soft account quota of 256 tables.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
+// There is a soft account quota of 2,500 tables.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// - InternalServerError
+// An error occurred on the server side.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) {
@@ -6556,14 +6861,13 @@ const opUpdateItem = "UpdateItem"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the UpdateItemRequest method.
+// req, resp := client.UpdateItemRequest(params)
//
-// // Example sending a request using the UpdateItemRequest method.
-// req, resp := client.UpdateItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) {
@@ -6626,35 +6930,36 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque
// API operation UpdateItem for usage and error information.
//
// Returned Error Types:
-// * ConditionalCheckFailedException
-// A condition specified in the operation could not be evaluated.
//
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. 
The Amazon Web Services SDKs for DynamoDB -// automatically retry requests that receive this exception. Your request is -// eventually successful, unless your retry queue is too large to finish. Reduce -// the frequency of requests and use exponential backoff. For more information, -// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. +// - ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// - ProvisionedThroughputExceededException +// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB +// automatically retry requests that receive this exception. Your request is +// eventually successful, unless your retry queue is too large to finish. Reduce +// the frequency of requests and use exponential backoff. For more information, +// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. +// - ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. +// - ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact Amazon Web Services Support (https://aws.amazon.com/support) to request -// a quota increase. +// - TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. // -// * InternalServerError -// An error occurred on the server side. +// - RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact Amazon Web Services Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { @@ -6694,14 +6999,13 @@ const opUpdateTable = "UpdateTable" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) // -// // Example sending a request using the UpdateTableRequest method. 
-// req, resp := client.UpdateTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) {
@@ -6752,14 +7056,12 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req
//
// You can only perform one of the following operations at once:
//
-// * Modify the provisioned throughput settings of the table.
+// - Modify the provisioned throughput settings of the table.
//
-// * Enable or disable DynamoDB Streams on the table.
+// - Remove a global secondary index from the table.
//
-// * Remove a global secondary index from the table.
-//
-// * Create a new global secondary index on the table. After the index begins
-// backfilling, you can use UpdateTable to perform other operations.
+// - Create a new global secondary index on the table. After the index begins
+// backfilling, you can use UpdateTable to perform other operations.
//
// UpdateTable is an asynchronous operation; while it is executing, the table
// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot
@@ -6774,31 +7076,32 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req
// API operation UpdateTable for usage and error information.
//
// Returned Error Types:
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
//
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// The only exception is when you are creating a table with one or more secondary
-// indexes. You can have up to 25 such requests running at a time; however,
-// if the table or index specifications are complex, DynamoDB might temporarily
-// reduce the number of concurrent operations.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. 
You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// There is a soft account quota of 256 tables.
+// There is a soft account quota of 2,500 tables.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - InternalServerError
+// An error occurred on the server side.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) {
@@ -6838,14 +7141,13 @@ const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method.
+// req, resp := client.UpdateTableReplicaAutoScalingRequest(params)
//
-// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method.
-// req, resp := client.UpdateTableReplicaAutoScalingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling
func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) {
@@ -6879,31 +7181,32 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic
// API operation UpdateTableReplicaAutoScaling for usage and error information.
//
// Returned Error Types:
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
//
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// The only exception is when you are creating a table with one or more secondary
-// indexes. 
You can have up to 25 such requests running at a time; however,
-// if the table or index specifications are complex, DynamoDB might temporarily
-// reduce the number of concurrent operations.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 250 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// There is a soft account quota of 256 tables.
+// There is a soft account quota of 2,500 tables.
//
-// * InternalServerError
-// An error occurred on the server side.
+// - InternalServerError
+// An error occurred on the server side.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling
func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) {
@@ -6943,14 +7246,13 @@ const opUpdateTimeToLive = "UpdateTimeToLive"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
+// // Example sending a request using the UpdateTimeToLiveRequest method.
+// req, resp := client.UpdateTimeToLiveRequest(params)
//
-// // Example sending a request using the UpdateTimeToLiveRequest method.
-// req, resp := client.UpdateTimeToLiveRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) {
@@ -7033,31 +7335,32 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r
// API operation UpdateTimeToLive for usage and error information.
//
// Returned Error Types:
-// * ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
//
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// - ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// - ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
//
-// * LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
+// - LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// Up to 50 simultaneous table operations are allowed per account. These operations
-// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup,
-// and RestoreTableToPointInTime.
+// Up to 500 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime. 
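
(Editor's illustration, not part of the vendored diff.) A minimal sketch of the UpdateTimeToLive operation documented here, using aws-sdk-go v1; the table and attribute names are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Enable TTL on the "ExpiresAt" attribute, which is expected to hold an
	// expiration time in seconds since the Unix epoch.
	out, err := svc.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{
		TableName: aws.String("SessionData"), // hypothetical table
		TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
			AttributeName: aws.String("ExpiresAt"),
			Enabled:       aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err) // e.g. ResourceInUseException while the table is updating
	}
	fmt.Println(out.TimeToLiveSpecification)
}
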
// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 250 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. +// There is a soft account quota of 2,500 tables. // -// * InternalServerError -// An error occurred on the server side. +// - InternalServerError +// An error occurred on the server side. // // See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { @@ -7237,7 +7540,7 @@ type AttributeValue struct { // An attribute of type List. For example: // - // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}] + // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}] L []*AttributeValue `type:"list"` // An attribute of type Map. For example: @@ -7421,9 +7724,9 @@ type AttributeValueUpdate struct { // // * DELETE - Nothing happens; there is no attribute to delete. // - // * ADD - DynamoDB creates an item with the supplied primary key and number - // (or set of numbers) for the attribute value. The only data types allowed - // are number and number set; no other data types can be specified. + // * ADD - DynamoDB creates a new item with the supplied primary key and + // number (or set) for the attribute value. The only data types allowed are + // number, number set, string set or binary set. Action *string `type:"string" enum:"AttributeAction"` // Represents the data for an attribute. @@ -7976,7 +8279,8 @@ type BackupDetails struct { // BackupName is a required field BackupName *string `min:"3" type:"string" required:"true"` - // Size of the backup in bytes. + // Size of the backup in bytes. DynamoDB updates this value approximately every + // six hours. Recent changes might not be reflected in this value. BackupSizeBytes *int64 `type:"long"` // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. @@ -8643,7 +8947,7 @@ type BatchStatementError struct { // The error code associated with the failed PartiQL batch statement. Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"` - // The error message associated with the PartiQL batch resposne. + // The error message associated with the PartiQL batch response. Message *string `type:"string"` } @@ -9153,14 +9457,14 @@ func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { // Represents the selection criteria for a Query or Scan operation: // -// * For a Query operation, Condition is used for specifying the KeyConditions -// to use when querying a table or an index. For KeyConditions, only the -// following comparison operators are supported: EQ | LE | LT | GE | GT | -// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates -// the query results and returns only the desired values. +// - For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. 
For KeyConditions, only the
+// following comparison operators are supported: EQ | LE | LT | GE | GT |
+// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates
+// the query results and returns only the desired values.
//
-// * For a Scan operation, Condition is used in a ScanFilter, which evaluates
-// the scan results and returns only the desired values.
+// - For a Scan operation, Condition is used in a ScanFilter, which evaluates
+// the scan results and returns only the desired values.
type Condition struct {
	_ struct{} `type:"structure"`
@@ -10575,6 +10879,66 @@ func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTabl
	return s
}

+// Processing options for the CSV file being imported.
+type CsvOptions struct {
+	_ struct{} `type:"structure"`
+
+	// The delimiter used for separating items in the CSV file being imported.
+	Delimiter *string `min:"1" type:"string"`
+
+	// List of the headers used to specify a common header for all source CSV files
+	// being imported. If this field is specified then the first line of each CSV
+	// file is treated as data instead of the header. If this field is not specified
+	// the first line of each CSV file is treated as the header.
+	HeaderList []*string `min:"1" type:"list"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CsvOptions) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CsvOptions) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CsvOptions) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "CsvOptions"}
+	if s.Delimiter != nil && len(*s.Delimiter) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1))
+	}
+	if s.HeaderList != nil && len(s.HeaderList) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("HeaderList", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *CsvOptions) SetDelimiter(v string) *CsvOptions {
+	s.Delimiter = &v
+	return s
+}
+
+// SetHeaderList sets the HeaderList field's value.
+func (s *CsvOptions) SetHeaderList(v []*string) *CsvOptions {
+	s.HeaderList = v
+	return s
+}
+
// Represents a request to perform a DeleteItem operation.
type Delete struct {
	_ struct{} `type:"structure"`
@@ -10938,6 +11302,10 @@ type DeleteItemInput struct {
	//
	// * ALL_OLD - The content of the old item is returned.
	//
+	// There is no additional cost associated with requesting a return value aside
+	// from the small network and processing overhead of receiving a larger response.
+	// No read capacity units are consumed.
+	//
	// The ReturnValues parameter is used by several DynamoDB operations; however,
	// DeleteItem does not recognize any values other than NONE or ALL_OLD. 
ReturnValues *string `type:"string" enum:"ReturnValue"` @@ -11949,13 +12317,14 @@ func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSetti return s } -type DescribeKinesisStreamingDestinationInput struct { +type DescribeImportInput struct { _ struct{} `type:"structure"` - // The name of the table being described. + // The Amazon Resource Name (ARN) associated with the table you're importing + // to. // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // ImportArn is a required field + ImportArn *string `min:"37" type:"string" required:"true"` } // String returns the string representation. @@ -11963,7 +12332,7 @@ type DescribeKinesisStreamingDestinationInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationInput) String() string { +func (s DescribeImportInput) String() string { return awsutil.Prettify(s) } @@ -11972,15 +12341,99 @@ func (s DescribeKinesisStreamingDestinationInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DescribeKinesisStreamingDestinationInput) GoString() string { +func (s DescribeImportInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeKinesisStreamingDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *DescribeImportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImportInput"} + if s.ImportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ImportArn")) + } + if s.ImportArn != nil && len(*s.ImportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ImportArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImportArn sets the ImportArn field's value. +func (s *DescribeImportInput) SetImportArn(v string) *DescribeImportInput { + s.ImportArn = &v + return s +} + +type DescribeImportOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. + // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeImportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeImportOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescription) *DescribeImportOutput { + s.ImportTableDescription = v + return s +} + +type DescribeKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The name of the table being described. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeKinesisStreamingDestinationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) } if s.TableName != nil && len(*s.TableName) < 3 { invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) @@ -13035,17 +13488,17 @@ func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTrans // evaluates to true, the operation succeeds; if not, the operation fails. You // can use ExpectedAttributeValue in one of two different ways: // -// * Use AttributeValueList to specify one or more values to compare against -// an attribute. Use ComparisonOperator to specify how you want to perform -// the comparison. If the comparison evaluates to true, then the conditional -// operation succeeds. +// - Use AttributeValueList to specify one or more values to compare against +// an attribute. Use ComparisonOperator to specify how you want to perform +// the comparison. If the comparison evaluates to true, then the conditional +// operation succeeds. // -// * Use Value to specify a value that DynamoDB will compare against an attribute. -// If the values match, then ExpectedAttributeValue evaluates to true and -// the conditional operation succeeds. Optionally, you can also set Exists -// to false, indicating that you do not expect to find the attribute value -// in the table. In this case, the conditional operation succeeds only if -// the comparison evaluates to false. +// - Use Value to specify a value that DynamoDB will compare against an attribute. +// If the values match, then ExpectedAttributeValue evaluates to true and +// the conditional operation succeeds. Optionally, you can also set Exists +// to false, indicating that you do not expect to find the attribute value +// in the table. In this case, the conditional operation succeeds only if +// the comparison evaluates to false. // // Value and Exists are incompatible with AttributeValueList and ComparisonOperator. 
// Note that if you use both sets of parameters at once, DynamoDB will return @@ -13647,16 +14100,16 @@ type ExportTableToPointInTimeInput struct { // for more than 8 hours, or the result might not be idempotent. // // If you submit a request with the same client token but a change in other - // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch - // exception. + // parameters within the 8-hour idempotency window, DynamoDB returns an ImportConflictException. ClientToken *string `type:"string" idempotencyToken:"true"` // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON // or ION. ExportFormat *string `type:"string" enum:"ExportFormat"` - // Time in the past from which to export table data. The table export will be - // a snapshot of the table's state at this point in time. + // Time in the past from which to export table data, counted in seconds from + // the start of the Unix epoch. The table export will be a snapshot of the table's + // state at this point in time. ExportTime *time.Time `type:"timestamp"` // The name of the Amazon S3 bucket to export the snapshot to. @@ -14586,12 +15039,12 @@ func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroug // Represents one of the following: // -// * A new global secondary index to be added to an existing table. +// - A new global secondary index to be added to an existing table. // -// * New provisioned throughput parameters for an existing global secondary -// index. +// - New provisioned throughput parameters for an existing global secondary +// index. // -// * An existing global secondary index to be removed from an existing table. +// - An existing global secondary index to be removed from an existing table. type GlobalSecondaryIndexUpdate struct { _ struct{} `type:"structure"` @@ -15068,8 +15521,10 @@ func (s *IdempotentParameterMismatchException) RequestID() string { return s.RespMetadata.RequestID } -// The operation tried to access a nonexistent index. -type IndexNotFoundException struct { +// There was a conflict when importing from the specified S3 source. This can +// occur when the current import conflicts with a previous import request that +// had the same client token. +type ImportConflictException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -15081,7 +15536,7 @@ type IndexNotFoundException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s IndexNotFoundException) String() string { +func (s ImportConflictException) String() string { return awsutil.Prettify(s) } @@ -15090,23 +15545,23 @@ func (s IndexNotFoundException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s IndexNotFoundException) GoString() string { +func (s ImportConflictException) GoString() string { return s.String() } -func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { - return &IndexNotFoundException{ +func newErrorImportConflictException(v protocol.ResponseMetadata) error { + return &ImportConflictException{ RespMetadata: v, } } // Code returns the exception type name. 
-func (s *IndexNotFoundException) Code() string { - return "IndexNotFoundException" +func (s *ImportConflictException) Code() string { + return "ImportConflictException" } // Message returns the exception's message. -func (s *IndexNotFoundException) Message() string { +func (s *ImportConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15114,30 +15569,29 @@ func (s *IndexNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IndexNotFoundException) OrigErr() error { +func (s *ImportConflictException) OrigErr() error { return nil } -func (s *IndexNotFoundException) Error() string { +func (s *ImportConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s *IndexNotFoundException) StatusCode() int { +func (s *ImportConflictException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s *IndexNotFoundException) RequestID() string { +func (s *ImportConflictException) RequestID() string { return s.RespMetadata.RequestID } -// An error occurred on the server side. -type InternalServerError struct { +// The specified import was not found. +type ImportNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The server encountered an internal error trying to fulfill the request. Message_ *string `locationName:"message" type:"string"` } @@ -15146,7 +15600,7 @@ type InternalServerError struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InternalServerError) String() string { +func (s ImportNotFoundException) String() string { return awsutil.Prettify(s) } @@ -15155,23 +15609,23 @@ func (s InternalServerError) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InternalServerError) GoString() string { +func (s ImportNotFoundException) GoString() string { return s.String() } -func newErrorInternalServerError(v protocol.ResponseMetadata) error { - return &InternalServerError{ +func newErrorImportNotFoundException(v protocol.ResponseMetadata) error { + return &ImportNotFoundException{ RespMetadata: v, } } // Code returns the exception type name. -func (s *InternalServerError) Code() string { - return "InternalServerError" +func (s *ImportNotFoundException) Code() string { + return "ImportNotFoundException" } // Message returns the exception's message. -func (s *InternalServerError) Message() string { +func (s *ImportNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15179,30 +15633,55 @@ func (s *InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerError) OrigErr() error { +func (s *ImportNotFoundException) OrigErr() error { return nil } -func (s *InternalServerError) Error() string { +func (s *ImportNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
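Since these generated exception types all satisfy awserr.Error, callers can branch on Code(). A hedged sketch of distinguishing a missing import from other failures when describing one (the ARN is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeImport(&dynamodb.DescribeImportInput{
		ImportArn: aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/import/01234567890123-abc"), // placeholder
	})
	if err != nil {
		// Code() returns the exception type name, e.g. "ImportNotFoundException".
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ImportNotFoundException" {
			fmt.Println("no such import:", aerr.Message())
			return
		}
		fmt.Println("describe failed:", err)
		return
	}
	fmt.Println("import status:", aws.StringValue(out.ImportTableDescription.ImportStatus))
}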
-func (s *InternalServerError) StatusCode() int { +func (s *ImportNotFoundException) StatusCode() int { return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s *InternalServerError) RequestID() string { +func (s *ImportNotFoundException) RequestID() string { return s.RespMetadata.RequestID } -// The specified ExportTime is outside of the point in time recovery window. -type InvalidExportTimeException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +// Summary information about the source file for the import. +type ImportSummary struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"message" type:"string"` + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // this import task. + CloudWatchLogGroupArn *string `min:"1" type:"string"` + + // The time at which this import task ended. + EndTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) corresponding to the import request. + ImportArn *string `min:"37" type:"string"` + + // The status of the import operation. + ImportStatus *string `type:"string" enum:"ImportStatus"` + + // The format of the source data. Valid values are CSV, DYNAMODB_JSON or ION. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The path and S3 bucket of the source file that is being imported. This includes + // the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional + // if the bucket is owned by the requester). + S3BucketSource *S3BucketSource `type:"structure"` + + // The time at which this import task began. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) of the table being imported into. + TableArn *string `type:"string"` } // String returns the string representation. @@ -15210,7 +15689,7 @@ type InvalidExportTimeException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InvalidExportTimeException) String() string { +func (s ImportSummary) String() string { return awsutil.Prettify(s) } @@ -15219,134 +15698,127 @@ func (s InvalidExportTimeException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InvalidExportTimeException) GoString() string { +func (s ImportSummary) GoString() string { return s.String() } -func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { - return &InvalidExportTimeException{ - RespMetadata: v, - } +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *ImportSummary) SetCloudWatchLogGroupArn(v string) *ImportSummary { + s.CloudWatchLogGroupArn = &v + return s } -// Code returns the exception type name. -func (s *InvalidExportTimeException) Code() string { - return "InvalidExportTimeException" +// SetEndTime sets the EndTime field's value. +func (s *ImportSummary) SetEndTime(v time.Time) *ImportSummary { + s.EndTime = &v + return s } -// Message returns the exception's message.
-func (s *InvalidExportTimeException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetImportArn sets the ImportArn field's value. +func (s *ImportSummary) SetImportArn(v string) *ImportSummary { + s.ImportArn = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidExportTimeException) OrigErr() error { - return nil +// SetImportStatus sets the ImportStatus field's value. +func (s *ImportSummary) SetImportStatus(v string) *ImportSummary { + s.ImportStatus = &v + return s } -func (s *InvalidExportTimeException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetInputFormat sets the InputFormat field's value. +func (s *ImportSummary) SetInputFormat(v string) *ImportSummary { + s.InputFormat = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidExportTimeException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportSummary) SetS3BucketSource(v *S3BucketSource) *ImportSummary { + s.S3BucketSource = v + return s } -// RequestID returns the service's response RequestID for request. -func (s *InvalidExportTimeException) RequestID() string { - return s.RespMetadata.RequestID +// SetStartTime sets the StartTime field's value. +func (s *ImportSummary) SetStartTime(v time.Time) *ImportSummary { + s.StartTime = &v + return s } -// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime -// and LatestRestorableDateTime. -type InvalidRestoreTimeException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` +// SetTableArn sets the TableArn field's value. +func (s *ImportSummary) SetTableArn(v string) *ImportSummary { + s.TableArn = &v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRestoreTimeException) String() string { - return awsutil.Prettify(s) -} +// Represents the properties of the table being imported into. +type ImportTableDescription struct { + _ struct{} `type:"structure"` -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidRestoreTimeException) GoString() string { - return s.String() -} + // The client token that was provided for the import task. Reusing the client + // token on retry makes a call to ImportTable idempotent. + ClientToken *string `type:"string"` -func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { - return &InvalidRestoreTimeException{ - RespMetadata: v, - } -} + // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with + // the target table. + CloudWatchLogGroupArn *string `min:"1" type:"string"` -// Code returns the exception type name. -func (s *InvalidRestoreTimeException) Code() string { - return "InvalidRestoreTimeException" -} + // The time at which the creation of the table associated with this import task + // completed. 
+ EndTime *time.Time `type:"timestamp"` -// Message returns the exception's message. -func (s *InvalidRestoreTimeException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // The number of errors that occurred while importing the source file into the + // target table. + ErrorCount *int64 `type:"long"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRestoreTimeException) OrigErr() error { - return nil -} + // The error code corresponding to the failure that the import job ran into + // during execution. + FailureCode *string `type:"string"` -func (s *InvalidRestoreTimeException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + // The error message corresponding to the failure that the import job ran into + // during execution. + FailureMessage *string `type:"string"` -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRestoreTimeException) StatusCode() int { - return s.RespMetadata.StatusCode -} + // The Amazon Resource Number (ARN) corresponding to the import request. + ImportArn *string `min:"37" type:"string"` -// RequestID returns the service's response RequestID for request. -func (s *InvalidRestoreTimeException) RequestID() string { - return s.RespMetadata.RequestID -} + // The status of the import. + ImportStatus *string `type:"string" enum:"ImportStatus"` -// Information about item collections, if any, that were affected by the operation. -// ItemCollectionMetrics is only returned if the request asked for it. If the -// table does not have any local secondary indexes, this information is not -// returned in the response. -type ItemCollectionMetrics struct { - _ struct{} `type:"structure"` + // The number of items successfully imported into the new table. + ImportedItemCount *int64 `type:"long"` - // The partition key value of the item collection. This value is the same as - // the partition key value of the item. - ItemCollectionKey map[string]*AttributeValue `type:"map"` + // The compression options for the data that has been imported into the target + // table. The values are NONE, GZIP, or ZSTD. + InputCompressionType *string `type:"string" enum:"InputCompressionType"` - // An estimate of item collection size, in gigabytes. This value is a two-element - // array containing a lower bound and an upper bound for the estimate. The estimate - // includes the size of all the items in the table, plus the size of all attributes - // projected into all of the local secondary indexes on that table. Use this - // estimate to measure whether a local secondary index is approaching its size - // limit. - // - // The estimate is subject to change over time; therefore, do not rely on the - // precision or accuracy of the estimate. - SizeEstimateRangeGB []*float64 `type:"list"` + // The format of the source data going into the target table. + InputFormat *string `type:"string" enum:"InputFormat"` + + // The format options for the data that was imported into the target table. + // There is one value, CsvOption. + InputFormatOptions *InputFormatOptions `type:"structure"` + + // The total number of items processed from the source file. + ProcessedItemCount *int64 `type:"long"` + + // The total size of data processed from the source file, in bytes. + ProcessedSizeBytes *int64 `type:"long"` + + // Values for the S3 bucket the source file is imported from. Includes bucket + // name (required), key prefix (optional) and bucket account owner ID (optional).
+ S3BucketSource *S3BucketSource `type:"structure"` + + // The time when this import task started. + StartTime *time.Time `type:"timestamp"` + + // The Amazon Resource Number (ARN) of the table being imported into. + TableArn *string `type:"string"` + + // The parameters for the new table that is being imported into. + TableCreationParameters *TableCreationParameters `type:"structure"` + + // The table ID corresponding to the table created by the import process. + TableId *string `type:"string"` } // String returns the string representation. @@ -15354,7 +15826,7 @@ type ItemCollectionMetrics struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ItemCollectionMetrics) String() string { +func (s ImportTableDescription) String() string { return awsutil.Prettify(s) } @@ -15363,25 +15835,649 @@ func (s ItemCollectionMetrics) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ItemCollectionMetrics) GoString() string { +func (s ImportTableDescription) GoString() string { return s.String() } -// SetItemCollectionKey sets the ItemCollectionKey field's value. -func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { - s.ItemCollectionKey = v +// SetClientToken sets the ClientToken field's value. +func (s *ImportTableDescription) SetClientToken(v string) *ImportTableDescription { + s.ClientToken = &v return s } -// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. -func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics { - s.SizeEstimateRangeGB = v +// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value. +func (s *ImportTableDescription) SetCloudWatchLogGroupArn(v string) *ImportTableDescription { + s.CloudWatchLogGroupArn = &v return s } +// SetEndTime sets the EndTime field's value. +func (s *ImportTableDescription) SetEndTime(v time.Time) *ImportTableDescription { + s.EndTime = &v + return s +} + +// SetErrorCount sets the ErrorCount field's value. +func (s *ImportTableDescription) SetErrorCount(v int64) *ImportTableDescription { + s.ErrorCount = &v + return s +} + +// SetFailureCode sets the FailureCode field's value. +func (s *ImportTableDescription) SetFailureCode(v string) *ImportTableDescription { + s.FailureCode = &v + return s +} + +// SetFailureMessage sets the FailureMessage field's value. +func (s *ImportTableDescription) SetFailureMessage(v string) *ImportTableDescription { + s.FailureMessage = &v + return s +} + +// SetImportArn sets the ImportArn field's value. +func (s *ImportTableDescription) SetImportArn(v string) *ImportTableDescription { + s.ImportArn = &v + return s +} + +// SetImportStatus sets the ImportStatus field's value. +func (s *ImportTableDescription) SetImportStatus(v string) *ImportTableDescription { + s.ImportStatus = &v + return s +} + +// SetImportedItemCount sets the ImportedItemCount field's value.
+func (s *ImportTableDescription) SetImportedItemCount(v int64) *ImportTableDescription { + s.ImportedItemCount = &v + return s +} + +// SetInputCompressionType sets the InputCompressionType field's value. +func (s *ImportTableDescription) SetInputCompressionType(v string) *ImportTableDescription { + s.InputCompressionType = &v + return s +} + +// SetInputFormat sets the InputFormat field's value. +func (s *ImportTableDescription) SetInputFormat(v string) *ImportTableDescription { + s.InputFormat = &v + return s +} + +// SetInputFormatOptions sets the InputFormatOptions field's value. +func (s *ImportTableDescription) SetInputFormatOptions(v *InputFormatOptions) *ImportTableDescription { + s.InputFormatOptions = v + return s +} + +// SetProcessedItemCount sets the ProcessedItemCount field's value. +func (s *ImportTableDescription) SetProcessedItemCount(v int64) *ImportTableDescription { + s.ProcessedItemCount = &v + return s +} + +// SetProcessedSizeBytes sets the ProcessedSizeBytes field's value. +func (s *ImportTableDescription) SetProcessedSizeBytes(v int64) *ImportTableDescription { + s.ProcessedSizeBytes = &v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportTableDescription) SetS3BucketSource(v *S3BucketSource) *ImportTableDescription { + s.S3BucketSource = v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *ImportTableDescription) SetStartTime(v time.Time) *ImportTableDescription { + s.StartTime = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *ImportTableDescription) SetTableArn(v string) *ImportTableDescription { + s.TableArn = &v + return s +} + +// SetTableCreationParameters sets the TableCreationParameters field's value. +func (s *ImportTableDescription) SetTableCreationParameters(v *TableCreationParameters) *ImportTableDescription { + s.TableCreationParameters = v + return s +} + +// SetTableId sets the TableId field's value. +func (s *ImportTableDescription) SetTableId(v string) *ImportTableDescription { + s.TableId = &v + return s +} + +type ImportTableInput struct { + _ struct{} `type:"structure"` + + // Providing a ClientToken makes the call to ImportTable idempotent, meaning + // that multiple identical calls have the same effect as one single call. + // + // A client token is valid for 8 hours after the first request that uses it + // is completed. After 8 hours, any request with the same client token is treated + // as a new request. Do not resubmit the same request with the same client token + // for more than 8 hours, or the result might not be idempotent. + // + // If you submit a request with the same client token but a change in other + // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch + // exception. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Type of compression to be used on the input coming from the imported table. + InputCompressionType *string `type:"string" enum:"InputCompressionType"` + + // The format of the source data. Valid values for InputFormat are CSV, DYNAMODB_JSON + // or ION. + // + // InputFormat is a required field + InputFormat *string `type:"string" required:"true" enum:"InputFormat"` + + // Additional properties that specify how the input is formatted. + InputFormatOptions *InputFormatOptions `type:"structure"` + + // The S3 bucket that provides the source for the import.
+ // + // S3BucketSource is a required field + S3BucketSource *S3BucketSource `type:"structure" required:"true"` + + // Parameters for the table to import the data into. + // + // TableCreationParameters is a required field + TableCreationParameters *TableCreationParameters `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportTableInput"} + if s.InputFormat == nil { + invalidParams.Add(request.NewErrParamRequired("InputFormat")) + } + if s.S3BucketSource == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketSource")) + } + if s.TableCreationParameters == nil { + invalidParams.Add(request.NewErrParamRequired("TableCreationParameters")) + } + if s.InputFormatOptions != nil { + if err := s.InputFormatOptions.Validate(); err != nil { + invalidParams.AddNested("InputFormatOptions", err.(request.ErrInvalidParams)) + } + } + if s.S3BucketSource != nil { + if err := s.S3BucketSource.Validate(); err != nil { + invalidParams.AddNested("S3BucketSource", err.(request.ErrInvalidParams)) + } + } + if s.TableCreationParameters != nil { + if err := s.TableCreationParameters.Validate(); err != nil { + invalidParams.AddNested("TableCreationParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ImportTableInput) SetClientToken(v string) *ImportTableInput { + s.ClientToken = &v + return s +} + +// SetInputCompressionType sets the InputCompressionType field's value. +func (s *ImportTableInput) SetInputCompressionType(v string) *ImportTableInput { + s.InputCompressionType = &v + return s +} + +// SetInputFormat sets the InputFormat field's value. +func (s *ImportTableInput) SetInputFormat(v string) *ImportTableInput { + s.InputFormat = &v + return s +} + +// SetInputFormatOptions sets the InputFormatOptions field's value. +func (s *ImportTableInput) SetInputFormatOptions(v *InputFormatOptions) *ImportTableInput { + s.InputFormatOptions = v + return s +} + +// SetS3BucketSource sets the S3BucketSource field's value. +func (s *ImportTableInput) SetS3BucketSource(v *S3BucketSource) *ImportTableInput { + s.S3BucketSource = v + return s +} + +// SetTableCreationParameters sets the TableCreationParameters field's value. +func (s *ImportTableInput) SetTableCreationParameters(v *TableCreationParameters) *ImportTableInput { + s.TableCreationParameters = v + return s +} + +type ImportTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table created for the import, and parameters + // of the import. The import parameters include import status, how many items + // were processed, and how many errors were encountered. 
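Putting ImportTableInput's required pieces together, a minimal, hedged sketch of starting an import from S3 into a new on-demand table (the bucket and table names are illustrative; error handling is trimmed to the essentials):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.ImportTable(&dynamodb.ImportTableInput{
		InputFormat: aws.String("DYNAMODB_JSON"),
		S3BucketSource: &dynamodb.S3BucketSource{
			S3Bucket:    aws.String("my-import-bucket"), // placeholder
			S3KeyPrefix: aws.String("exports/music/"),   // optional prefix
		},
		TableCreationParameters: &dynamodb.TableCreationParameters{
			TableName:   aws.String("MusicImported"), // placeholder
			BillingMode: aws.String("PAY_PER_REQUEST"),
			AttributeDefinitions: []*dynamodb.AttributeDefinition{
				{AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
			},
			KeySchema: []*dynamodb.KeySchemaElement{
				{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
			},
		},
	})
	if err != nil {
		fmt.Println("import failed to start:", err)
		return
	}
	// The returned description carries the ImportArn used to poll progress
	// via DescribeImport.
	fmt.Println("import ARN:", aws.StringValue(out.ImportTableDescription.ImportArn))
}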
+ // + // ImportTableDescription is a required field + ImportTableDescription *ImportTableDescription `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportTableOutput) GoString() string { + return s.String() +} + +// SetImportTableDescription sets the ImportTableDescription field's value. +func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) *ImportTableOutput { + s.ImportTableDescription = v + return s +} + +// The operation tried to access a nonexistent index. +type IndexNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IndexNotFoundException) GoString() string { + return s.String() +} + +func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { + return &IndexNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IndexNotFoundException) Code() string { + return "IndexNotFoundException" +} + +// Message returns the exception's message. +func (s *IndexNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IndexNotFoundException) OrigErr() error { + return nil +} + +func (s *IndexNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IndexNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IndexNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The format options for the data that was imported into the target table. +// There is one value, CsvOption. +type InputFormatOptions struct { + _ struct{} `type:"structure"` + + // The options for imported source files in CSV format. The values are Delimiter + // and HeaderList. + Csv *CsvOptions `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
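For CSV sources, InputFormatOptions is where the delimiter and header names go. A small, hedged sketch (the field values are illustrative):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// CSV import options: a semicolon delimiter and explicit header names,
	// for source files that do not carry their own header row.
	opts := &dynamodb.InputFormatOptions{
		Csv: &dynamodb.CsvOptions{
			Delimiter:  aws.String(";"),
			HeaderList: []*string{aws.String("Artist"), aws.String("SongTitle")},
		},
	}
	// Validate() mirrors the generated client-side checks shown here before
	// the request is sent on the wire.
	if err := opts.Validate(); err != nil {
		panic(err)
	}
	_ = opts // would be set on ImportTableInput.InputFormatOptions
}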
+func (s InputFormatOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputFormatOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputFormatOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputFormatOptions"} + if s.Csv != nil { + if err := s.Csv.Validate(); err != nil { + invalidParams.AddNested("Csv", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCsv sets the Csv field's value. +func (s *InputFormatOptions) SetCsv(v *CsvOptions) *InputFormatOptions { + s.Csv = v + return s +} + +// An error occurred on the server side. +type InternalServerError struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The server encountered an internal error trying to fulfill the request. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerError) GoString() string { + return s.String() +} + +func newErrorInternalServerError(v protocol.ResponseMetadata) error { + return &InternalServerError{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerError) Code() string { + return "InternalServerError" +} + +// Message returns the exception's message. +func (s *InternalServerError) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerError) OrigErr() error { + return nil +} + +func (s *InternalServerError) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified ExportTime is outside of the point in time recovery window. +type InvalidExportTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidExportTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidExportTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error { + return &InvalidExportTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExportTimeException) Code() string { + return "InvalidExportTimeException" +} + +// Message returns the exception's message. +func (s *InvalidExportTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidExportTimeException) OrigErr() error { + return nil +} + +func (s *InvalidExportTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidExportTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidExportTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime +// and LatestRestorableDateTime. +type InvalidRestoreTimeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRestoreTimeException) GoString() string { + return s.String() +} + +func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { + return &InvalidRestoreTimeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRestoreTimeException) Code() string { + return "InvalidRestoreTimeException" +} + +// Message returns the exception's message. +func (s *InvalidRestoreTimeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRestoreTimeException) OrigErr() error { + return nil +} + +func (s *InvalidRestoreTimeException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRestoreTimeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidRestoreTimeException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The partition key value of the item collection. This value is the same as + // the partition key value of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// SetItemCollectionKey sets the ItemCollectionKey field's value. +func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { + s.ItemCollectionKey = v + return s +} + +// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. +func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics { + s.SizeEstimateRangeGB = v + return s +} + +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +type ItemCollectionSizeLimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -15759,16 +16855,16 @@ func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStream // There is no limit to the number of daily on-demand backups that can be taken. // -// Up to 50 simultaneous table operations are allowed per account. These operations +// Up to 500 simultaneous table operations are allowed per account. These operations // include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, // and RestoreTableToPointInTime. // // The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, +// indexes. You can have up to 250 such requests running at a time; however, // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account quota of 256 tables. 
+// There is a soft account quota of 2,500 tables. type LimitExceededException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -15840,7 +16936,8 @@ type ListBackupsInput struct { // // Where BackupType can be: // - // * USER - On-demand backup created by you. + // * USER - On-demand backup created by you. (The default setting if no other + // backup types are specified.) // // * SYSTEM - On-demand backup automatically created by DynamoDB. // @@ -16250,32 +17347,141 @@ func (s *ListGlobalTablesInput) Validate() error { return nil } -// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value. -func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput { - s.ExclusiveStartGlobalTableName = &v +// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value. +func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput { + s.ExclusiveStartGlobalTableName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput { + s.Limit = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput { + s.RegionName = &v + return s +} + +type ListGlobalTablesOutput struct { + _ struct{} `type:"structure"` + + // List of global table names. + GlobalTables []*GlobalTable `type:"list"` + + // Last evaluated global table name. + LastEvaluatedGlobalTableName *string `min:"3" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListGlobalTablesOutput) GoString() string { + return s.String() +} + +// SetGlobalTables sets the GlobalTables field's value. +func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput { + s.GlobalTables = v + return s +} + +// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value. +func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput { + s.LastEvaluatedGlobalTableName = &v + return s +} + +type ListImportsInput struct { + _ struct{} `type:"structure"` + + // An optional string that, if supplied, must be copied from the output of a + // previous call to ListImports. When provided in this manner, the API fetches + // the next page of results. + NextToken *string `min:"112" type:"string"` + + // The number of ImportSummary objects returned in a single page. + PageSize *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) associated with the table that was imported + // to. + TableArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ListImportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListImportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListImportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListImportsInput"} + if s.NextToken != nil && len(*s.NextToken) < 112 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 112)) + } + if s.PageSize != nil && *s.PageSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("PageSize", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListImportsInput) SetNextToken(v string) *ListImportsInput { + s.NextToken = &v return s } -// SetLimit sets the Limit field's value. -func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput { - s.Limit = &v +// SetPageSize sets the PageSize field's value. +func (s *ListImportsInput) SetPageSize(v int64) *ListImportsInput { + s.PageSize = &v return s } -// SetRegionName sets the RegionName field's value. -func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput { - s.RegionName = &v +// SetTableArn sets the TableArn field's value. +func (s *ListImportsInput) SetTableArn(v string) *ListImportsInput { + s.TableArn = &v return s } -type ListGlobalTablesOutput struct { +type ListImportsOutput struct { _ struct{} `type:"structure"` - // List of global table names. - GlobalTables []*GlobalTable `type:"list"` + // A list of ImportSummary objects. + ImportSummaryList []*ImportSummary `type:"list"` - // Last evaluated global table name. - LastEvaluatedGlobalTableName *string `min:"3" type:"string"` + // If this value is returned, there are additional results to be displayed. + // To retrieve them, call ListImports again, with NextToken set to this value. + NextToken *string `min:"112" type:"string"` } // String returns the string representation. @@ -16283,7 +17489,7 @@ type ListGlobalTablesOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListGlobalTablesOutput) String() string { +func (s ListImportsOutput) String() string { return awsutil.Prettify(s) } @@ -16292,19 +17498,19 @@ func (s ListGlobalTablesOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListGlobalTablesOutput) GoString() string { +func (s ListImportsOutput) GoString() string { return s.String() } -// SetGlobalTables sets the GlobalTables field's value. -func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput { - s.GlobalTables = v +// SetImportSummaryList sets the ImportSummaryList field's value. +func (s *ListImportsOutput) SetImportSummaryList(v []*ImportSummary) *ListImportsOutput { + s.ImportSummaryList = v return s } -// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value. 
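NextToken and PageSize follow the usual pagination contract: pass the token from one response into the next request. A hedged sketch of walking all import summaries for one table (the ARN is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	var token *string
	for {
		out, err := svc.ListImports(&dynamodb.ListImportsInput{
			TableArn:  aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/MusicImported"), // placeholder
			PageSize:  aws.Int64(25),
			NextToken: token, // nil on the first call
		})
		if err != nil {
			fmt.Println("list failed:", err)
			return
		}
		for _, s := range out.ImportSummaryList {
			fmt.Println(aws.StringValue(s.ImportArn), aws.StringValue(s.ImportStatus))
		}
		// A nil NextToken means the last page has been reached.
		if out.NextToken == nil {
			return
		}
		token = out.NextToken
	}
}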
-func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput { - s.LastEvaluatedGlobalTableName = &v +// SetNextToken sets the NextToken field's value. +func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput { + s.NextToken = &v return s } @@ -17036,7 +18242,7 @@ type Projection struct { // Represents the non-key attribute names which will be projected into the index. // // For local secondary indexes, the total count of NonKeyAttributes summed across - // all of the local secondary indexes, must not exceed 20. If you project the + // all of the local secondary indexes, must not exceed 100. If you project the // same attribute into two different indexes, this counts as two distinct attributes // when determining the total. NonKeyAttributes []*string `min:"1" type:"list"` @@ -17624,6 +18830,10 @@ type PutItemInput struct { // // The values returned are strongly consistent. // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // // The ReturnValues parameter is used by several DynamoDB operations; however, // PutItem does not recognize any values other than NONE or ALL_OLD. ReturnValues *string `type:"string" enum:"ReturnValue"` @@ -17942,7 +19152,7 @@ type QueryInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) // in the Amazon DynamoDB Developer Guide. FilterExpression *string `type:"string"` @@ -18104,8 +19314,8 @@ type QueryInput struct { // * COUNT - Returns the number of matching items, rather than the matching // items themselves. // - // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. - // This return value is equivalent to specifying AttributesToGet without + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without // specifying any value for Select. If you query or scan a local secondary // index and request only attributes that are projected into that index, // the operation will read only the index and not the table. If any of the @@ -18116,12 +19326,12 @@ type QueryInput struct { // are projected into the index. Global secondary index queries cannot fetch // attributes from the parent table. // - // If neither Select nor AttributesToGet are specified, DynamoDB defaults to - // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when - // accessing an index. You cannot use both Select and AttributesToGet together + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. - // (This usage is equivalent to specifying AttributesToGet without any value - // for Select.) 
+ // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) // // If you use the ProjectionExpression parameter, then the value for Select // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an @@ -19483,11 +20693,11 @@ func (s *ReplicaSettingsUpdate) SetReplicaTableClass(v string) *ReplicaSettingsU // Represents one of the following: // -// * A new replica to be added to an existing global table. +// - A new replica to be added to an existing global table. // -// * New parameters for an existing replica. +// - New parameters for an existing replica. // -// * An existing replica to be removed from an existing global table. +// - An existing replica to be removed from an existing global table. type ReplicaUpdate struct { _ struct{} `type:"structure"` @@ -19550,16 +20760,19 @@ func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate { // Represents one of the following: // -// * A new replica to be added to an existing regional table or global table. -// This request invokes the CreateTableReplica action in the destination -// Region. +// - A new replica to be added to an existing regional table or global table. +// This request invokes the CreateTableReplica action in the destination +// Region. // -// * New parameters for an existing replica. This request invokes the UpdateTable -// action in the destination Region. +// - New parameters for an existing replica. This request invokes the UpdateTable +// action in the destination Region. // -// * An existing replica to be deleted. The request invokes the DeleteTableReplica -// action in the destination Region, deleting the replica and all if its -// items in the destination Region. +// - An existing replica to be deleted. The request invokes the DeleteTableReplica +// action in the destination Region, deleting the replica and all of its +// items in the destination Region. +// +// When you manually remove a table or global table replica, you do not automatically +// remove any associated scalable targets, scaling policies, or CloudWatch alarms. type ReplicationGroupUpdate struct { _ struct{} `type:"structure"` @@ -20262,6 +21475,72 @@ func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescriptio return s } +// The S3 bucket that is being imported from. +type S3BucketSource struct { + _ struct{} `type:"structure"` + + // The S3 bucket that is being imported from. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The account number of the S3 bucket that is being imported from. If the bucket + // is owned by the requester, this is optional. + S3BucketOwner *string `type:"string"` + + // The key prefix shared by all S3 Objects that are being imported. + S3KeyPrefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s S3BucketSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive".
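The Select/ProjectionExpression rules above reduce to: either omit Select and supply a ProjectionExpression, or pair the expression with SPECIFIC_ATTRIBUTES. A hedged Query sketch against a hypothetical Music table:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// SPECIFIC_ATTRIBUTES is the only Select value compatible with a
	// ProjectionExpression; omitting Select here would behave the same way.
	out, err := svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Music"), // placeholder
		KeyConditionExpression: aws.String("Artist = :a"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":a": {S: aws.String("No One You Know")},
		},
		ProjectionExpression: aws.String("SongTitle"),
		Select:               aws.String(dynamodb.SelectSpecificAttributes),
	})
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	fmt.Println("matched items:", aws.Int64Value(out.Count))
}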
+func (s S3BucketSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3BucketSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3BucketSource"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *S3BucketSource) SetS3Bucket(v string) *S3BucketSource { + s.S3Bucket = &v + return s +} + +// SetS3BucketOwner sets the S3BucketOwner field's value. +func (s *S3BucketSource) SetS3BucketOwner(v string) *S3BucketSource { + s.S3BucketOwner = &v + return s +} + +// SetS3KeyPrefix sets the S3KeyPrefix field's value. +func (s *S3BucketSource) SetS3KeyPrefix(v string) *S3BucketSource { + s.S3KeyPrefix = &v + return s +} + // The description of the server-side encryption status on the specified table. type SSEDescription struct { _ struct{} `type:"structure"` @@ -20500,7 +21779,7 @@ type ScanInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Query.FilterExpression) // in the Amazon DynamoDB Developer Guide. FilterExpression *string `type:"string"` @@ -20590,8 +21869,8 @@ type ScanInput struct { // * COUNT - Returns the number of matching items, rather than the matching // items themselves. // - // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. - // This return value is equivalent to specifying AttributesToGet without + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression. + // This return value is equivalent to specifying ProjectionExpression without // specifying any value for Select. If you query or scan a local secondary // index and request only attributes that are projected into that index, // the operation reads only the index and not the table. If any of the requested @@ -20602,12 +21881,12 @@ type ScanInput struct { // into the index. Global secondary index queries cannot fetch attributes // from the parent table. // - // If neither Select nor AttributesToGet are specified, DynamoDB defaults to - // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when - // accessing an index. You cannot use both Select and AttributesToGet together + // If neither Select nor ProjectionExpression are specified, DynamoDB defaults + // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and ProjectionExpression together // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. - // (This usage is equivalent to specifying AttributesToGet without any value - // for Select.) + // (This usage is equivalent to specifying ProjectionExpression without any + // value for Select.) // // If you use the ProjectionExpression parameter, then the value for Select // can only be SPECIFIC_ATTRIBUTES. 
Any other value for Select will return an @@ -21310,6 +22589,166 @@ func (s *TableClassSummary) SetTableClass(v string) *TableClassSummary { return s } +// The parameters for the table created as part of the import operation. +type TableCreationParameters struct { + _ struct{} `type:"structure"` + + // The attributes of the table created as part of the import operation. + // + // AttributeDefinitions is a required field + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // The billing mode for provisioning the table created as part of the import + // operation. + BillingMode *string `type:"string" enum:"BillingMode"` + + // The Global Secondary Indexes (GSI) of the table to be created as part of + // the import operation. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // The primary key and optional sort key of the table created as part of the import + // operation. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification `type:"structure"` + + // The name of the table created as part of the import operation. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TableCreationParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *TableCreationParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableCreationParameters"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *TableCreationParameters) SetAttributeDefinitions(v []*AttributeDefinition) *TableCreationParameters { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *TableCreationParameters) SetBillingMode(v string) *TableCreationParameters { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *TableCreationParameters) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *TableCreationParameters { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCreationParameters { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *TableCreationParameters) SetSSESpecification(v *SSESpecification) *TableCreationParameters { + s.SSESpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TableCreationParameters) SetTableName(v string) *TableCreationParameters { + s.TableName = &v + return s +} + // Represents the properties of a table. type TableDescription struct { _ struct{} `type:"structure"` @@ -21376,7 +22815,7 @@ type TableDescription struct { // of the table attributes are projected into the index. NonKeyAttributes // - A list of one or more non-key attribute names that are projected into // the secondary index. 
The total count of attributes provided in NonKeyAttributes, - // summed across all of the secondary indexes, must not exceed 20. If you + // summed across all of the secondary indexes, must not exceed 100. If you // project the same attribute into two different indexes, this counts as // two distinct attributes when determining the total. // @@ -21453,7 +22892,7 @@ type TableDescription struct { // table attributes are projected into the index. NonKeyAttributes - A list // of one or more non-key attribute names that are projected into the secondary // index. The total count of attributes provided in NonKeyAttributes, summed - // across all of the secondary indexes, must not exceed 20. If you project + // across all of the secondary indexes, must not exceed 100. If you project // the same attribute into two different indexes, this counts as two distinct // attributes when determining the total. // @@ -21740,7 +23179,8 @@ func (s *TableInUseException) RequestID() string { } // A source table with the name TableName does not currently exist within the -// subscriber's account. +// subscriber's account or the subscriber is operating in the wrong Amazon Web +// Services Region. type TableNotFoundException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -22524,87 +23964,87 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item // // DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// * A condition in one of the condition expressions is not met. +// - A condition in one of the condition expressions is not met. // -// * A table in the TransactWriteItems request is in a different account -// or region. +// - A table in the TransactWriteItems request is in a different account +// or region. // -// * More than one action in the TransactWriteItems operation targets the -// same item. +// - More than one action in the TransactWriteItems operation targets the +// same item. // -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. +// - An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// * There is a user error, such as an invalid data format. +// - There is a user error, such as an invalid data format. // // DynamoDB cancels a TransactGetItems request under the following circumstances: // -// * There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// - There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. // -// * A table in the TransactGetItems request is in a different account or -// region. +// - A table in the TransactGetItems request is in a different account or +// region. 
// -// * There is insufficient provisioned capacity for the transaction to be -// completed. +// - There is insufficient provisioned capacity for the transaction to be +// completed. // -// * There is a user error, such as an invalid data format. +// - There is a user error, such as an invalid data format. // // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons // property. This property is not set for other languages. Transaction cancellation // reasons are ordered in the order of requested items, if an item has no error -// it will have NONE code and Null message. +// it will have None code and Null message. // // Cancellation reason codes and possible error messages: // -// * No Errors: Code: NONE Message: null -// -// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. -// -// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. -// -// * Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. -// -// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. -// Consider increasing your provisioning level for the under-provisioned -// global secondary indexes with the UpdateTable API. This message is returned -// when provisioned throughput is exceeded is on a provisioned GSI. -// -// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. -// -// * Validation Error: Code: ValidationError Messages: One or more parameter -// values were invalid. The update expression attempted to update the secondary -// index key beyond allowed size limits. The update expression attempted -// to update the secondary index key to unsupported type. An operand in the -// update expression has an incorrect data type. Item size to update has -// exceeded the maximum allowed size. Number overflow. Attempting to store -// a number with magnitude larger than supported range. Type mismatch for -// attribute to update. Nesting Levels have exceeded supported limits. The -// document path provided in the update expression is invalid for update. -// The provided expression refers to an attribute that does not exist in -// the item. +// - No Errors: Code: None Message: null +// +// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The +// conditional request failed. 
+//
+// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// - Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// - Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
type TransactionCanceledException struct { _ struct{} `type:"structure"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` @@ -24985,6 +26425,34 @@ func GlobalTableStatus_Values() []string { } } +const ( + // ImportStatusInProgress is a ImportStatus enum value + ImportStatusInProgress = "IN_PROGRESS" + + // ImportStatusCompleted is a ImportStatus enum value + ImportStatusCompleted = "COMPLETED" + + // ImportStatusCancelling is a ImportStatus enum value + ImportStatusCancelling = "CANCELLING" + + // ImportStatusCancelled is a ImportStatus enum value + ImportStatusCancelled = "CANCELLED" + + // ImportStatusFailed is a ImportStatus enum value + ImportStatusFailed = "FAILED" +) + +// ImportStatus_Values returns all elements of the ImportStatus enum +func ImportStatus_Values() []string { + return []string{ + ImportStatusInProgress, + ImportStatusCompleted, + ImportStatusCancelling, + ImportStatusCancelled, + ImportStatusFailed, + } +} + const ( // IndexStatusCreating is a IndexStatus enum value IndexStatusCreating = "CREATING" @@ -25009,6 +26477,46 @@ func IndexStatus_Values() []string { } } +const ( + // InputCompressionTypeGzip is a InputCompressionType enum value + InputCompressionTypeGzip = "GZIP" + + // InputCompressionTypeZstd is a InputCompressionType enum value + InputCompressionTypeZstd = "ZSTD" + + // InputCompressionTypeNone is a InputCompressionType enum value + InputCompressionTypeNone = "NONE" +) + +// InputCompressionType_Values returns all elements of the InputCompressionType enum +func InputCompressionType_Values() []string { + return []string{ + InputCompressionTypeGzip, + InputCompressionTypeZstd, + InputCompressionTypeNone, + } +} + +const ( + // InputFormatDynamodbJson is a InputFormat enum value + InputFormatDynamodbJson = "DYNAMODB_JSON" + + // InputFormatIon is a InputFormat enum value + InputFormatIon = "ION" + + // InputFormatCsv is a InputFormat enum value + InputFormatCsv = "CSV" +) + +// InputFormat_Values returns all elements of the InputFormat enum +func InputFormat_Values() []string { + return []string{ + InputFormatDynamodbJson, + InputFormatIon, + InputFormatCsv, + } +} + const ( // KeyTypeHash is a KeyType enum value KeyTypeHash = "HASH" @@ -25100,16 +26608,16 @@ func ReplicaStatus_Values() []string { // Determines the level of detail about either provisioned or on-demand throughput // consumption that is returned in the response: // -// * INDEXES - The response includes the aggregate ConsumedCapacity for the -// operation, together with ConsumedCapacity for each table and secondary -// index that was accessed. Note that some operations, such as GetItem and -// BatchGetItem, do not access any indexes at all. In these cases, specifying -// INDEXES will only return ConsumedCapacity information for table(s). +// - INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary +// index that was accessed. Note that some operations, such as GetItem and +// BatchGetItem, do not access any indexes at all. In these cases, specifying +// INDEXES will only return ConsumedCapacity information for table(s). // -// * TOTAL - The response includes only the aggregate ConsumedCapacity for -// the operation. +// - TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. // -// * NONE - No ConsumedCapacity details are included in the response. +// - NONE - No ConsumedCapacity details are included in the response. 
const ( // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value ReturnConsumedCapacityIndexes = "INDEXES" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go index c1fe449783..ab12b274f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go @@ -27,7 +27,7 @@ // See dynamodb package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/ // -// Using the Client +// # Using the Client // // To contact Amazon DynamoDB with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go index 013e9b1d2a..0cca7e4b9e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go @@ -11,7 +11,7 @@ operations such as PutItem, and unmarshaling Query and Scan APIs' responses. See the dynamodbattribute package documentation for more information. https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ -Expression Builders +# Expression Builders The expression package provides utility types and functions to build DynamoDB expression for type safe construction of API ExpressionAttributeNames, and diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go index 5af1bc2ce8..6fd36f6912 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go @@ -14,23 +14,24 @@ import ( // An Unmarshaler is an interface to provide custom unmarshaling of // AttributeValues. Use this to provide custom logic determining // how AttributeValues should be unmarshaled. -// type ExampleUnmarshaler struct { -// Value int -// } // -// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error { -// if av.N == nil { -// return nil -// } +// type ExampleUnmarshaler struct { +// Value int +// } // -// n, err := strconv.ParseInt(*av.N, 10, 0) -// if err != nil { -// return err -// } +// func (u *ExampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error { +// if av.N == nil { +// return nil +// } // -// u.Value = int(n) -// return nil -// } +// n, err := strconv.ParseInt(*av.N, 10, 0) +// if err != nil { +// return err +// } +// +// u.Value = int(n) +// return nil +// } type Unmarshaler interface { UnmarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error } @@ -54,17 +55,17 @@ type Unmarshaler interface { // When decoding AttributeValues to interfaces Unmarshal will use the // following types. 
// -// []byte, AV Binary (B) -// [][]byte, AV Binary Set (BS) -// bool, AV Boolean (BOOL) -// []interface{}, AV List (L) -// map[string]interface{}, AV Map (M) -// float64, AV Number (N) -// Number, AV Number (N) with UseNumber set -// []float64, AV Number Set (NS) -// []Number, AV Number Set (NS) with UseNumber set -// string, AV String (S) -// []string, AV String Set (SS) +// []byte, AV Binary (B) +// [][]byte, AV Binary Set (BS) +// bool, AV Boolean (BOOL) +// []interface{}, AV List (L) +// map[string]interface{}, AV Map (M) +// float64, AV Number (N) +// Number, AV Number (N) with UseNumber set +// []float64, AV Number Set (NS) +// []Number, AV Number Set (NS) with UseNumber set +// string, AV String (S) +// []string, AV String Set (SS) // // If the Decoder option, UseNumber is set numbers will be unmarshaled // as Number values instead of float64. Use this to maintain the original diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go index 2b91d1006b..102f4b465c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/doc.go @@ -6,7 +6,7 @@ // Go value tyes to dynamodb.AttributeValue for DynamoDB requests, or // unmarshaling the dynamodb.AttributeValue back into a Go value type. // -// AttributeValue Marshaling +// # AttributeValue Marshaling // // To marshal a Go type to a dynamodbAttributeValue you can use the Marshal // functions in the dynamodbattribute package. There are specialized versions @@ -15,34 +15,34 @@ // The following example uses MarshalMap to convert the Record Go type to a // dynamodb.AttributeValue type and use the value to make a PutItem API request. // -// type Record struct { -// ID string -// URLs []string -// } -// -// //... -// -// r := Record{ -// ID: "ABC123", -// URLs: []string{ -// "https://example.com/first/link", -// "https://example.com/second/url", -// }, -// } -// av, err := dynamodbattribute.MarshalMap(r) -// if err != nil { -// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err)) -// } -// -// _, err = svc.PutItem(&dynamodb.PutItemInput{ -// TableName: aws.String(myTableName), -// Item: av, -// }) -// if err != nil { -// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err)) -// } -// -// AttributeValue Unmarshaling +// type Record struct { +// ID string +// URLs []string +// } +// +// //... +// +// r := Record{ +// ID: "ABC123", +// URLs: []string{ +// "https://example.com/first/link", +// "https://example.com/second/url", +// }, +// } +// av, err := dynamodbattribute.MarshalMap(r) +// if err != nil { +// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err)) +// } +// +// _, err = svc.PutItem(&dynamodb.PutItemInput{ +// TableName: aws.String(myTableName), +// Item: av, +// }) +// if err != nil { +// panic(fmt.Sprintf("failed to put Record to DynamoDB, %v", err)) +// } +// +// # AttributeValue Unmarshaling // // To unmarshal a dynamodb.AttributeValue to a Go type you can use the Unmarshal // functions in the dynamodbattribute package. There are specialized versions @@ -52,31 +52,31 @@ // Items returned by the operation will be unmarshaled into the slice of Records // Go type. // -// type Record struct { -// ID string -// URLs []string -// } +// type Record struct { +// ID string +// URLs []string +// } // -// //... +// //... 
// -// var records []Record +// var records []Record // -// // Use the ScanPages method to perform the scan with pagination. Use -// // just Scan method to make the API call without pagination. -// err := svc.ScanPages(&dynamodb.ScanInput{ -// TableName: aws.String(myTableName), -// }, func(page *dynamodb.ScanOutput, last bool) bool { -// recs := []Record{} +// // Use the ScanPages method to perform the scan with pagination. Use +// // just Scan method to make the API call without pagination. +// err := svc.ScanPages(&dynamodb.ScanInput{ +// TableName: aws.String(myTableName), +// }, func(page *dynamodb.ScanOutput, last bool) bool { +// recs := []Record{} // -// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs) -// if err != nil { -// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err)) -// } +// err := dynamodbattribute.UnmarshalListOfMaps(page.Items, &recs) +// if err != nil { +// panic(fmt.Sprintf("failed to unmarshal Dynamodb Scan Items, %v", err)) +// } // -// records = append(records, recs...) +// records = append(records, recs...) // -// return true // keep paging -// }) +// return true // keep paging +// }) // // The ConvertTo, ConvertToList, ConvertToMap, ConvertFrom, ConvertFromMap // and ConvertFromList methods have been deprecated. The Marshal and Unmarshal diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go index a888882909..56f0a671d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/encode.go @@ -23,6 +23,11 @@ import ( // January 1, 0001 UTC, and January 1, 0001 UTC. type UnixTime time.Time +// String calls the underlying time.Time.String to return a human readable representation +func (e UnixTime) String() string { + return time.Time(e).String() +} + // MarshalDynamoDBAttributeValue implements the Marshaler interface so that // the UnixTime can be marshaled from to a DynamoDB AttributeValue number // value encoded in the number of seconds since January 1, 1970 UTC. @@ -54,15 +59,14 @@ func (e *UnixTime) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) // to AttributeValues. Use this to provide custom logic determining how a // Go Value type should be marshaled. // -// type ExampleMarshaler struct { -// Value int -// } -// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error { -// n := fmt.Sprintf("%v", m.Value) -// av.N = &n -// return nil -// } -// +// type ExampleMarshaler struct { +// Value int +// } +// func (m *ExampleMarshaler) MarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error { +// n := fmt.Sprintf("%v", m.Value) +// av.N = &n +// return nil +// } type Marshaler interface { MarshalDynamoDBAttributeValue(*dynamodb.AttributeValue) error } @@ -84,43 +88,43 @@ type Marshaler interface { // `dynamodbav` struct tag can be used to control how the value will be // marshaled into a AttributeValue. 
//
-// // Field is ignored
-// Field int `dynamodbav:"-"`
+// // Field is ignored
+// Field int `dynamodbav:"-"`
//
-// // Field AttributeValue map key "myName"
-// Field int `dynamodbav:"myName"`
+// // Field AttributeValue map key "myName"
+// Field int `dynamodbav:"myName"`
//
-// // Field AttributeValue map key "myName", and
-// // Field is omitted if it is empty
-// Field int `dynamodbav:"myName,omitempty"`
+// // Field AttributeValue map key "myName", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:"myName,omitempty"`
//
-// // Field AttributeValue map key "Field", and
-// // Field is omitted if it is empty
-// Field int `dynamodbav:",omitempty"`
+// // Field AttributeValue map key "Field", and
+// // Field is omitted if it is empty
+// Field int `dynamodbav:",omitempty"`
//
-// // Field's elems will be omitted if empty
-// // only valid for slices, and maps.
-// Field []string `dynamodbav:",omitemptyelem"`
+// // Field's elems will be omitted if empty
+// // only valid for slices, and maps.
+// Field []string `dynamodbav:",omitemptyelem"`
//
-// // Field will be marshaled as a AttributeValue string
-// // only value for number types, (int,uint,float)
-// Field int `dynamodbav:",string"`
+// // Field will be marshaled as an AttributeValue string
+// // only valid for number types (int, uint, float)
+// Field int `dynamodbav:",string"`
//
-// // Field will be marshaled as a binary set
-// Field [][]byte `dynamodbav:",binaryset"`
+// // Field will be marshaled as a binary set
+// Field [][]byte `dynamodbav:",binaryset"`
//
-// // Field will be marshaled as a number set
-// Field []int `dynamodbav:",numberset"`
+// // Field will be marshaled as a number set
+// Field []int `dynamodbav:",numberset"`
//
-// // Field will be marshaled as a string set
-// Field []string `dynamodbav:",stringset"`
+// // Field will be marshaled as a string set
+// Field []string `dynamodbav:",stringset"`
//
-// // Field will be marshaled as Unix time number in seconds.
-// // This tag is only valid with time.Time typed struct fields.
-// // Important to note that zero value time as unixtime is not 0 seconds
-// // from January 1, 1970 UTC, but -62135596800. Which is seconds between
-// // January 1, 0001 UTC, and January 1, 0001 UTC.
-// Field time.Time `dynamodbav:",unixtime"`
+// // Field will be marshaled as Unix time number in seconds.
+// // This tag is only valid with time.Time typed struct fields.
+// // Important to note that zero value time as unixtime is not 0 seconds
+// // from January 1, 1970 UTC, but -62135596800. Which is seconds between
+// // January 1, 0001 UTC, and January 1, 1970 UTC.
+// Field time.Time `dynamodbav:",unixtime"`
//
// The omitempty tag is only used during Marshaling and is ignored for
// Unmarshal. Any zero value or a value when marshaled results in a
@@ -137,9 +141,9 @@ type Marshaler interface
// All struct fields, including anonymous fields, are marshaled unless any
// of the following conditions are met.
//
-// - the field is not exported
-// - json or dynamodbav field tag is "-"
-// - json or dynamodbav field tag specifies "omitempty", and is empty.
+// - the field is not exported
+// - json or dynamodbav field tag is "-"
+// - json or dynamodbav field tag specifies "omitempty", and is empty.
//
// Pointer and interface values encode as the value pointed to or contained
// in the interface. A nil value encodes as the AttributeValue NULL value.
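To make the retabbed `dynamodbav` tag documentation above concrete, here is a minimal, self-contained sketch of how a few of those tags behave with dynamodbattribute.MarshalMap. The Event type and its field names are hypothetical illustrations, not part of the SDK:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

// Event is a hypothetical type exercising a few of the `dynamodbav` tags
// documented above: a renamed key, omitempty, a string-encoded number,
// a string set, and a unixtime-encoded time.Time.
type Event struct {
	ID      string    `dynamodbav:"pk"`
	Note    string    `dynamodbav:",omitempty"`
	Retries int       `dynamodbav:",string"`
	Tags    []string  `dynamodbav:",stringset"`
	At      time.Time `dynamodbav:",unixtime"`
}

func main() {
	av, err := dynamodbattribute.MarshalMap(Event{
		ID:      "evt-1",
		Retries: 3,
		Tags:    []string{"a", "b"},
		At:      time.Unix(1662000000, 0),
	})
	if err != nil {
		panic(fmt.Sprintf("marshal failed: %v", err))
	}
	// Note is empty, so omitempty drops it; Retries marshals as the string "3";
	// Tags marshals as a string set (SS); At marshals as N seconds since epoch.
	fmt.Println(av)
}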
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 9ffd8f2d0d..9bd2107c60 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -71,6 +71,20 @@ const ( // payload but with an idempotent token that was already used. ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + // ErrCodeImportConflictException for service response error code + // "ImportConflictException". + // + // There was a conflict when importing from the specified S3 source. This can + // occur when the current import conflicts with a previous import request that + // had the same client token. + ErrCodeImportConflictException = "ImportConflictException" + + // ErrCodeImportNotFoundException for service response error code + // "ImportNotFoundException". + // + // The specified import was not found. + ErrCodeImportNotFoundException = "ImportNotFoundException" + // ErrCodeIndexNotFoundException for service response error code // "IndexNotFoundException". // @@ -108,16 +122,16 @@ const ( // // There is no limit to the number of daily on-demand backups that can be taken. // - // Up to 50 simultaneous table operations are allowed per account. These operations + // Up to 500 simultaneous table operations are allowed per account. These operations // include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, // and RestoreTableToPointInTime. // // The only exception is when you are creating a table with one or more secondary - // indexes. You can have up to 25 such requests running at a time; however, + // indexes. You can have up to 250 such requests running at a time; however, // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // - // There is a soft account quota of 256 tables. + // There is a soft account quota of 2,500 tables. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodePointInTimeRecoveryUnavailableException for service response error code @@ -188,7 +202,8 @@ const ( // "TableNotFoundException". // // A source table with the name TableName does not currently exist within the - // subscriber's account. + // subscriber's account or the subscriber is operating in the wrong Amazon Web + // Services Region. ErrCodeTableNotFoundException = "TableNotFoundException" // ErrCodeTransactionCanceledException for service response error code @@ -232,11 +247,11 @@ const ( // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons // property. This property is not set for other languages. Transaction cancellation // reasons are ordered in the order of requested items, if an item has no error - // it will have NONE code and Null message. + // it will have None code and Null message. // // Cancellation reason codes and possible error messages: // - // * No Errors: Code: NONE Message: null + // * No Errors: Code: None Message: null // // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The // conditional request failed. 
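For illustration, a minimal sketch of how a caller might branch on the import error codes added above. The classifyImportErr helper and its returned strings are hypothetical; awserr.Error and the ErrCode constants are the SDK's own:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// classifyImportErr shows one way to distinguish the import-related error
// codes; the retry/abort policy suggested in the strings is illustrative.
func classifyImportErr(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case dynamodb.ErrCodeImportConflictException:
			// Same client token as an earlier import: treat as a duplicate request.
			return "duplicate import request: " + aerr.Message()
		case dynamodb.ErrCodeImportNotFoundException:
			return "no such import: " + aerr.Message()
		case dynamodb.ErrCodeLimitExceededException:
			// Per the updated quotas above, up to 500 simultaneous table
			// operations are allowed per account; back off and retry.
			return "operation quota exceeded; retry later"
		}
	}
	return fmt.Sprintf("unclassified error: %v", err)
}

func main() {
	err := awserr.New(dynamodb.ErrCodeImportNotFoundException, "import not found", nil)
	fmt.Println(classifyImportErr(err))
}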
@@ -305,6 +320,8 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException, "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, + "ImportConflictException": newErrorImportConflictException, + "ImportNotFoundException": newErrorImportNotFoundException, "IndexNotFoundException": newErrorIndexNotFoundException, "InternalServerError": newErrorInternalServerError, "InvalidExportTimeException": newErrorInvalidExportTimeException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/condition.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/condition.go index 39e1103135..cf7625efd6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/condition.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/condition.go @@ -95,20 +95,20 @@ type ConditionBuilder struct { // // Example: // -// // condition represents the equal clause of the item attribute "foo" and -// // the value 5 -// condition := expression.Equal(expression.Name("foo"), expression.Value(5)) +// // condition represents the equal clause of the item attribute "foo" and +// // the value 5 +// condition := expression.Equal(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Equal(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo = :five" +// expression.Equal(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo = :five" func Equal(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -123,20 +123,20 @@ func Equal(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the equal clause of the item attribute "foo" and -// // the value 5 -// condition := expression.Name("foo").Equal(expression.Value(5)) +// // condition represents the equal clause of the item attribute "foo" and +// // the value 5 +// condition := expression.Name("foo").Equal(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").Equal(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo = :five" +// expression.Name("foo").Equal(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo = :five" func (nb NameBuilder) Equal(right OperandBuilder) ConditionBuilder { return Equal(nb, right) } @@ -148,20 +148,20 @@ func (nb NameBuilder) Equal(right 
OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the equal clause of the item attribute "foo" and -// // the value 5 -// condition := expression.Value(5).Equal(expression.Name("foo")) +// // condition represents the equal clause of the item attribute "foo" and +// // the value 5 +// condition := expression.Value(5).Equal(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).Equal(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five = foo" +// expression.Value(5).Equal(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five = foo" func (vb ValueBuilder) Equal(right OperandBuilder) ConditionBuilder { return Equal(vb, right) } @@ -173,20 +173,20 @@ func (vb ValueBuilder) Equal(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the equal clause of the size of the item -// // attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).Equal(expression.Value(5)) +// // condition represents the equal clause of the size of the item +// // attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).Equal(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).Equal(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) = :five" +// expression.Size(expression.Name("foo")).Equal(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) = :five" func (sb SizeBuilder) Equal(right OperandBuilder) ConditionBuilder { return Equal(sb, right) } @@ -198,20 +198,20 @@ func (sb SizeBuilder) Equal(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the not equal clause of the item attribute "foo" -// // and the value 5 -// condition := expression.NotEqual(expression.Name("foo"), expression.Value(5)) +// // condition represents the not equal clause of the item attribute "foo" +// // and the value 5 +// condition := expression.NotEqual(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.NotEqual(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo 
<> :five" +// expression.NotEqual(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo <> :five" func NotEqual(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -226,20 +226,20 @@ func NotEqual(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the not equal clause of the item attribute "foo" -// // and the value 5 -// condition := expression.Name("foo").NotEqual(expression.Value(5)) +// // condition represents the not equal clause of the item attribute "foo" +// // and the value 5 +// condition := expression.Name("foo").NotEqual(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").NotEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo <> :five" +// expression.Name("foo").NotEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo <> :five" func (nb NameBuilder) NotEqual(right OperandBuilder) ConditionBuilder { return NotEqual(nb, right) } @@ -251,20 +251,20 @@ func (nb NameBuilder) NotEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the not equal clause of the item attribute "foo" -// // and the value 5 -// condition := expression.Value(5).NotEqual(expression.Name("foo")) +// // condition represents the not equal clause of the item attribute "foo" +// // and the value 5 +// condition := expression.Value(5).NotEqual(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).NotEqual(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five <> foo" +// expression.Value(5).NotEqual(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five <> foo" func (vb ValueBuilder) NotEqual(right OperandBuilder) ConditionBuilder { return NotEqual(vb, right) } @@ -276,20 +276,20 @@ func (vb ValueBuilder) NotEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the not equal clause of the size of the item -// // attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).NotEqual(expression.Value(5)) +// // condition represents the not equal clause of the size of the item +// // attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).NotEqual(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition 
Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).NotEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) <> :five" +// expression.Size(expression.Name("foo")).NotEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) <> :five" func (sb SizeBuilder) NotEqual(right OperandBuilder) ConditionBuilder { return NotEqual(sb, right) } @@ -301,20 +301,20 @@ func (sb SizeBuilder) NotEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than clause of the item attribute "foo" -// // and the value 5 -// condition := expression.LessThan(expression.Name("foo"), expression.Value(5)) +// // condition represents the less than clause of the item attribute "foo" +// // and the value 5 +// condition := expression.LessThan(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.LessThan(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo < :five" +// expression.LessThan(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo < :five" func LessThan(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -329,20 +329,20 @@ func LessThan(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than clause of the item attribute "foo" -// // and the value 5 -// condition := expression.Name("foo").LessThan(expression.Value(5)) +// // condition represents the less than clause of the item attribute "foo" +// // and the value 5 +// condition := expression.Name("foo").LessThan(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").LessThan(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo < :five" +// expression.Name("foo").LessThan(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo < :five" func (nb NameBuilder) LessThan(right OperandBuilder) ConditionBuilder { return LessThan(nb, right) } @@ -354,20 +354,20 @@ func (nb NameBuilder) LessThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than clause of the item attribute "foo" -// // and the value 5 -// condition := expression.Value(5).LessThan(expression.Name("foo")) +// // condition represents the less than 
clause of the item attribute "foo" +// // and the value 5 +// condition := expression.Value(5).LessThan(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).LessThan(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five < foo" +// expression.Value(5).LessThan(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five < foo" func (vb ValueBuilder) LessThan(right OperandBuilder) ConditionBuilder { return LessThan(vb, right) } @@ -379,20 +379,20 @@ func (vb ValueBuilder) LessThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than clause of the size of the item -// // attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).LessThan(expression.Value(5)) +// // condition represents the less than clause of the size of the item +// // attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).LessThan(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).LessThan(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) < :five" +// expression.Size(expression.Name("foo")).LessThan(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) < :five" func (sb SizeBuilder) LessThan(right OperandBuilder) ConditionBuilder { return LessThan(sb, right) } @@ -404,20 +404,20 @@ func (sb SizeBuilder) LessThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.LessThanEqual(expression.Name("foo"), expression.Value(5)) +// // condition represents the less than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.LessThanEqual(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.LessThanEqual(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo <= :five" +// expression.LessThanEqual(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo <= 
:five" func LessThanEqual(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -432,20 +432,20 @@ func LessThanEqual(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.Name("foo").LessThanEqual(expression.Value(5)) +// // condition represents the less than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.Name("foo").LessThanEqual(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").LessThanEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo <= :five" +// expression.Name("foo").LessThanEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo <= :five" func (nb NameBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { return LessThanEqual(nb, right) } @@ -457,20 +457,20 @@ func (nb NameBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.Value(5).LessThanEqual(expression.Name("foo")) +// // condition represents the less than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.Value(5).LessThanEqual(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).LessThanEqual(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five <= foo" +// expression.Value(5).LessThanEqual(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five <= foo" func (vb ValueBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { return LessThanEqual(vb, right) } @@ -482,20 +482,20 @@ func (vb ValueBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the less than equal to clause of the size of the -// // item attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).LessThanEqual(expression.Value(5)) +// // condition represents the less than equal to clause of the size of the +// // item attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).LessThanEqual(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// 
anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).LessThanEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) <= :five" +// expression.Size(expression.Name("foo")).LessThanEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) <= :five" func (sb SizeBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { return LessThanEqual(sb, right) } @@ -507,20 +507,20 @@ func (sb SizeBuilder) LessThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than clause of the item attribute -// // "foo" and the value 5 -// condition := expression.GreaterThan(expression.Name("foo"), expression.Value(5)) +// // condition represents the greater than clause of the item attribute +// // "foo" and the value 5 +// condition := expression.GreaterThan(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.GreaterThan(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo > :five" +// expression.GreaterThan(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo > :five" func GreaterThan(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -535,20 +535,20 @@ func GreaterThan(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than clause of the item attribute -// // "foo" and the value 5 -// condition := expression.Name("foo").GreaterThan(expression.Value(5)) +// // condition represents the greater than clause of the item attribute +// // "foo" and the value 5 +// condition := expression.Name("foo").GreaterThan(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").GreaterThan(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo > :five" +// expression.Name("foo").GreaterThan(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo > :five" func (nb NameBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { return GreaterThan(nb, right) } @@ -560,20 +560,20 @@ func (nb NameBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than clause of the item attribute -// // "foo" and the value 5 -// condition := 
expression.Value(5).GreaterThan(expression.Name("foo")) +// // condition represents the greater than clause of the item attribute +// // "foo" and the value 5 +// condition := expression.Value(5).GreaterThan(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).GreaterThan(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five > foo" +// expression.Value(5).GreaterThan(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five > foo" func (vb ValueBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { return GreaterThan(vb, right) } @@ -585,20 +585,20 @@ func (vb ValueBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than clause of the size of the item -// // attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).GreaterThan(expression.Value(5)) +// // condition represents the greater than clause of the size of the item +// // attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).GreaterThan(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).GreaterThan(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) > :five" +// expression.Size(expression.Name("foo")).GreaterThan(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) > :five" func (sb SizeBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { return GreaterThan(sb, right) } @@ -610,20 +610,20 @@ func (sb SizeBuilder) GreaterThan(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.GreaterThanEqual(expression.Name("foo"), expression.Value(5)) +// // condition represents the greater than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.GreaterThanEqual(expression.Name("foo"), expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.GreaterThanEqual(expression.Name("foo"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo >= :five" 
+// expression.GreaterThanEqual(expression.Name("foo"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo >= :five" func GreaterThanEqual(left, right OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{left, right}, @@ -638,20 +638,20 @@ func GreaterThanEqual(left, right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.Name("foo").GreaterThanEqual(expression.Value(5)) +// // condition represents the greater than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.Name("foo").GreaterThanEqual(expression.Value(5)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("foo").GreaterThanEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "foo >= :five" +// expression.Name("foo").GreaterThanEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "foo >= :five" func (nb NameBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { return GreaterThanEqual(nb, right) } @@ -663,20 +663,20 @@ func (nb NameBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than equal to clause of the item -// // attribute "foo" and the value 5 -// condition := expression.Value(5).GreaterThanEqual(expression.Name("foo")) +// // condition represents the greater than equal to clause of the item +// // attribute "foo" and the value 5 +// condition := expression.Value(5).GreaterThanEqual(expression.Name("foo")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(5).GreaterThanEqual(expression.Name("foo")) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// ":five >= foo" +// expression.Value(5).GreaterThanEqual(expression.Name("foo")) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// ":five >= foo" func (vb ValueBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { return GreaterThanEqual(vb, right) } @@ -688,20 +688,20 @@ func (vb ValueBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the greater than equal to clause of the size of -// // the item attribute "foo" and the value 5 -// condition := expression.Size(expression.Name("foo")).GreaterThanEqual(expression.Value(5)) +// // condition represents the greater than equal to clause of the size of +// // the item attribute "foo" and the value 5 +// condition := expression.Size(expression.Name("foo")).GreaterThanEqual(expression.Value(5)) // -// // 
Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("foo")).GreaterThanEqual(expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "size (foo) >= :five" +// expression.Size(expression.Name("foo")).GreaterThanEqual(expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "size (foo) >= :five" func (sb SizeBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { return GreaterThanEqual(sb, right) } @@ -714,23 +714,23 @@ func (sb SizeBuilder) GreaterThanEqual(right OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the item attribute "Name" is -// // equal to value "Generic Name" AND the item attribute "Age" is less -// // than value 40 -// condition := expression.And(expression.Name("Name").Equal(expression.Value("Generic Name")), expression.Name("Age").LessThan(expression.Value(40))) +// // condition represents the condition where the item attribute "Name" is +// // equal to value "Generic Name" AND the item attribute "Age" is less +// // than value 40 +// condition := expression.And(expression.Name("Name").Equal(expression.Value("Generic Name")), expression.Name("Age").LessThan(expression.Value(40))) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.And(expression.Name("Name").Equal(expression.Value("Generic Name")), expression.Name("Age").LessThan(expression.Value(40))) -// // Let #NAME, :name, and :forty be ExpressionAttributeName and -// // ExpressionAttributeValues representing the item attribute "Name", the -// // value "Generic Name", and the value 40 -// "(#NAME = :name) AND (Age < :forty)" +// expression.And(expression.Name("Name").Equal(expression.Value("Generic Name")), expression.Name("Age").LessThan(expression.Value(40))) +// // Let #NAME, :name, and :forty be ExpressionAttributeName and +// // ExpressionAttributeValues representing the item attribute "Name", the +// // value "Generic Name", and the value 40 +// "(#NAME = :name) AND (Age < :forty)" func And(left, right ConditionBuilder, other ...ConditionBuilder) ConditionBuilder { other = append([]ConditionBuilder{left, right}, other...) 
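For orientation, here is a minimal runnable sketch of the comparison builders documented above, wired into the Builder and request-input pattern that appears later in this diff. It is a sketch only; the table name, key, and attribute names are illustrative assumptions, not taken from the diff:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// "foo >= :five", per the GreaterThanEqual docs above.
		cond := expression.Name("foo").GreaterThanEqual(expression.Value(5))
		expr, err := expression.NewBuilder().WithCondition(cond).Build()
		if err != nil {
			fmt.Println(err)
			return
		}
		// The built strings and maps plug straight into a request input.
		// Table and key names here are placeholders.
		input := &dynamodb.DeleteItemInput{
			ConditionExpression:       expr.Condition(),
			ExpressionAttributeNames:  expr.Names(),
			ExpressionAttributeValues: expr.Values(),
			Key: map[string]*dynamodb.AttributeValue{
				"PartitionKey": {S: aws.String("SomeKey")},
			},
			TableName: aws.String("SomeTable"),
		}
		fmt.Println(input.String())
	}
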
return ConditionBuilder{ @@ -747,23 +747,23 @@ func And(left, right ConditionBuilder, other ...ConditionBuilder) ConditionBuild // // Example: // -// // condition represents the condition where the item attribute "Name" is -// // equal to value "Generic Name" AND the item attribute "Age" is less -// // than value 40 -// condition := expression.Name("Name").Equal(expression.Value("Generic Name")).And(expression.Name("Age").LessThan(expression.Value(40))) +// // condition represents the condition where the item attribute "Name" is +// // equal to value "Generic Name" AND the item attribute "Age" is less +// // than value 40 +// condition := expression.Name("Name").Equal(expression.Value("Generic Name")).And(expression.Name("Age").LessThan(expression.Value(40))) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Name").Equal(expression.Value("Generic Name")).And(expression.Name("Age").LessThan(expression.Value(40))) -// // Let #NAME, :name, and :forty be ExpressionAttributeName and -// // ExpressionAttributeValues representing the item attribute "Name", the -// // value "Generic Name", and the value 40 -// "(#NAME = :name) AND (Age < :forty)" +// expression.Name("Name").Equal(expression.Value("Generic Name")).And(expression.Name("Age").LessThan(expression.Value(40))) +// // Let #NAME, :name, and :forty be ExpressionAttributeName and +// // ExpressionAttributeValues representing the item attribute "Name", the +// // value "Generic Name", and the value 40 +// "(#NAME = :name) AND (Age < :forty)" func (cb ConditionBuilder) And(right ConditionBuilder, other ...ConditionBuilder) ConditionBuilder { return And(cb, right, other...) 
} @@ -776,22 +776,22 @@ func (cb ConditionBuilder) And(right ConditionBuilder, other ...ConditionBuilder // // Example: // -// // condition represents the condition where the item attribute "Price" is -// // less than the value 100 OR the item attribute "Rating" is greater than -// // the value 8 -// condition := expression.Or(expression.Name("Price").Equal(expression.Value(100)), expression.Name("Rating").LessThan(expression.Value(8))) +// // condition represents the condition where the item attribute "Price" is +// // less than the value 100 OR the item attribute "Rating" is greater than +// // the value 8 +// condition := expression.Or(expression.Name("Price").Equal(expression.Value(100)), expression.Name("Rating").LessThan(expression.Value(8))) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Or(expression.Name("Price").Equal(expression.Value(100)), expression.Name("Rating").LessThan(expression.Value(8))) -// // Let :price and :rating be ExpressionAttributeValues representing the -// // the value 100 and value 8 respectively -// "(Price < :price) OR (Rating > :rating)" +// expression.Or(expression.Name("Price").Equal(expression.Value(100)), expression.Name("Rating").LessThan(expression.Value(8))) +// // Let :price and :rating be ExpressionAttributeValues representing the +// // the value 100 and value 8 respectively +// "(Price < :price) OR (Rating > :rating)" func Or(left, right ConditionBuilder, other ...ConditionBuilder) ConditionBuilder { other = append([]ConditionBuilder{left, right}, other...) 
 	return ConditionBuilder{
@@ -808,22 +808,22 @@ func (cb ConditionBuilder) Or(right ConditionBuilder, other ...ConditionBuilde
 //
 // Example:
 //
-// // condition represents the condition where the item attribute "Price" is
-// // less than the value 100 OR the item attribute "Rating" is greater than
-// // the value 8
-// condition := expression.Name("Price").Equal(expression.Value(100)).Or(expression.Name("Rating").LessThan(expression.Value(8)))
+//	// condition represents the condition where the item attribute "Price" is
+//	// less than the value 100 OR the item attribute "Rating" is greater than
+//	// the value 8
+//	condition := expression.Name("Price").LessThan(expression.Value(100)).Or(expression.Name("Rating").GreaterThan(expression.Value(8)))
 //
-// // Used in another Condition Expression
-// anotherCondition := expression.Not(condition)
-// // Used to make an Builder
-// builder := expression.NewBuilder().WithCondition(condition)
+//	// Used in another Condition Expression
+//	anotherCondition := expression.Not(condition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithCondition(condition)
 //
 // Expression Equivalent:
 //
-// expression.Name("Price").Equal(expression.Value(100)).Or(expression.Name("Rating").LessThan(expression.Value(8)))
-// // Let :price and :rating be ExpressionAttributeValues representing the
-// // the value 100 and value 8 respectively
-// "(Price < :price) OR (Rating > :rating)"
+//	expression.Name("Price").LessThan(expression.Value(100)).Or(expression.Name("Rating").GreaterThan(expression.Value(8)))
+//	// Let :price and :rating be ExpressionAttributeValues representing
+//	// the value 100 and the value 8 respectively
+//	"(Price < :price) OR (Rating > :rating)"
 func (cb ConditionBuilder) Or(right ConditionBuilder, other ...ConditionBuilder) ConditionBuilder {
 	return Or(cb, right, other...)
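A compact sketch of composing conditions with And, Or, and Not as documented above; the attribute names and values are illustrative, and the printed output shown in the comment is an expected aliased form, not a verbatim guarantee:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// "Name equals ... AND Age less than 40", then negated with Not.
		cond := expression.Name("Name").Equal(expression.Value("Generic Name")).
			And(expression.Name("Age").LessThan(expression.Value(40)))
		expr, err := expression.NewBuilder().WithCondition(expression.Not(cond)).Build()
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(*expr.Condition()) // e.g. "NOT ((#0 = :0) AND (#1 < :1))"
	}
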
} @@ -835,21 +835,21 @@ func (cb ConditionBuilder) Or(right ConditionBuilder, other ...ConditionBuilder) // // Example: // -// // condition represents the condition where the item attribute "Name" -// // does not begin with "test" -// condition := expression.Not(expression.Name("Name").BeginsWith("test")) +// // condition represents the condition where the item attribute "Name" +// // does not begin with "test" +// condition := expression.Not(expression.Name("Name").BeginsWith("test")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Not(expression.Name("Name").BeginsWith("test")) -// // Let :prefix be an ExpressionAttributeValue representing the value -// // "test" -// "NOT (begins_with (:prefix))" +// expression.Not(expression.Name("Name").BeginsWith("test")) +// // Let :prefix be an ExpressionAttributeValue representing the value +// // "test" +// "NOT (begins_with (:prefix))" func Not(conditionBuilder ConditionBuilder) ConditionBuilder { return ConditionBuilder{ conditionList: []ConditionBuilder{conditionBuilder}, @@ -864,21 +864,21 @@ func Not(conditionBuilder ConditionBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the item attribute "Name" -// // does not begin with "test" -// condition := expression.Name("Name").BeginsWith("test").Not() +// // condition represents the condition where the item attribute "Name" +// // does not begin with "test" +// condition := expression.Name("Name").BeginsWith("test").Not() // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Name").BeginsWith("test").Not() -// // Let :prefix be an ExpressionAttributeValue representing the value -// // "test" -// "NOT (begins_with (:prefix))" +// expression.Name("Name").BeginsWith("test").Not() +// // Let :prefix be an ExpressionAttributeValue representing the value +// // "test" +// "NOT (begins_with (:prefix))" func (cb ConditionBuilder) Not() ConditionBuilder { return Not(cb) } @@ -890,21 +890,21 @@ func (cb ConditionBuilder) Not() ConditionBuilder { // // Example: // -// // condition represents the condition where the value of the item -// // attribute "Rating" is between values 5 and 10 -// condition := expression.Between(expression.Name("Rating"), expression.Value(5), expression.Value(10)) +// // condition represents the condition where the value of the item +// // attribute "Rating" is between values 5 and 10 +// condition := expression.Between(expression.Name("Rating"), expression.Value(5), expression.Value(10)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an 
Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Between(expression.Name("Rating"), expression.Value(5), expression.Value(10)) -// // Let :five and :ten be ExpressionAttributeValues representing the value -// // 5 and the value 10 -// "Rating BETWEEN :five AND :ten" +// expression.Between(expression.Name("Rating"), expression.Value(5), expression.Value(10)) +// // Let :five and :ten be ExpressionAttributeValues representing the value +// // 5 and the value 10 +// "Rating BETWEEN :five AND :ten" func Between(op, lower, upper OperandBuilder) ConditionBuilder { return ConditionBuilder{ operandList: []OperandBuilder{op, lower, upper}, @@ -919,21 +919,21 @@ func Between(op, lower, upper OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the value of the item -// // attribute "Rating" is between values 5 and 10 -// condition := expression.Name("Rating").Between(expression.Value(5), expression.Value(10)) +// // condition represents the condition where the value of the item +// // attribute "Rating" is between values 5 and 10 +// condition := expression.Name("Rating").Between(expression.Value(5), expression.Value(10)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Rating").Between(expression.Value(5), expression.Value(10)) -// // Let :five and :ten be ExpressionAttributeValues representing the value -// // 5 and the value 10 -// "Rating BETWEEN :five AND :ten" +// expression.Name("Rating").Between(expression.Value(5), expression.Value(10)) +// // Let :five and :ten be ExpressionAttributeValues representing the value +// // 5 and the value 10 +// "Rating BETWEEN :five AND :ten" func (nb NameBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { return Between(nb, lower, upper) } @@ -945,21 +945,21 @@ func (nb NameBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the value 6 is between values -// // 5 and 10 -// condition := expression.Value(6).Between(expression.Value(5), expression.Value(10)) +// // condition represents the condition where the value 6 is between values +// // 5 and 10 +// condition := expression.Value(6).Between(expression.Value(5), expression.Value(10)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value(6).Between(expression.Value(5), expression.Value(10)) -// // Let :six, :five and :ten be ExpressionAttributeValues representing the -// // values 6, 5, and 10 respectively -// ":six BETWEEN :five AND :ten" +// expression.Value(6).Between(expression.Value(5), expression.Value(10)) +// // Let :six, :five and :ten be ExpressionAttributeValues representing the +// // values 6, 5, and 10 respectively +// ":six BETWEEN :five 
AND :ten" func (vb ValueBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { return Between(vb, lower, upper) } @@ -971,21 +971,21 @@ func (vb ValueBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the size of the item -// // attribute "InviteList" is between values 5 and 10 -// condition := expression.Size(expression.Name("InviteList")).Between(expression.Value(5), expression.Value(10)) +// // condition represents the condition where the size of the item +// // attribute "InviteList" is between values 5 and 10 +// condition := expression.Size(expression.Name("InviteList")).Between(expression.Value(5), expression.Value(10)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("InviteList")).Between(expression.Value(5), expression.Value(10)) -// // Let :five and :ten be ExpressionAttributeValues representing the value -// // 5 and the value 10 -// "size (InviteList) BETWEEN :five AND :ten" +// expression.Size(expression.Name("InviteList")).Between(expression.Value(5), expression.Value(10)) +// // Let :five and :ten be ExpressionAttributeValues representing the value +// // 5 and the value 10 +// "size (InviteList) BETWEEN :five AND :ten" func (sb SizeBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { return Between(sb, lower, upper) } @@ -997,22 +997,22 @@ func (sb SizeBuilder) Between(lower, upper OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the value of the item -// // attribute "Color" is checked against the list of colors "red", -// // "green", and "blue". -// condition := expression.In(expression.Name("Color"), expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // condition represents the condition where the value of the item +// // attribute "Color" is checked against the list of colors "red", +// // "green", and "blue". 
+// condition := expression.In(expression.Name("Color"), expression.Value("red"), expression.Value("green"), expression.Value("blue")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.In(expression.Name("Color"), expression.Value("red"), expression.Value("green"), expression.Value("blue")) -// // Let :red, :green, :blue be ExpressionAttributeValues representing the -// // values "red", "green", and "blue" respectively -// "Color IN (:red, :green, :blue)" +// expression.In(expression.Name("Color"), expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // Let :red, :green, :blue be ExpressionAttributeValues representing the +// // values "red", "green", and "blue" respectively +// "Color IN (:red, :green, :blue)" func In(left, right OperandBuilder, other ...OperandBuilder) ConditionBuilder { other = append([]OperandBuilder{left, right}, other...) return ConditionBuilder{ @@ -1028,22 +1028,22 @@ func In(left, right OperandBuilder, other ...OperandBuilder) ConditionBuilder { // // Example: // -// // condition represents the condition where the value of the item -// // attribute "Color" is checked against the list of colors "red", -// // "green", and "blue". -// condition := expression.Name("Color").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // condition represents the condition where the value of the item +// // attribute "Color" is checked against the list of colors "red", +// // "green", and "blue". +// condition := expression.Name("Color").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Color").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) -// // Let :red, :green, :blue be ExpressionAttributeValues representing the -// // values "red", "green", and "blue" respectively -// "Color IN (:red, :green, :blue)" +// expression.Name("Color").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // Let :red, :green, :blue be ExpressionAttributeValues representing the +// // values "red", "green", and "blue" respectively +// "Color IN (:red, :green, :blue)" func (nb NameBuilder) In(right OperandBuilder, other ...OperandBuilder) ConditionBuilder { return In(nb, right, other...) } @@ -1056,22 +1056,22 @@ func (nb NameBuilder) In(right OperandBuilder, other ...OperandBuilder) Conditio // // Example: // -// // condition represents the condition where the value "yellow" is checked -// // against the list of colors "red", "green", and "blue". 
-// condition := expression.Value("yellow").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // condition represents the condition where the value "yellow" is checked +// // against the list of colors "red", "green", and "blue". +// condition := expression.Value("yellow").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Value("yellow").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) -// // Let :yellow, :red, :green, :blue be ExpressionAttributeValues -// // representing the values "yellow", "red", "green", and "blue" -// // respectively -// ":yellow IN (:red, :green, :blue)" +// expression.Value("yellow").In(expression.Value("red"), expression.Value("green"), expression.Value("blue")) +// // Let :yellow, :red, :green, :blue be ExpressionAttributeValues +// // representing the values "yellow", "red", "green", and "blue" +// // respectively +// ":yellow IN (:red, :green, :blue)" func (vb ValueBuilder) In(right OperandBuilder, other ...OperandBuilder) ConditionBuilder { return In(vb, right, other...) } @@ -1083,22 +1083,22 @@ func (vb ValueBuilder) In(right OperandBuilder, other ...OperandBuilder) Conditi // // Example: // -// // condition represents the condition where the size of the item -// // attribute "Donuts" is checked against the list of numbers 12, 24, and -// // 36. -// condition := expression.Size(expression.Name("Donuts")).In(expression.Value(12), expression.Value(24), expression.Value(36)) +// // condition represents the condition where the size of the item +// // attribute "Donuts" is checked against the list of numbers 12, 24, and +// // 36. +// condition := expression.Size(expression.Name("Donuts")).In(expression.Value(12), expression.Value(24), expression.Value(36)) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Size(expression.Name("Donuts")).In(expression.Value(12), expression.Value(24), expression.Value(36)) -// // Let :dozen, :twoDozen, :threeDozen be ExpressionAttributeValues -// // representing the values 12, 24, and 36 respectively -// "size (Donuts) IN (12, 24, 36)" +// expression.Size(expression.Name("Donuts")).In(expression.Value(12), expression.Value(24), expression.Value(36)) +// // Let :dozen, :twoDozen, :threeDozen be ExpressionAttributeValues +// // representing the values 12, 24, and 36 respectively +// "size (Donuts) IN (12, 24, 36)" func (sb SizeBuilder) In(right OperandBuilder, other ...OperandBuilder) ConditionBuilder { return In(sb, right, other...) 
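Likewise for the range and membership helpers above, a small self-contained sketch; attribute names and values are again illustrative assumptions:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// "Rating BETWEEN :five AND :ten" combined with "Color IN (:red, :green, :blue)".
		between := expression.Name("Rating").Between(expression.Value(5), expression.Value(10))
		in := expression.Name("Color").In(expression.Value("red"), expression.Value("green"), expression.Value("blue"))
		expr, err := expression.NewBuilder().WithCondition(between.And(in)).Build()
		if err != nil {
			fmt.Println(err)
			return
		}
		// Prints the aliased form, e.g. "(#0 BETWEEN :0 AND :1) AND (#1 IN (:2, :3, :4))".
		fmt.Println(*expr.Condition())
	}
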
 }
@@ -1110,19 +1110,19 @@ func (sb SizeBuilder) In(right OperandBuilder, other ...OperandBuilder) Conditio
 //
 // Example:
 //
-// // condition represents the boolean condition of whether the item
-// // attribute "Age" exists or not
-// condition := expression.AttributeExists(expression.Name("Age"))
+//	// condition represents the boolean condition of whether the item
+//	// attribute "Age" exists or not
+//	condition := expression.AttributeExists(expression.Name("Age"))
 //
-// // Used in another Condition Expression
-// anotherCondition := expression.Not(condition)
-// // Used to make an Builder
-// builder := expression.NewBuilder().WithCondition(condition)
+//	// Used in another Condition Expression
+//	anotherCondition := expression.Not(condition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithCondition(condition)
 //
 // Expression Equivalent:
 //
-// expression.AttributeExists(expression.Name("Age"))
-// "attribute_exists (Age))"
+//	expression.AttributeExists(expression.Name("Age"))
+//	"attribute_exists (Age)"
 func AttributeExists(nameBuilder NameBuilder) ConditionBuilder {
 	return ConditionBuilder{
 		operandList: []OperandBuilder{nameBuilder},
@@ -1137,19 +1137,19 @@ func AttributeExists(nameBuilder NameBuilder) ConditionBuilder {
 //
 // Example:
 //
-// // condition represents the boolean condition of whether the item
-// // attribute "Age" exists or not
-// condition := expression.Name("Age").AttributeExists()
+//	// condition represents the boolean condition of whether the item
+//	// attribute "Age" exists or not
+//	condition := expression.Name("Age").AttributeExists()
 //
-// // Used in another Condition Expression
-// anotherCondition := expression.Not(condition)
-// // Used to make an Builder
-// builder := expression.NewBuilder().WithCondition(condition)
+//	// Used in another Condition Expression
+//	anotherCondition := expression.Not(condition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithCondition(condition)
 //
 // Expression Equivalent:
 //
-// expression.Name("Age").AttributeExists()
-// "attribute_exists (Age))"
+//	expression.Name("Age").AttributeExists()
+//	"attribute_exists (Age)"
 func (nb NameBuilder) AttributeExists() ConditionBuilder {
 	return AttributeExists(nb)
 }
@@ -1162,19 +1162,19 @@ func (nb NameBuilder) AttributeExists() ConditionBuilder {
 //
 // Example:
 //
-// // condition represents the boolean condition of whether the item
-// // attribute "Age" exists or not
-// condition := expression.AttributeNotExists(expression.Name("Age"))
+//	// condition represents the boolean condition of whether the item
+//	// attribute "Age" exists or not
+//	condition := expression.AttributeNotExists(expression.Name("Age"))
 //
-// // Used in another Condition Expression
-// anotherCondition := expression.Not(condition)
-// // Used to make an Builder
-// builder := expression.NewBuilder().WithCondition(condition)
+//	// Used in another Condition Expression
+//	anotherCondition := expression.Not(condition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithCondition(condition)
 //
 // Expression Equivalent:
 //
-// expression.AttributeNotExists(expression.Name("Age"))
-// "attribute_not_exists (Age))"
+//	expression.AttributeNotExists(expression.Name("Age"))
+//	"attribute_not_exists (Age)"
 func AttributeNotExists(nameBuilder NameBuilder) ConditionBuilder {
 	return ConditionBuilder{
 		operandList: []OperandBuilder{nameBuilder},
@@ -1190,19 +1190,19 @@ func AttributeNotExists(nameBuilder NameBuilder)
ConditionBuilder { // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "Age" exists or not -// condition := expression.Name("Age").AttributeNotExists() +// // condition represents the boolean condition of whether the item +// // attribute "Age" exists or not +// condition := expression.Name("Age").AttributeNotExists() // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Age").AttributeNotExists() -// "attribute_not_exists (Age))" +// expression.Name("Age").AttributeNotExists() +// "attribute_not_exists (Age))" func (nb NameBuilder) AttributeNotExists() ConditionBuilder { return AttributeNotExists(nb) } @@ -1215,20 +1215,20 @@ func (nb NameBuilder) AttributeNotExists() ConditionBuilder { // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "Age" has the DynamoDB type Number or not -// condition := expression.AttributeType(expression.Name("Age"), expression.Number) +// // condition represents the boolean condition of whether the item +// // attribute "Age" has the DynamoDB type Number or not +// condition := expression.AttributeType(expression.Name("Age"), expression.Number) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.AttributeType(expression.Name("Age"), expression.Number) -// // Let :type be an ExpressionAttributeValue representing the value "N" -// "attribute_type (Age, :type)" +// expression.AttributeType(expression.Name("Age"), expression.Number) +// // Let :type be an ExpressionAttributeValue representing the value "N" +// "attribute_type (Age, :type)" func AttributeType(nameBuilder NameBuilder, attributeType DynamoDBAttributeType) ConditionBuilder { v := ValueBuilder{ value: string(attributeType), @@ -1247,20 +1247,20 @@ func AttributeType(nameBuilder NameBuilder, attributeType DynamoDBAttributeType) // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "Age" has the DynamoDB type Number or not -// condition := expression.Name("Age").AttributeType(expression.Number) +// // condition represents the boolean condition of whether the item +// // attribute "Age" has the DynamoDB type Number or not +// condition := expression.Name("Age").AttributeType(expression.Number) // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("Age").AttributeType(expression.Number) -// // Let :type be an ExpressionAttributeValue 
representing the value "N" -// "attribute_type (Age, :type)" +// expression.Name("Age").AttributeType(expression.Number) +// // Let :type be an ExpressionAttributeValue representing the value "N" +// "attribute_type (Age, :type)" func (nb NameBuilder) AttributeType(attributeType DynamoDBAttributeType) ConditionBuilder { return AttributeType(nb, attributeType) } @@ -1272,20 +1272,20 @@ func (nb NameBuilder) AttributeType(attributeType DynamoDBAttributeType) Conditi // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "CodeName" starts with the substring "Ben" -// condition := expression.BeginsWith(expression.Name("CodeName"), "Ben") +// // condition represents the boolean condition of whether the item +// // attribute "CodeName" starts with the substring "Ben" +// condition := expression.BeginsWith(expression.Name("CodeName"), "Ben") // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.BeginsWith(expression.Name("CodeName"), "Ben") -// // Let :ben be an ExpressionAttributeValue representing the value "Ben" -// "begins_with (CodeName, :ben)" +// expression.BeginsWith(expression.Name("CodeName"), "Ben") +// // Let :ben be an ExpressionAttributeValue representing the value "Ben" +// "begins_with (CodeName, :ben)" func BeginsWith(nameBuilder NameBuilder, prefix string) ConditionBuilder { v := ValueBuilder{ value: prefix, @@ -1303,20 +1303,20 @@ func BeginsWith(nameBuilder NameBuilder, prefix string) ConditionBuilder { // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "CodeName" starts with the substring "Ben" -// condition := expression.Name("CodeName").BeginsWith("Ben") +// // condition represents the boolean condition of whether the item +// // attribute "CodeName" starts with the substring "Ben" +// condition := expression.Name("CodeName").BeginsWith("Ben") // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("CodeName").BeginsWith("Ben") -// // Let :ben be an ExpressionAttributeValue representing the value "Ben" -// "begins_with (CodeName, :ben)" +// expression.Name("CodeName").BeginsWith("Ben") +// // Let :ben be an ExpressionAttributeValue representing the value "Ben" +// "begins_with (CodeName, :ben)" func (nb NameBuilder) BeginsWith(prefix string) ConditionBuilder { return BeginsWith(nb, prefix) } @@ -1328,20 +1328,20 @@ func (nb NameBuilder) BeginsWith(prefix string) ConditionBuilder { // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "InviteList" has the value "Ben" -// condition := expression.Contains(expression.Name("InviteList"), "Ben") +// // condition represents the boolean condition of whether the item +// // attribute "InviteList" has the value "Ben" +// condition := 
expression.Contains(expression.Name("InviteList"), "Ben") // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Contains(expression.Name("InviteList"), "Ben") -// // Let :ben be an ExpressionAttributeValue representing the value "Ben" -// "contains (InviteList, :ben)" +// expression.Contains(expression.Name("InviteList"), "Ben") +// // Let :ben be an ExpressionAttributeValue representing the value "Ben" +// "contains (InviteList, :ben)" func Contains(nameBuilder NameBuilder, substr string) ConditionBuilder { v := ValueBuilder{ value: substr, @@ -1359,20 +1359,20 @@ func Contains(nameBuilder NameBuilder, substr string) ConditionBuilder { // // Example: // -// // condition represents the boolean condition of whether the item -// // attribute "InviteList" has the value "Ben" -// condition := expression.Name("InviteList").Contains("Ben") +// // condition represents the boolean condition of whether the item +// // attribute "InviteList" has the value "Ben" +// condition := expression.Name("InviteList").Contains("Ben") // -// // Used in another Condition Expression -// anotherCondition := expression.Not(condition) -// // Used to make an Builder -// builder := expression.NewBuilder().WithCondition(condition) +// // Used in another Condition Expression +// anotherCondition := expression.Not(condition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithCondition(condition) // // Expression Equivalent: // -// expression.Name("InviteList").Contains("Ben") -// // Let :ben be an ExpressionAttributeValue representing the value "Ben" -// "contains (InviteList, :ben)" +// expression.Name("InviteList").Contains("Ben") +// // Let :ben be an ExpressionAttributeValue representing the value "Ben" +// "contains (InviteList, :ben)" func (nb NameBuilder) Contains(substr string) ConditionBuilder { return Contains(nb, substr) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/doc.go index bdaa0af864..13522ad5bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/doc.go @@ -3,7 +3,7 @@ Package expression provides types and functions to create Amazon DynamoDB Expression strings, ExpressionAttributeNames maps, and ExpressionAttributeValues maps. -Using the Package +# Using the Package The package represents the various DynamoDB Expressions as structs named accordingly. For example, ConditionBuilder represents a DynamoDB Condition @@ -11,10 +11,10 @@ Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on. 
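As a side note on the condition.go helpers reformatted above, here is a small runnable sketch combining the existence, type, prefix, and substring checks; every name and value in it is an illustrative assumption, not something taken from the diff:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// Roughly: attribute_exists, attribute_type, begins_with, and
		// contains, chained with And; the built string uses aliased names.
		cond := expression.Name("Age").AttributeExists().
			And(expression.Name("Age").AttributeType(expression.Number)).
			And(expression.Name("CodeName").BeginsWith("Ben")).
			And(expression.Name("InviteList").Contains("Ben"))
		expr, err := expression.NewBuilder().WithCondition(cond).Build()
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(*expr.Condition())
	}
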
The following example shows a sample ConditionExpression and how to build an
equivalent ConditionBuilder

-    // Let :a be an ExpressionAttributeValue representing the string "No One You
-    // Know"
-    condExpr := "Artist = :a"
-    condBuilder := expression.Name("Artist").Equal(expression.Value("No One You Know"))
+	// Let :a be an ExpressionAttributeValue representing the string "No One You
+	// Know"
+	condExpr := "Artist = :a"
+	condBuilder := expression.Name("Artist").Equal(expression.Value("No One You Know"))

In order to retrieve the formatted DynamoDB Expression strings, call the getter
methods on the Expression struct. To create the Expression struct, call the
@@ -23,20 +23,20 @@ QueryInput, can have multiple DynamoDB Expressions, multiple structs
representing various DynamoDB Expressions can be added to the Builder struct.
The following example shows a generic usage of the whole package.

-    filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
-    proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
-    expr, err := expression.NewBuilder().WithFilter(filt).WithProjection(proj).Build()
-    if err != nil {
-        fmt.Println(err)
-    }
+	filt := expression.Name("Artist").Equal(expression.Value("No One You Know"))
+	proj := expression.NamesList(expression.Name("SongTitle"), expression.Name("AlbumTitle"))
+	expr, err := expression.NewBuilder().WithFilter(filt).WithProjection(proj).Build()
+	if err != nil {
+		fmt.Println(err)
+	}

-    input := &dynamodb.ScanInput{
-        ExpressionAttributeNames:  expr.Names(),
-        ExpressionAttributeValues: expr.Values(),
-        FilterExpression:          expr.Filter(),
-        ProjectionExpression:      expr.Projection(),
-        TableName:                 aws.String("Music"),
-    }
+	input := &dynamodb.ScanInput{
+		ExpressionAttributeNames:  expr.Names(),
+		ExpressionAttributeValues: expr.Values(),
+		FilterExpression:          expr.Filter(),
+		ProjectionExpression:      expr.Projection(),
+		TableName:                 aws.String("Music"),
+	}

The ExpressionAttributeNames and ExpressionAttributeValues members of the input
struct must always be assigned when using the Expression struct because all item
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/error.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/error.go
index 7378d7e219..3229686f1f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/error.go
@@ -12,8 +12,8 @@ import (
 //
 // Example:
 //
-// // err is of type InvalidParameterError
-// _, err := expression.Name("foo..bar").BuildOperand()
+//	// err is of type InvalidParameterError
+//	_, err := expression.Name("foo..bar").BuildOperand()
 type InvalidParameterError struct {
 	parameterType string
 	functionName  string
@@ -37,11 +37,11 @@ func newInvalidParameterError(funcName, paramType string) InvalidParameterError
 //
 // Example:
 //
-// // err is of type UnsetParameterError
-// _, err := expression.Builder{}.Build()
-// _, err := expression.NewBuilder().
-// WithCondition(expression.ConditionBuilder{}).
-// Build()
+//	// err is of type UnsetParameterError
+//	_, err := expression.Builder{}.Build()
+//	_, err := expression.NewBuilder().
+//		WithCondition(expression.ConditionBuilder{}).
+// Build() type UnsetParameterError struct { parameterType string functionName string diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/expression.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/expression.go index a41eb60ad1..b6e17c4139 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/expression.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/expression.go @@ -42,19 +42,19 @@ func (l typeList) Swap(i, j int) { // // Example: // -// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) -// proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName")) -// -// builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj) -// expr := builder.Build() -// -// queryInput := dynamodb.QueryInput{ -// KeyConditionExpression: expr.KeyCondition(), -// ProjectionExpression: expr.Projection(), -// ExpressionAttributeNames: expr.Names(), -// ExpressionAttributeValues: expr.Values(), -// TableName: aws.String("SomeTable"), -// } +// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) +// proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName")) +// +// builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj) +// expr := builder.Build() +// +// queryInput := dynamodb.QueryInput{ +// KeyConditionExpression: expr.KeyCondition(), +// ProjectionExpression: expr.Projection(), +// ExpressionAttributeNames: expr.Names(), +// ExpressionAttributeValues: expr.Values(), +// TableName: aws.String("SomeTable"), +// } type Builder struct { expressionMap map[expressionType]treeBuilder } @@ -66,9 +66,9 @@ type Builder struct { // // Example: // -// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) -// proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName")) -// builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj) +// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) +// proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName")) +// builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj) func NewBuilder() Builder { return Builder{} } @@ -81,23 +81,23 @@ func NewBuilder() Builder { // // Example: // -// // keyCond represents the Key Condition Expression -// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) -// // proj represents the Projection Expression -// proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName")) -// -// // Add keyCond and proj to builder as a Key Condition and Projection -// // respectively -// builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj) -// expr := builder.Build() -// -// queryInput := dynamodb.QueryInput{ -// KeyConditionExpression: expr.KeyCondition(), -// ProjectionExpression: expr.Projection(), -// ExpressionAttributeNames: expr.Names(), -// ExpressionAttributeValues: expr.Values(), -// TableName: aws.String("SomeTable"), -// } +// // keyCond represents the Key Condition Expression +// keyCond := expression.Key("someKey").Equal(expression.Value("someValue")) +// // proj represents the Projection Expression +// proj := 
expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName"))
+//
+//	// Add keyCond and proj to builder as a Key Condition and Projection
+//	// respectively
+//	builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj)
+//	expr := builder.Build()
+//
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expr.KeyCondition(),
+//		ProjectionExpression:      expr.Projection(),
+//		ExpressionAttributeNames:  expr.Names(),
+//		ExpressionAttributeValues: expr.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (b Builder) Build() (Expression, error) {
 	if b.expressionMap == nil {
 		return Expression{}, newUnsetParameterError("Build", "Builder")
@@ -169,15 +169,15 @@ func (b Builder) buildChildTrees() (aliasList, map[expressionType]string, error)
 //
 // Example:
 //
-//    // let builder be an existing Builder{} and cond be an existing
-//    // ConditionBuilder{}
-//    builder = builder.WithCondition(cond)
+//	// let builder be an existing Builder{} and cond be an existing
+//	// ConditionBuilder{}
+//	builder = builder.WithCondition(cond)
 //
-//    // add other DynamoDB Expressions to the builder. let proj be an already
-//    // existing ProjectionBuilder
-//    builder = builder.WithProjection(proj)
-//    // create an Expression struct
-//    expr := builder.Build()
+//	// add other DynamoDB Expressions to the builder. let proj be an already
+//	// existing ProjectionBuilder
+//	builder = builder.WithProjection(proj)
+//	// create an Expression struct
+//	expr := builder.Build()
 func (b Builder) WithCondition(conditionBuilder ConditionBuilder) Builder {
 	if b.expressionMap == nil {
 		b.expressionMap = map[expressionType]treeBuilder{}
@@ -193,15 +193,15 @@ func (b Builder) WithCondition(conditionBuilder ConditionBuilder) Builder {
 //
 // Example:
 //
-//    // let builder be an existing Builder{} and proj be an existing
-//    // ProjectionBuilder{}
-//    builder = builder.WithProjection(proj)
+//	// let builder be an existing Builder{} and proj be an existing
+//	// ProjectionBuilder{}
+//	builder = builder.WithProjection(proj)
 //
-//    // add other DynamoDB Expressions to the builder. let cond be an already
-//    // existing ConditionBuilder
-//    builder = builder.WithCondition(cond)
-//    // create an Expression struct
-//    expr := builder.Build()
+//	// add other DynamoDB Expressions to the builder. let cond be an already
+//	// existing ConditionBuilder
+//	builder = builder.WithCondition(cond)
+//	// create an Expression struct
+//	expr := builder.Build()
 func (b Builder) WithProjection(projectionBuilder ProjectionBuilder) Builder {
 	if b.expressionMap == nil {
 		b.expressionMap = map[expressionType]treeBuilder{}
@@ -217,15 +217,15 @@ func (b Builder) WithProjection(projectionBuilder ProjectionBuilder) Builder {
 //
 // Example:
 //
-//    // let builder be an existing Builder{} and keyCond be an existing
-//    // KeyConditionBuilder{}
-//    builder = builder.WithKeyCondition(keyCond)
+//	// let builder be an existing Builder{} and keyCond be an existing
+//	// KeyConditionBuilder{}
+//	builder = builder.WithKeyCondition(keyCond)
 //
-//    // add other DynamoDB Expressions to the builder. let cond be an already
-//    // existing ConditionBuilder
-//    builder = builder.WithCondition(cond)
-//    // create an Expression struct
-//    expr := builder.Build()
+//	// add other DynamoDB Expressions to the builder. let cond be an already
+//	// existing ConditionBuilder
+//	builder = builder.WithCondition(cond)
+//	// create an Expression struct
+//	expr := builder.Build()
 func (b Builder) WithKeyCondition(keyConditionBuilder KeyConditionBuilder) Builder {
 	if b.expressionMap == nil {
 		b.expressionMap = map[expressionType]treeBuilder{}
@@ -241,15 +241,15 @@ func (b Builder) WithKeyCondition(keyConditionBuilder KeyConditionBuilder) Build
 //
 // Example:
 //
-//    // let builder be an existing Builder{} and filt be an existing
-//    // ConditionBuilder{}
-//    builder = builder.WithFilter(filt)
+//	// let builder be an existing Builder{} and filt be an existing
+//	// ConditionBuilder{}
+//	builder = builder.WithFilter(filt)
 //
-//    // add other DynamoDB Expressions to the builder. let cond be an already
-//    // existing ConditionBuilder
-//    builder = builder.WithCondition(cond)
-//    // create an Expression struct
-//    expr := builder.Build()
+//	// add other DynamoDB Expressions to the builder. let cond be an already
+//	// existing ConditionBuilder
+//	builder = builder.WithCondition(cond)
+//	// create an Expression struct
+//	expr := builder.Build()
 func (b Builder) WithFilter(filterBuilder ConditionBuilder) Builder {
 	if b.expressionMap == nil {
 		b.expressionMap = map[expressionType]treeBuilder{}
@@ -265,15 +265,15 @@ func (b Builder) WithFilter(filterBuilder ConditionBuilder) Builder {
 //
 // Example:
 //
-//    // let builder be an existing Builder{} and update be an existing
-//    // UpdateBuilder{}
-//    builder = builder.WithUpdate(update)
+//	// let builder be an existing Builder{} and update be an existing
+//	// UpdateBuilder{}
+//	builder = builder.WithUpdate(update)
 //
-//    // add other DynamoDB Expressions to the builder. let cond be an already
-//    // existing ConditionBuilder
-//    builder = builder.WithCondition(cond)
-//    // create an Expression struct
-//    expr := builder.Build()
+//	// add other DynamoDB Expressions to the builder. let cond be an already
+//	// existing ConditionBuilder
+//	builder = builder.WithCondition(cond)
+//	// create an Expression struct
+//	expr := builder.Build()
 func (b Builder) WithUpdate(updateBuilder UpdateBuilder) Builder {
 	if b.expressionMap == nil {
 		b.expressionMap = map[expressionType]treeBuilder{}
@@ -288,23 +288,23 @@ func (b Builder) WithUpdate(updateBuilder UpdateBuilder) Builder {
 //
 // Example:
 //
-//    // keyCond represents the Key Condition Expression
-//    keyCond := expression.Key("someKey").Equal(expression.Value("someValue"))
-//    // proj represents the Projection Expression
-//    proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName"))
-//
-//    // Add keyCond and proj to builder as a Key Condition and Projection
-//    // respectively
-//    builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj)
-//    expr := builder.Build()
-//
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expr.KeyCondition(),
-//        ProjectionExpression:      expr.Projection(),
-//        ExpressionAttributeNames:  expr.Names(),
-//        ExpressionAttributeValues: expr.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	// keyCond represents the Key Condition Expression
+//	keyCond := expression.Key("someKey").Equal(expression.Value("someValue"))
+//	// proj represents the Projection Expression
+//	proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"), expression.Name("oneOtherName"))
+//
+//	// Add keyCond and proj to builder as a Key Condition and Projection
+//	// respectively
+//	builder := expression.NewBuilder().WithKeyCondition(keyCond).WithProjection(proj)
+//	expr := builder.Build()
+//
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expr.KeyCondition(),
+//		ProjectionExpression:      expr.Projection(),
+//		ExpressionAttributeNames:  expr.Names(),
+//		ExpressionAttributeValues: expr.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 type Expression struct {
 	expressionMap map[expressionType]string
 	namesMap      map[string]*string
@@ -328,19 +328,19 @@ type treeBuilder interface {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
-//
-//    deleteInput := dynamodb.DeleteItemInput{
-//        ConditionExpression:       expression.Condition(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        Key: map[string]*dynamodb.AttributeValue{
-//            "PartitionKey": &dynamodb.AttributeValue{
-//                S: aws.String("SomeKey"),
-//            },
-//        },
-//        TableName: aws.String("SomeTable"),
-//    }
+//	// let expression be an instance of Expression{}
+//
+//	deleteInput := dynamodb.DeleteItemInput{
+//		ConditionExpression:       expression.Condition(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		Key: map[string]*dynamodb.AttributeValue{
+//			"PartitionKey": &dynamodb.AttributeValue{
+//				S: aws.String("SomeKey"),
+//			},
+//		},
+//		TableName: aws.String("SomeTable"),
+//	}
 func (e Expression) Condition() *string {
 	return e.returnExpression(condition)
 }
@@ -352,15 +352,15 @@ func (e Expression) Condition() *string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
+//	// let expression be an instance of Expression{}
 //
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expression.KeyCondition(),
-//        FilterExpression:          expression.Filter(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expression.KeyCondition(),
+//		FilterExpression:          expression.Filter(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) Filter() *string {
 	return e.returnExpression(filter)
 }
@@ -372,15 +372,15 @@ func (e Expression) Filter() *string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
+//	// let expression be an instance of Expression{}
 //
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expression.KeyCondition(),
-//        ProjectionExpression:      expression.Projection(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expression.KeyCondition(),
+//		ProjectionExpression:      expression.Projection(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) Projection() *string {
 	return e.returnExpression(projection)
 }
@@ -392,15 +392,15 @@ func (e Expression) Projection() *string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
+//	// let expression be an instance of Expression{}
 //
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expression.KeyCondition(),
-//        ProjectionExpression:      expression.Projection(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expression.KeyCondition(),
+//		ProjectionExpression:      expression.Projection(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) KeyCondition() *string {
 	return e.returnExpression(keyCondition)
 }
@@ -412,19 +412,19 @@ func (e Expression) KeyCondition() *string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
-//
-//    updateInput := dynamodb.UpdateInput{
-//        Key: map[string]*dynamodb.AttributeValue{
-//            "PartitionKey": {
-//                S: aws.String("someKey"),
-//            },
-//        },
-//        UpdateExpression:          expression.Update(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	// let expression be an instance of Expression{}
+//
+//	updateInput := dynamodb.UpdateItemInput{
+//		Key: map[string]*dynamodb.AttributeValue{
+//			"PartitionKey": {
+//				S: aws.String("someKey"),
+//			},
+//		},
+//		UpdateExpression:          expression.Update(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) Update() *string {
 	return e.returnExpression(update)
 }
@@ -442,15 +442,15 @@ func (e Expression) Update() *string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
+//	// let expression be an instance of Expression{}
 //
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expression.KeyCondition(),
-//        ProjectionExpression:      expression.Projection(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expression.KeyCondition(),
+//		ProjectionExpression:      expression.Projection(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) Names() map[string]*string {
 	return e.namesMap
 }
@@ -468,15 +468,15 @@ func (e Expression) Names() map[string]*string {
 //
 // Example:
 //
-//    // let expression be an instance of Expression{}
+//	// let expression be an instance of Expression{}
 //
-//    queryInput := dynamodb.QueryInput{
-//        KeyConditionExpression:    expression.KeyCondition(),
-//        ProjectionExpression:      expression.Projection(),
-//        ExpressionAttributeNames:  expression.Names(),
-//        ExpressionAttributeValues: expression.Values(),
-//        TableName:                 aws.String("SomeTable"),
-//    }
+//	queryInput := dynamodb.QueryInput{
+//		KeyConditionExpression:    expression.KeyCondition(),
+//		ProjectionExpression:      expression.Projection(),
+//		ExpressionAttributeNames:  expression.Names(),
+//		ExpressionAttributeValues: expression.Values(),
+//		TableName:                 aws.String("SomeTable"),
+//	}
 func (e Expression) Values() map[string]*dynamodb.AttributeValue {
 	return e.valuesMap
 }
@@ -501,12 +501,13 @@ func (e Expression) returnExpression(expressionType expressionType) *string {
 // fmtExpr is a string that has escaped characters to refer to
 // names/values/children which needs to be aliased at runtime in order to avoid
 // duplicate values. The rules are as follows:
-//     $n: Indicates that an alias of a name needs to be inserted. The
-//         corresponding name to be alias is in the []names slice.
-//     $v: Indicates that an alias of a value needs to be inserted. The
-//         corresponding value to be alias is in the []values slice.
-//     $c: Indicates that the fmtExpr of a child exprNode needs to be inserted.
-//         The corresponding child node is in the []children slice.
+//
+//	$n: Indicates that an alias of a name needs to be inserted. The
+//	    corresponding name to be aliased is in the []names slice.
+//	$v: Indicates that an alias of a value needs to be inserted. The
+//	    corresponding value to be aliased is in the []values slice.
+//	$c: Indicates that the fmtExpr of a child exprNode needs to be inserted.
+//	    The corresponding child node is in the []children slice.
 type exprNode struct {
 	names    []string
 	values   []dynamodb.AttributeValue
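As a quick orientation for the reformatted expression.go godoc above, here is a minimal, self-contained sketch of the Builder round trip (not part of the diff; the table and attribute names are illustrative, and unlike the doc snippets it checks the error that Build actually returns):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// Build a key condition and a projection, then render them once.
		keyCond := expression.Key("someKey").Equal(expression.Value("someValue"))
		proj := expression.NamesList(expression.Name("aName"), expression.Name("anotherName"))

		// Build returns (Expression, error); the error reports unset parameters.
		expr, err := expression.NewBuilder().
			WithKeyCondition(keyCond).
			WithProjection(proj).
			Build()
		if err != nil {
			fmt.Println("build failed:", err)
			return
		}

		// The generated placeholders land in Names()/Values(); the input just
		// wires the rendered strings together.
		input := &dynamodb.QueryInput{
			KeyConditionExpression:    expr.KeyCondition(),
			ProjectionExpression:      expr.Projection(),
			ExpressionAttributeNames:  expr.Names(),
			ExpressionAttributeValues: expr.Values(),
			TableName:                 aws.String("SomeTable"),
		}
		fmt.Println(input)
	}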
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/key_condition.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/key_condition.go
index e8917b6774..116b0281c7 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/key_condition.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/key_condition.go
@@ -47,20 +47,20 @@ type KeyConditionBuilder struct {
 //
 // Example:
 //
-//    // keyCondition represents the equal clause of the key "foo" and the
-//    // value 5
-//    keyCondition := expression.KeyEqual(expression.Key("foo"), expression.Value(5))
+//	// keyCondition represents the equal clause of the key "foo" and the
+//	// value 5
+//	keyCondition := expression.KeyEqual(expression.Key("foo"), expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithKeyCondition(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithKeyCondition(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.KeyEqual(expression.Key("foo"), expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo = :five"
+//	expression.KeyEqual(expression.Key("foo"), expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo = :five"
 func KeyEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyConditionBuilder{
 		operandList: []OperandBuilder{keyBuilder, valueBuilder},
@@ -75,20 +75,20 @@ func KeyEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuil
 //
 // Example:
 //
-//    // keyCondition represents the equal clause of the key "foo" and the
-//    // value 5
-//    keyCondition := expression.Key("foo").Equal(expression.Value(5))
+//	// keyCondition represents the equal clause of the key "foo" and the
+//	// value 5
+//	keyCondition := expression.Key("foo").Equal(expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithKeyCondition(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithKeyCondition(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.Key("foo").Equal(expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo = :five"
+//	expression.Key("foo").Equal(expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo = :five"
 func (kb KeyBuilder) Equal(valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyEqual(kb, valueBuilder)
 }
@@ -99,18 +99,18 @@ func (kb KeyBuilder) Equal(valueBuilder ValueBuilder) KeyConditionBuilder {
 //
 // Example:
 //
-//    // keyCondition represents the less than clause of the key "foo" and the
-//    // value 5
-//    keyCondition := expression.KeyLessThan(expression.Key("foo"), expression.Value(5))
+//	// keyCondition represents the less than clause of the key "foo" and the
+//	// value 5
+//	keyCondition := expression.KeyLessThan(expression.Key("foo"), expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.KeyLessThan(expression.Key("foo"), expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo < :five"
+//	expression.KeyLessThan(expression.Key("foo"), expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo < :five"
 func KeyLessThan(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyConditionBuilder{
 		operandList: []OperandBuilder{keyBuilder, valueBuilder},
@@ -124,18 +124,18 @@ func KeyLessThan(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionB
 //
 // Example:
 //
-//    // keyCondition represents the less than clause of the key "foo" and the
-//    // value 5
-//    keyCondition := expression.Key("foo").LessThan(expression.Value(5))
+//	// keyCondition represents the less than clause of the key "foo" and the
+//	// value 5
+//	keyCondition := expression.Key("foo").LessThan(expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.Key("foo").LessThan(expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo < :five"
+//	expression.Key("foo").LessThan(expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo < :five"
 func (kb KeyBuilder) LessThan(valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyLessThan(kb, valueBuilder)
 }
@@ -146,18 +146,18 @@ func (kb KeyBuilder) LessThan(valueBuilder ValueBuilder) KeyConditionBuilder {
 //
 // Example:
 //
-//    // keyCondition represents the less than equal to clause of the key
-//    // "foo" and the value 5
-//    keyCondition := expression.KeyLessThanEqual(expression.Key("foo"), expression.Value(5))
+//	// keyCondition represents the less than equal to clause of the key
+//	// "foo" and the value 5
+//	keyCondition := expression.KeyLessThanEqual(expression.Key("foo"), expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.KeyLessThanEqual(expression.Key("foo"), expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo <= :five"
+//	expression.KeyLessThanEqual(expression.Key("foo"), expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo <= :five"
 func KeyLessThanEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyConditionBuilder{
 		operandList: []OperandBuilder{keyBuilder, valueBuilder},
@@ -171,18 +171,18 @@ func KeyLessThanEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyCondi
 //
 // Example:
 //
-//    // keyCondition represents the less than equal to clause of the key
-//    // "foo" and the value 5
-//    keyCondition := expression.Key("foo").LessThanEqual(expression.Value(5))
+//	// keyCondition represents the less than equal to clause of the key
+//	// "foo" and the value 5
+//	keyCondition := expression.Key("foo").LessThanEqual(expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.Key("foo").LessThanEqual(expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo <= :five"
+//	expression.Key("foo").LessThanEqual(expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo <= :five"
 func (kb KeyBuilder) LessThanEqual(valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyLessThanEqual(kb, valueBuilder)
 }
@@ -193,18 +193,18 @@ func (kb KeyBuilder) LessThanEqual(valueBuilder ValueBuilder) KeyConditionBuilde
 //
 // Example:
 //
-//    // keyCondition represents the greater than clause of the key "foo" and
-//    // the value 5
-//    keyCondition := expression.KeyGreaterThan(expression.Key("foo"), expression.Value(5))
+//	// keyCondition represents the greater than clause of the key "foo" and
+//	// the value 5
+//	keyCondition := expression.KeyGreaterThan(expression.Key("foo"), expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.KeyGreaterThan(expression.Key("foo"), expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo > :five"
+//	expression.KeyGreaterThan(expression.Key("foo"), expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo > :five"
 func KeyGreaterThan(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyConditionBuilder{
 		operandList: []OperandBuilder{keyBuilder, valueBuilder},
@@ -218,18 +218,18 @@ func KeyGreaterThan(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditi
 //
 // Example:
 //
-//    // key condition represents the greater than clause of the key "foo" and
-//    // the value 5
-//    keyCondition := expression.Key("foo").GreaterThan(expression.Value(5))
+//	// key condition represents the greater than clause of the key "foo" and
+//	// the value 5
+//	keyCondition := expression.Key("foo").GreaterThan(expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.Key("foo").GreaterThan(expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo > :five"
+//	expression.Key("foo").GreaterThan(expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo > :five"
 func (kb KeyBuilder) GreaterThan(valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyGreaterThan(kb, valueBuilder)
 }
@@ -241,18 +241,18 @@ func (kb KeyBuilder) GreaterThan(valueBuilder ValueBuilder) KeyConditionBuilder
 //
 // Example:
 //
-//    // keyCondition represents the greater than equal to clause of the key
-//    // "foo" and the value 5
-//    keyCondition := expression.KeyGreaterThanEqual(expression.Key("foo"), expression.Value(5))
+//	// keyCondition represents the greater than equal to clause of the key
+//	// "foo" and the value 5
+//	keyCondition := expression.KeyGreaterThanEqual(expression.Key("foo"), expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.KeyGreaterThanEqual(expression.Key("foo"), expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo >= :five"
+//	expression.KeyGreaterThanEqual(expression.Key("foo"), expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo >= :five"
 func KeyGreaterThanEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyConditionBuilder{
 		operandList: []OperandBuilder{keyBuilder, valueBuilder},
@@ -266,18 +266,18 @@ func KeyGreaterThanEqual(keyBuilder KeyBuilder, valueBuilder ValueBuilder) KeyCo
 //
 // Example:
 //
-//    // keyCondition represents the greater than equal to clause of the key
-//    // "foo" and the value 5
-//    keyCondition := expression.Key("foo").GreaterThanEqual(expression.Value(5))
+//	// keyCondition represents the greater than equal to clause of the key
+//	// "foo" and the value 5
+//	keyCondition := expression.Key("foo").GreaterThanEqual(expression.Value(5))
 //
-//    // Used in another Key Condition Expression
-//    anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
+//	// Used in another Key Condition Expression
+//	anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition)
 //
 // Expression Equivalent:
 //
-//    expression.Key("foo").GreaterThanEqual(expression.Value(5))
-//    // Let :five be an ExpressionAttributeValue representing the value 5
-//    "foo >= :five"
+//	expression.Key("foo").GreaterThanEqual(expression.Value(5))
+//	// Let :five be an ExpressionAttributeValue representing the value 5
+//	"foo >= :five"
 func (kb KeyBuilder) GreaterThanEqual(valueBuilder ValueBuilder) KeyConditionBuilder {
 	return KeyGreaterThanEqual(kb, valueBuilder)
 }
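The comparison builders above all target the sort key; DynamoDB additionally requires an equality test on the partition key. A hypothetical sketch of composing the two (the attribute names are illustrative, not from the diff):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// Partition-key equality plus a sort-key range comparison.
		keyCond := expression.Key("TeamName").
			Equal(expression.Value("Wildcats")).
			And(expression.Key("Number").GreaterThanEqual(expression.Value(5)))

		expr, err := expression.NewBuilder().WithKeyCondition(keyCond).Build()
		if err != nil {
			fmt.Println("build failed:", err)
			return
		}
		// Prints something like "(#0 = :0) AND (#1 >= :1)" with aliased names.
		fmt.Println(*expr.KeyCondition())
	}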
expression.Key("Number").Equal(expression.Value(1))) +// // keyCondition represents the key condition where the partition key +// // "TeamName" is equal to value "Wildcats" and sort key "Number" is equal +// // to value 1 +// keyCondition := expression.KeyAnd(expression.Key("TeamName").Equal(expression.Value("Wildcats")), expression.Key("Number").Equal(expression.Value(1))) // -// // Used to make an Builder -// builder := expression.NewBuilder().WithKeyCondition(keyCondition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithKeyCondition(keyCondition) // // Expression Equivalent: // -// expression.KeyAnd(expression.Key("TeamName").Equal(expression.Value("Wildcats")), expression.Key("Number").Equal(expression.Value(1))) -// // Let #NUMBER, :teamName, and :one be ExpressionAttributeName and -// // ExpressionAttributeValues representing the item attribute "Number", -// // the value "Wildcats", and the value 1 -// "(TeamName = :teamName) AND (#NUMBER = :one)" +// expression.KeyAnd(expression.Key("TeamName").Equal(expression.Value("Wildcats")), expression.Key("Number").Equal(expression.Value(1))) +// // Let #NUMBER, :teamName, and :one be ExpressionAttributeName and +// // ExpressionAttributeValues representing the item attribute "Number", +// // the value "Wildcats", and the value 1 +// "(TeamName = :teamName) AND (#NUMBER = :one)" func KeyAnd(left, right KeyConditionBuilder) KeyConditionBuilder { if left.mode != equalKeyCond { return KeyConditionBuilder{ @@ -328,21 +328,21 @@ func KeyAnd(left, right KeyConditionBuilder) KeyConditionBuilder { // // Example: // -// // keyCondition represents the key condition where the partition key -// // "TeamName" is equal to value "Wildcats" and sort key "Number" is equal -// // to value 1 -// keyCondition := expression.Key("TeamName").Equal(expression.Value("Wildcats")).And(expression.Key("Number").Equal(expression.Value(1))) +// // keyCondition represents the key condition where the partition key +// // "TeamName" is equal to value "Wildcats" and sort key "Number" is equal +// // to value 1 +// keyCondition := expression.Key("TeamName").Equal(expression.Value("Wildcats")).And(expression.Key("Number").Equal(expression.Value(1))) // -// // Used to make an Builder -// builder := expression.NewBuilder().WithKeyCondition(keyCondition) +// // Used to make an Builder +// builder := expression.NewBuilder().WithKeyCondition(keyCondition) // // Expression Equivalent: // -// expression.Key("TeamName").Equal(expression.Value("Wildcats")).And(expression.Key("Number").Equal(expression.Value(1))) -// // Let #NUMBER, :teamName, and :one be ExpressionAttributeName and -// // ExpressionAttributeValues representing the item attribute "Number", -// // the value "Wildcats", and the value 1 -// "(TeamName = :teamName) AND (#NUMBER = :one)" +// expression.Key("TeamName").Equal(expression.Value("Wildcats")).And(expression.Key("Number").Equal(expression.Value(1))) +// // Let #NUMBER, :teamName, and :one be ExpressionAttributeName and +// // ExpressionAttributeValues representing the item attribute "Number", +// // the value "Wildcats", and the value 1 +// "(TeamName = :teamName) AND (#NUMBER = :one)" func (kcb KeyConditionBuilder) And(right KeyConditionBuilder) KeyConditionBuilder { return KeyAnd(kcb, right) } @@ -353,19 +353,19 @@ func (kcb KeyConditionBuilder) And(right KeyConditionBuilder) KeyConditionBuilde // // Example: // -// // keyCondition represents the boolean key condition of whether the value -// // of the key "foo" is between values 5 and 10 
-// keyCondition := expression.KeyBetween(expression.Key("foo"), expression.Value(5), expression.Value(10)) +// // keyCondition represents the boolean key condition of whether the value +// // of the key "foo" is between values 5 and 10 +// keyCondition := expression.KeyBetween(expression.Key("foo"), expression.Value(5), expression.Value(10)) // -// // Used in another Key Condition Expression -// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) +// // Used in another Key Condition Expression +// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) // // Expression Equivalent: // -// expression.KeyBetween(expression.Key("foo"), expression.Value(5), expression.Value(10)) -// // Let :five and :ten be ExpressionAttributeValues representing the -// // values 5 and 10 respectively -// "foo BETWEEN :five AND :ten" +// expression.KeyBetween(expression.Key("foo"), expression.Value(5), expression.Value(10)) +// // Let :five and :ten be ExpressionAttributeValues representing the +// // values 5 and 10 respectively +// "foo BETWEEN :five AND :ten" func KeyBetween(keyBuilder KeyBuilder, lower, upper ValueBuilder) KeyConditionBuilder { return KeyConditionBuilder{ operandList: []OperandBuilder{keyBuilder, lower, upper}, @@ -379,19 +379,19 @@ func KeyBetween(keyBuilder KeyBuilder, lower, upper ValueBuilder) KeyConditionBu // // Example: // -// // keyCondition represents the boolean key condition of whether the value -// // of the key "foo" is between values 5 and 10 -// keyCondition := expression.Key("foo").Between(expression.Value(5), expression.Value(10)) +// // keyCondition represents the boolean key condition of whether the value +// // of the key "foo" is between values 5 and 10 +// keyCondition := expression.Key("foo").Between(expression.Value(5), expression.Value(10)) // -// // Used in another Key Condition Expression -// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) +// // Used in another Key Condition Expression +// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) // // Expression Equivalent: // -// expression.Key("foo").Between(expression.Value(5), expression.Value(10)) -// // Let :five and :ten be ExpressionAttributeValues representing the -// // values 5 and 10 respectively -// "foo BETWEEN :five AND :ten" +// expression.Key("foo").Between(expression.Value(5), expression.Value(10)) +// // Let :five and :ten be ExpressionAttributeValues representing the +// // values 5 and 10 respectively +// "foo BETWEEN :five AND :ten" func (kb KeyBuilder) Between(lower, upper ValueBuilder) KeyConditionBuilder { return KeyBetween(kb, lower, upper) } @@ -402,18 +402,18 @@ func (kb KeyBuilder) Between(lower, upper ValueBuilder) KeyConditionBuilder { // // Example: // -// // keyCondition represents the boolean key condition of whether the value -// // of the key "foo" is begins with the prefix "bar" -// keyCondition := expression.KeyBeginsWith(expression.Key("foo"), "bar") +// // keyCondition represents the boolean key condition of whether the value +// // of the key "foo" is begins with the prefix "bar" +// keyCondition := expression.KeyBeginsWith(expression.Key("foo"), "bar") // -// // Used in another Key Condition Expression -// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) +// // Used in another Key Condition Expression +// 
anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) // // Expression Equivalent: // -// expression.KeyBeginsWith(expression.Key("foo"), "bar") -// // Let :bar be an ExpressionAttributeValue representing the value "bar" -// "begins_with(foo, :bar)" +// expression.KeyBeginsWith(expression.Key("foo"), "bar") +// // Let :bar be an ExpressionAttributeValue representing the value "bar" +// "begins_with(foo, :bar)" func KeyBeginsWith(keyBuilder KeyBuilder, prefix string) KeyConditionBuilder { valueBuilder := ValueBuilder{ value: prefix, @@ -430,18 +430,18 @@ func KeyBeginsWith(keyBuilder KeyBuilder, prefix string) KeyConditionBuilder { // // Example: // -// // keyCondition represents the boolean key condition of whether the value -// // of the key "foo" is begins with the prefix "bar" -// keyCondition := expression.Key("foo").BeginsWith("bar") +// // keyCondition represents the boolean key condition of whether the value +// // of the key "foo" is begins with the prefix "bar" +// keyCondition := expression.Key("foo").BeginsWith("bar") // -// // Used in another Key Condition Expression -// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) +// // Used in another Key Condition Expression +// anotherKeyCondition := expression.Key("partitionKey").Equal(expression.Value("aValue")).And(keyCondition) // // Expression Equivalent: // -// expression.Key("foo").BeginsWith("bar") -// // Let :bar be an ExpressionAttributeValue representing the value "bar" -// "begins_with(foo, :bar)" +// expression.Key("foo").BeginsWith("bar") +// // Let :bar be an ExpressionAttributeValue representing the value "bar" +// "begins_with(foo, :bar)" func (kb KeyBuilder) BeginsWith(prefix string) KeyConditionBuilder { return KeyBeginsWith(kb, prefix) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go index 58bb83cce6..7f52706666 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go @@ -15,8 +15,8 @@ import ( // // Example: // -// // Create a ValueBuilder representing the string "aValue" -// valueBuilder := expression.Value("aValue") +// // Create a ValueBuilder representing the string "aValue" +// valueBuilder := expression.Value("aValue") type ValueBuilder struct { value interface{} } @@ -29,8 +29,8 @@ type ValueBuilder struct { // // Example: // -// // Create a NameBuilder representing the item attribute "aName" -// nameBuilder := expression.Name("aName") +// // Create a NameBuilder representing the item attribute "aName" +// nameBuilder := expression.Name("aName") type NameBuilder struct { name string } @@ -44,9 +44,9 @@ type NameBuilder struct { // // Example: // -// // Create a SizeBuilder representing the size of the item attribute -// // "aName" -// sizeBuilder := expression.Name("aName").Size() +// // Create a SizeBuilder representing the size of the item attribute +// // "aName" +// sizeBuilder := expression.Name("aName").Size() type SizeBuilder struct { nameBuilder NameBuilder } @@ -61,8 +61,8 @@ type SizeBuilder struct { // // Example: // -// // Create a KeyBuilder representing the item key "aKey" -// keyBuilder := expression.Key("aKey") +// // Create a KeyBuilder representing the item key "aKey" +// keyBuilder := expression.Key("aKey") type KeyBuilder struct { key string } @@ -82,10 +82,12 @@ 
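Between and BeginsWith round out the sort-key operators shown above. A small sketch under stated assumptions (the pk/sk schema and prefix are illustrative; DynamoDB allows exactly one sort-key clause alongside the partition-key equality per query):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// A BETWEEN range on the sort key...
		betweenCond := expression.Key("pk").Equal(expression.Value("user#1")).
			And(expression.Key("sk").Between(expression.Value(5), expression.Value(10)))

		// ...and a begins_with prefix match on the sort key.
		prefixCond := expression.Key("pk").Equal(expression.Value("user#1")).
			And(expression.Key("sk").BeginsWith("order#"))

		for _, kc := range []expression.KeyConditionBuilder{betweenCond, prefixCond} {
			expr, err := expression.NewBuilder().WithKeyCondition(kc).Build()
			if err != nil {
				fmt.Println("build failed:", err)
				continue
			}
			fmt.Println(*expr.KeyCondition())
		}
	}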
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go
index 58bb83cce6..7f52706666 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/operand.go
@@ -15,8 +15,8 @@ import (
 //
 // Example:
 //
-//    // Create a ValueBuilder representing the string "aValue"
-//    valueBuilder := expression.Value("aValue")
+//	// Create a ValueBuilder representing the string "aValue"
+//	valueBuilder := expression.Value("aValue")
 type ValueBuilder struct {
 	value interface{}
 }
@@ -29,8 +29,8 @@ type ValueBuilder struct {
 //
 // Example:
 //
-//    // Create a NameBuilder representing the item attribute "aName"
-//    nameBuilder := expression.Name("aName")
+//	// Create a NameBuilder representing the item attribute "aName"
+//	nameBuilder := expression.Name("aName")
 type NameBuilder struct {
 	name string
 }
@@ -44,9 +44,9 @@ type NameBuilder struct {
 //
 // Example:
 //
-//    // Create a SizeBuilder representing the size of the item attribute
-//    // "aName"
-//    sizeBuilder := expression.Name("aName").Size()
+//	// Create a SizeBuilder representing the size of the item attribute
+//	// "aName"
+//	sizeBuilder := expression.Name("aName").Size()
 type SizeBuilder struct {
 	nameBuilder NameBuilder
 }
@@ -61,8 +61,8 @@ type SizeBuilder struct {
 //
 // Example:
 //
-//    // Create a KeyBuilder representing the item key "aKey"
-//    keyBuilder := expression.Key("aKey")
+//	// Create a KeyBuilder representing the item key "aKey"
+//	keyBuilder := expression.Key("aKey")
 type KeyBuilder struct {
 	key string
 }
@@ -82,10 +82,12 @@ const (
 // SetValueBuilder represents the outcome of operator functions supported by the
 // DynamoDB Set operation. The operator functions are the following:
-//     Plus()  // Represents the "+" operator
-//     Minus() // Represents the "-" operator
-//     ListAppend()
-//     IfNotExists()
+//
+//	Plus()  // Represents the "+" operator
+//	Minus() // Represents the "-" operator
+//	ListAppend()
+//	IfNotExists()
+//
 // For documentation on the above functions,
 // see: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.UpdateExpressions.html#Expressions.UpdateExpressions.SET
 // Since SetValueBuilder represents an operand, it implements the OperandBuilder
@@ -125,12 +127,12 @@ type OperandBuilder interface {
 //
 // Example:
 //
-//    // Specify a top-level attribute
-//    name := expression.Name("TopLevel")
-//    // Specify a nested attribute
-//    nested := expression.Name("Record[6].SongList")
-//    // Use Name() to create a condition expression
-//    condition := expression.Name("foo").Equal(expression.Name("bar"))
+//	// Specify a top-level attribute
+//	name := expression.Name("TopLevel")
+//	// Specify a nested attribute
+//	nested := expression.Name("Record[6].SongList")
+//	// Use Name() to create a condition expression
+//	condition := expression.Name("foo").Equal(expression.Name("bar"))
 func Name(name string) NameBuilder {
 	return NameBuilder{
 		name: name,
@@ -147,10 +149,10 @@ func Name(name string) NameBuilder {
 //
 // Example:
 //
-//    // Use Value() to create a condition expression
-//    condition := expression.Name("foo").Equal(expression.Value(10))
-//    // Use Value() to set the value of a set expression.
-//    update := Set(expression.Name("greets"), expression.Value((&dynamodb.AttributeValue{}).SetS("hello")))
+//	// Use Value() to create a condition expression
+//	condition := expression.Name("foo").Equal(expression.Value(10))
+//	// Use Value() to set the value of a set expression.
+//	update := Set(expression.Name("greets"), expression.Value((&dynamodb.AttributeValue{}).SetS("hello")))
 func Value(value interface{}) ValueBuilder {
 	return ValueBuilder{
 		value: value,
@@ -166,13 +168,13 @@ func Value(value interface{}) ValueBuilder {
 //
 // Example:
 //
-//    // Use Size() to create a condition expression
-//    condition := expression.Name("foo").Size().Equal(expression.Value(10))
+//	// Use Size() to create a condition expression
+//	condition := expression.Name("foo").Size().Equal(expression.Value(10))
 //
 // Expression Equivalent:
 //
-//    expression.Name("aName").Size()
-//    "size (aName)"
+//	expression.Name("aName").Size()
+//	"size (aName)"
 func (nb NameBuilder) Size() SizeBuilder {
 	return SizeBuilder{
 		nameBuilder: nb,
@@ -188,13 +190,13 @@ func (nb NameBuilder) Size() SizeBuilder {
 //
 // Example:
 //
-//    // Use Size() to create a condition expression
-//    condition := expression.Size(expression.Name("foo")).Equal(expression.Value(10))
+//	// Use Size() to create a condition expression
+//	condition := expression.Size(expression.Name("foo")).Equal(expression.Value(10))
 //
 // Expression Equivalent:
 //
-//    expression.Size(expression.Name("aName"))
-//    "size (aName)"
+//	expression.Size(expression.Name("aName"))
+//	"size (aName)"
 func Size(nameBuilder NameBuilder) SizeBuilder {
 	return nameBuilder.Size()
 }
@@ -206,8 +208,8 @@ func Size(nameBuilder NameBuilder) SizeBuilder {
 //
 // Example:
 //
-//    // Use Key() to create a key condition expression
-//    keyCondition := expression.Key("foo").Equal(expression.Value("bar"))
+//	// Use Key() to create a key condition expression
+//	keyCondition := expression.Key("foo").Equal(expression.Value("bar"))
 func Key(key string) KeyBuilder {
 	return KeyBuilder{
 		key: key,
@@ -222,15 +224,15 @@ func Key(key string) KeyBuilder {
 //
 // Example:
 //
-//    // Use Plus() to set the value of the item attribute "someName" to 5 + 10
-//    update, err := expression.Set(expression.Name("someName"), expression.Plus(expression.Value(5), expression.Value(10)))
+//	// Use Plus() to set the value of the item attribute "someName" to 5 + 10
+//	update, err := expression.Set(expression.Name("someName"), expression.Plus(expression.Value(5), expression.Value(10)))
 //
 // Expression Equivalent:
 //
-//    expression.Plus(expression.Value(5), expression.Value(10))
-//    // let :five and :ten be ExpressionAttributeValues for the values 5 and
-//    // 10 respectively.
-//    ":five + :ten"
+//	expression.Plus(expression.Value(5), expression.Value(10))
+//	// let :five and :ten be ExpressionAttributeValues for the values 5 and
+//	// 10 respectively.
+//	":five + :ten"
 func Plus(leftOperand, rightOperand OperandBuilder) SetValueBuilder {
 	return SetValueBuilder{
 		leftOperand:  leftOperand,
@@ -247,15 +249,15 @@ func Plus(leftOperand, rightOperand OperandBuilder) SetValueBuilder {
 //
 // Example:
 //
-//    // Use Plus() to set the value of the item attribute "someName" to the
-//    // numeric value of item attribute "aName" incremented by 10
-//    update, err := expression.Set(expression.Name("someName"), expression.Name("aName").Plus(expression.Value(10)))
+//	// Use Plus() to set the value of the item attribute "someName" to the
+//	// numeric value of item attribute "aName" incremented by 10
+//	update, err := expression.Set(expression.Name("someName"), expression.Name("aName").Plus(expression.Value(10)))
 //
 // Expression Equivalent:
 //
-//    expression.Name("aName").Plus(expression.Value(10))
-//    // let :ten be ExpressionAttributeValues representing the value 10
-//    "aName + :ten"
+//	expression.Name("aName").Plus(expression.Value(10))
+//	// let :ten be an ExpressionAttributeValue representing the value 10
+//	"aName + :ten"
 func (nb NameBuilder) Plus(rightOperand OperandBuilder) SetValueBuilder {
 	return Plus(nb, rightOperand)
 }
@@ -268,15 +270,15 @@ func (nb NameBuilder) Plus(rightOperand OperandBuilder) SetValueBuilder {
 //
 // Example:
 //
-//    // Use Plus() to set the value of the item attribute "someName" to 5 + 10
-//    update, err := expression.Set(expression.Name("someName"), expression.Value(5).Plus(expression.Value(10)))
+//	// Use Plus() to set the value of the item attribute "someName" to 5 + 10
+//	update, err := expression.Set(expression.Name("someName"), expression.Value(5).Plus(expression.Value(10)))
 //
 // Expression Equivalent:
 //
-//    expression.Value(5).Plus(expression.Value(10))
-//    // let :five and :ten be ExpressionAttributeValues representing the value
-//    // 5 and 10 respectively
-//    ":five + :ten"
+//	expression.Value(5).Plus(expression.Value(10))
+//	// let :five and :ten be ExpressionAttributeValues representing the value
+//	// 5 and 10 respectively
+//	":five + :ten"
 func (vb ValueBuilder) Plus(rightOperand OperandBuilder) SetValueBuilder {
 	return Plus(vb, rightOperand)
 }
+// ":five - :ten" func Minus(leftOperand, rightOperand OperandBuilder) SetValueBuilder { return SetValueBuilder{ leftOperand: leftOperand, @@ -314,15 +316,15 @@ func Minus(leftOperand, rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use Minus() to set the value of item attribute "someName" to the -// // numeric value of "aName" decremented by 10 -// update, err := expression.Set(expression.Name("someName"), expression.Name("aName").Minus(expression.Value(10))) +// // Use Minus() to set the value of item attribute "someName" to the +// // numeric value of "aName" decremented by 10 +// update, err := expression.Set(expression.Name("someName"), expression.Name("aName").Minus(expression.Value(10))) // // Expression Equivalent: // -// expression.Name("aName").Minus(expression.Value(10))) -// // let :ten be ExpressionAttributeValues represent the value 10 -// "aName - :ten" +// expression.Name("aName").Minus(expression.Value(10))) +// // let :ten be ExpressionAttributeValues represent the value 10 +// "aName - :ten" func (nb NameBuilder) Minus(rightOperand OperandBuilder) SetValueBuilder { return Minus(nb, rightOperand) } @@ -335,15 +337,15 @@ func (nb NameBuilder) Minus(rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use Minus() to set the value of item attribute "someName" to 5 - 10 -// update, err := expression.Set(expression.Name("someName"), expression.Value(5).Minus(expression.Value(10))) +// // Use Minus() to set the value of item attribute "someName" to 5 - 10 +// update, err := expression.Set(expression.Name("someName"), expression.Value(5).Minus(expression.Value(10))) // // Expression Equivalent: // -// expression.Value(5).Minus(expression.Value(10)) -// // let :five and :ten be ExpressionAttributeValues for the values 5 and -// // 10 respectively. -// ":five - :ten" +// expression.Value(5).Minus(expression.Value(10)) +// // let :five and :ten be ExpressionAttributeValues for the values 5 and +// // 10 respectively. +// ":five - :ten" func (vb ValueBuilder) Minus(rightOperand OperandBuilder) SetValueBuilder { return Minus(vb, rightOperand) } @@ -356,16 +358,16 @@ func (vb ValueBuilder) Minus(rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use ListAppend() to set item attribute "someName" to the item -// // attribute "nameOfList" with "some" and "list" appended to it -// update, err := expression.Set(expression.Name("someName"), expression.ListAppend(expression.Name("nameOfList"), expression.Value([]string{"some", "list"}))) +// // Use ListAppend() to set item attribute "someName" to the item +// // attribute "nameOfList" with "some" and "list" appended to it +// update, err := expression.Set(expression.Name("someName"), expression.ListAppend(expression.Name("nameOfList"), expression.Value([]string{"some", "list"}))) // // Expression Equivalent: // -// expression.ListAppend(expression.Name("nameOfList"), expression.Value([]string{"some", "list"}) -// // let :list be a ExpressionAttributeValue representing the list -// // containing "some" and "list". -// "list_append (nameOfList, :list)" +// expression.ListAppend(expression.Name("nameOfList"), expression.Value([]string{"some", "list"}) +// // let :list be a ExpressionAttributeValue representing the list +// // containing "some" and "list". 
+// "list_append (nameOfList, :list)" func ListAppend(leftOperand, rightOperand OperandBuilder) SetValueBuilder { return SetValueBuilder{ leftOperand: leftOperand, @@ -382,16 +384,16 @@ func ListAppend(leftOperand, rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use ListAppend() to set item attribute "someName" to the item -// // attribute "nameOfList" with "some" and "list" appended to it -// update, err := expression.Set(expression.Name("someName"), expression.Name("nameOfList").ListAppend(expression.Value([]string{"some", "list"}))) +// // Use ListAppend() to set item attribute "someName" to the item +// // attribute "nameOfList" with "some" and "list" appended to it +// update, err := expression.Set(expression.Name("someName"), expression.Name("nameOfList").ListAppend(expression.Value([]string{"some", "list"}))) // // Expression Equivalent: // -// expression.Name("nameOfList").ListAppend(expression.Value([]string{"some", "list"}) -// // let :list be a ExpressionAttributeValue representing the list -// // containing "some" and "list". -// "list_append (nameOfList, :list)" +// expression.Name("nameOfList").ListAppend(expression.Value([]string{"some", "list"}) +// // let :list be a ExpressionAttributeValue representing the list +// // containing "some" and "list". +// "list_append (nameOfList, :list)" func (nb NameBuilder) ListAppend(rightOperand OperandBuilder) SetValueBuilder { return ListAppend(nb, rightOperand) } @@ -404,16 +406,16 @@ func (nb NameBuilder) ListAppend(rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use ListAppend() to set item attribute "someName" to a string list -// // equal to {"a", "list", "some", "list"} -// update, err := expression.Set(expression.Name("someName"), expression.Value([]string{"a", "list"}).ListAppend(expression.Value([]string{"some", "list"}))) +// // Use ListAppend() to set item attribute "someName" to a string list +// // equal to {"a", "list", "some", "list"} +// update, err := expression.Set(expression.Name("someName"), expression.Value([]string{"a", "list"}).ListAppend(expression.Value([]string{"some", "list"}))) // // Expression Equivalent: // -// expression.Name([]string{"a", "list"}).ListAppend(expression.Value([]string{"some", "list"}) -// // let :list1 and :list2 be a ExpressionAttributeValue representing the -// // list {"a", "list"} and {"some", "list"} respectively -// "list_append (:list1, :list2)" +// expression.Name([]string{"a", "list"}).ListAppend(expression.Value([]string{"some", "list"}) +// // let :list1 and :list2 be a ExpressionAttributeValue representing the +// // list {"a", "list"} and {"some", "list"} respectively +// "list_append (:list1, :list2)" func (vb ValueBuilder) ListAppend(rightOperand OperandBuilder) SetValueBuilder { return ListAppend(vb, rightOperand) } @@ -427,15 +429,15 @@ func (vb ValueBuilder) ListAppend(rightOperand OperandBuilder) SetValueBuilder { // // Example: // -// // Use IfNotExists() to set item attribute "someName" to value 5 if -// // "someName" does not exist yet. (Prevents overwrite) -// update, err := expression.Set(expression.Name("someName"), expression.IfNotExists(expression.Name("someName"), expression.Value(5))) +// // Use IfNotExists() to set item attribute "someName" to value 5 if +// // "someName" does not exist yet. 
(Prevents overwrite) +// update, err := expression.Set(expression.Name("someName"), expression.IfNotExists(expression.Name("someName"), expression.Value(5))) // // Expression Equivalent: // -// expression.IfNotExists(expression.Name("someName"), expression.Value(5)) -// // let :five be a ExpressionAttributeValue representing the value 5 -// "if_not_exists (someName, :five)" +// expression.IfNotExists(expression.Name("someName"), expression.Value(5)) +// // let :five be a ExpressionAttributeValue representing the value 5 +// "if_not_exists (someName, :five)" func IfNotExists(name NameBuilder, setValue OperandBuilder) SetValueBuilder { return SetValueBuilder{ leftOperand: name, @@ -453,15 +455,15 @@ func IfNotExists(name NameBuilder, setValue OperandBuilder) SetValueBuilder { // // Example: // -// // Use IfNotExists() to set item attribute "someName" to value 5 if -// // "someName" does not exist yet. (Prevents overwrite) -// update, err := expression.Set(expression.Name("someName"), expression.Name("someName").IfNotExists(expression.Value(5))) +// // Use IfNotExists() to set item attribute "someName" to value 5 if +// // "someName" does not exist yet. (Prevents overwrite) +// update, err := expression.Set(expression.Name("someName"), expression.Name("someName").IfNotExists(expression.Value(5))) // // Expression Equivalent: // -// expression.Name("someName").IfNotExists(expression.Value(5)) -// // let :five be a ExpressionAttributeValue representing the value 5 -// "if_not_exists (someName, :five)" +// expression.Name("someName").IfNotExists(expression.Value(5)) +// // let :five be a ExpressionAttributeValue representing the value 5 +// "if_not_exists (someName, :five)" func (nb NameBuilder) IfNotExists(rightOperand OperandBuilder) SetValueBuilder { return IfNotExists(nb, rightOperand) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go index 059bcf1ee5..4cdf83e790 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go @@ -18,19 +18,19 @@ type ProjectionBuilder struct { // // Example: // -// // projection represents the list of names {"foo", "bar"} -// projection := expression.NamesList(expression.Name("foo"), expression.Name("bar")) +// // projection represents the list of names {"foo", "bar"} +// projection := expression.NamesList(expression.Name("foo"), expression.Name("bar")) // -// // Used in another Projection Expression -// anotherProjection := expression.AddNames(projection, expression.Name("baz")) +// // Used in another Projection Expression +// anotherProjection := expression.AddNames(projection, expression.Name("baz")) // -// // Used to make an Builder -// builder := expression.NewBuilder().WithProjection(anotherProjection) +// // Used to make an Builder +// builder := expression.NewBuilder().WithProjection(anotherProjection) // // Expression Equivalent: // -// expression.NamesList(expression.Name("foo"), expression.Name("bar")) -// "foo, bar" +// expression.NamesList(expression.Name("foo"), expression.Name("bar")) +// "foo, bar" func NamesList(nameBuilder NameBuilder, namesList ...NameBuilder) ProjectionBuilder { namesList = append([]NameBuilder{nameBuilder}, namesList...) 
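A sketch combining the remaining SET operands documented above, ListAppend and IfNotExists, in a single update (the attribute names are illustrative, not from the diff):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// Append to a list and initialize a counter only when the attribute
		// is still missing; chained Set calls share one SET section.
		update := expression.
			Set(expression.Name("tags"),
				expression.Name("tags").ListAppend(expression.Value([]string{"new"}))).
			Set(expression.Name("retries"),
				expression.Name("retries").IfNotExists(expression.Value(0)))

		expr, err := expression.NewBuilder().WithUpdate(update).Build()
		if err != nil {
			fmt.Println("build failed:", err)
			return
		}
		// e.g. "SET #0 = list_append (#0, :0), #1 = if_not_exists (#1, :1)"
		fmt.Println(*expr.Update())
	}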
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go
index 059bcf1ee5..4cdf83e790 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/expression/projection.go
@@ -18,19 +18,19 @@ type ProjectionBuilder struct {
 //
 // Example:
 //
-//    // projection represents the list of names {"foo", "bar"}
-//    projection := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
+//	// projection represents the list of names {"foo", "bar"}
+//	projection := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
 //
-//    // Used in another Projection Expression
-//    anotherProjection := expression.AddNames(projection, expression.Name("baz"))
+//	// Used in another Projection Expression
+//	anotherProjection := expression.AddNames(projection, expression.Name("baz"))
 //
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithProjection(anotherProjection)
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithProjection(anotherProjection)
 //
 // Expression Equivalent:
 //
-//    expression.NamesList(expression.Name("foo"), expression.Name("bar"))
-//    "foo, bar"
+//	expression.NamesList(expression.Name("foo"), expression.Name("bar"))
+//	"foo, bar"
 func NamesList(nameBuilder NameBuilder, namesList ...NameBuilder) ProjectionBuilder {
 	namesList = append([]NameBuilder{nameBuilder}, namesList...)
 	return ProjectionBuilder{
@@ -45,18 +45,18 @@ func NamesList(nameBuilder NameBuilder, namesList ...NameBuilder) ProjectionBuil
 //
 // Example:
 //
-//    // projection represents the list of names {"foo", "bar"}
-//    projection := expression.Name("foo").NamesList(expression.Name("bar"))
+//	// projection represents the list of names {"foo", "bar"}
+//	projection := expression.Name("foo").NamesList(expression.Name("bar"))
 //
-//    // Used in another Projection Expression
-//    anotherProjection := expression.AddNames(projection, expression.Name("baz"))
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithProjection(newProjection)
+//	// Used in another Projection Expression
+//	anotherProjection := expression.AddNames(projection, expression.Name("baz"))
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithProjection(anotherProjection)
 //
 // Expression Equivalent:
 //
-//    expression.Name("foo").NamesList(expression.Name("bar"))
-//    "foo, bar"
+//	expression.Name("foo").NamesList(expression.Name("bar"))
+//	"foo, bar"
 func (nb NameBuilder) NamesList(namesList ...NameBuilder) ProjectionBuilder {
 	return NamesList(nb, namesList...)
 }
@@ -69,19 +69,19 @@ func (nb NameBuilder) NamesList(namesList ...NameBuilder) ProjectionBuilder {
 //
 // Example:
 //
-//    // projection represents the list of names {"foo", "bar", "baz", "qux"}
-//    oldProj := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
-//    projection := expression.AddNames(oldProj, expression.Name("baz"), expression.Name("qux"))
+//	// projection represents the list of names {"foo", "bar", "baz", "qux"}
+//	oldProj := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
+//	projection := expression.AddNames(oldProj, expression.Name("baz"), expression.Name("qux"))
 //
-//    // Used in another Projection Expression
-//    anotherProjection := expression.AddNames(projection, expression.Name("quux"))
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithProjection(newProjection)
+//	// Used in another Projection Expression
+//	anotherProjection := expression.AddNames(projection, expression.Name("quux"))
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithProjection(anotherProjection)
 //
 // Expression Equivalent:
 //
-//    expression.AddNames(expression.NamesList(expression.Name("foo"), expression.Name("bar")), expression.Name("baz"), expression.Name("qux"))
-//    "foo, bar, baz, qux"
+//	expression.AddNames(expression.NamesList(expression.Name("foo"), expression.Name("bar")), expression.Name("baz"), expression.Name("qux"))
+//	"foo, bar, baz, qux"
 func AddNames(projectionBuilder ProjectionBuilder, namesList ...NameBuilder) ProjectionBuilder {
 	projectionBuilder.names = append(projectionBuilder.names, namesList...)
 	return projectionBuilder
@@ -95,19 +95,19 @@ func AddNames(projectionBuilder ProjectionBuilder, namesList ...NameBuilder) Pro
 //
 // Example:
 //
-//    // projection represents the list of names {"foo", "bar", "baz", "qux"}
-//    oldProj := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
-//    projection := oldProj.AddNames(expression.Name("baz"), expression.Name("qux"))
+//	// projection represents the list of names {"foo", "bar", "baz", "qux"}
+//	oldProj := expression.NamesList(expression.Name("foo"), expression.Name("bar"))
+//	projection := oldProj.AddNames(expression.Name("baz"), expression.Name("qux"))
 //
-//    // Used in another Projection Expression
-//    anotherProjection := expression.AddNames(projection, expression.Name("quux"))
-//    // Used to make an Builder
-//    builder := expression.NewBuilder().WithProjection(newProjection)
+//	// Used in another Projection Expression
+//	anotherProjection := expression.AddNames(projection, expression.Name("quux"))
+//	// Used to make a Builder
+//	builder := expression.NewBuilder().WithProjection(anotherProjection)
 //
 // Expression Equivalent:
 //
-//    expression.NamesList(expression.Name("foo"), expression.Name("bar")).AddNames(expression.Name("baz"), expression.Name("qux"))
-//    "foo, bar, baz, qux"
+//	expression.NamesList(expression.Name("foo"), expression.Name("bar")).AddNames(expression.Name("baz"), expression.Name("qux"))
+//	"foo, bar, baz, qux"
 func (pb ProjectionBuilder) AddNames(namesList ...NameBuilder) ProjectionBuilder {
 	return AddNames(pb, namesList...)
 }
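A short projection sketch under the same assumptions (illustrative table and attribute names), pairing the rendered projection with Names() on a read request:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/dynamodb"
		"github.com/aws/aws-sdk-go/service/dynamodb/expression"
	)

	func main() {
		// Start from a NamesList and grow it with AddNames, as in the godoc.
		proj := expression.NamesList(expression.Name("foo"), expression.Name("bar")).
			AddNames(expression.Name("baz"))

		expr, err := expression.NewBuilder().WithProjection(proj).Build()
		if err != nil {
			fmt.Println("build failed:", err)
			return
		}

		// A projection is typically paired with Names() on a read request.
		input := &dynamodb.ScanInput{
			ProjectionExpression:     expr.Projection(),
			ExpressionAttributeNames: expr.Names(),
			TableName:                aws.String("SomeTable"),
		}
		fmt.Println(input)
	}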
Delete() -// // adds the operation to delete the value "subsetToDelete" from the item -// // attribute "pathToList" -// update := update.Delete(expression.Name("pathToList"), expression.Value("subsetToDelete")) +// // Let update represent an already existing update expression. Delete() +// // adds the operation to delete the value "subsetToDelete" from the item +// // attribute "pathToList" +// update := update.Delete(expression.Name("pathToList"), expression.Value("subsetToDelete")) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// Delete(expression.Name("pathToList"), expression.Value("subsetToDelete")) -// // let :del be an ExpressionAttributeValue representing the value -// // "subsetToDelete" -// "DELETE pathToList :del" +// Delete(expression.Name("pathToList"), expression.Value("subsetToDelete")) +// // let :del be an ExpressionAttributeValue representing the value +// // "subsetToDelete" +// "DELETE pathToList :del" func (ub UpdateBuilder) Delete(name NameBuilder, value ValueBuilder) UpdateBuilder { if ub.operationList == nil { ub.operationList = map[operationMode][]operationBuilder{} @@ -160,20 +160,20 @@ func (ub UpdateBuilder) Delete(name NameBuilder, value ValueBuilder) UpdateBuild // // Example: // -// // update represents the add operation to add the value 5 to the item -// // attribute "aPath" -// update := expression.Add(expression.Name("aPath"), expression.Value(5)) +// // update represents the add operation to add the value 5 to the item +// // attribute "aPath" +// update := expression.Add(expression.Name("aPath"), expression.Value(5)) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// expression.Add(expression.Name("aPath"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "ADD aPath :5" +// expression.Add(expression.Name("aPath"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "ADD aPath :5" func Add(name NameBuilder, value ValueBuilder) UpdateBuilder { emptyUpdateBuilder := UpdateBuilder{} return emptyUpdateBuilder.Add(name, value) @@ -186,20 +186,20 @@ func Add(name NameBuilder, value ValueBuilder) UpdateBuilder { // // Example: // -// // Let update represent an already existing update expression. Add() adds -// // the operation to add the value 5 to the item attribute "aPath" -// update := update.Add(expression.Name("aPath"), expression.Value(5)) +// // Let update represent an already existing update expression. 
Add() adds +// // the operation to add the value 5 to the item attribute "aPath" +// update := update.Add(expression.Name("aPath"), expression.Value(5)) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// Add(expression.Name("aPath"), expression.Value(5)) -// // Let :five be an ExpressionAttributeValue representing the value 5 -// "ADD aPath :5" +// Add(expression.Name("aPath"), expression.Value(5)) +// // Let :five be an ExpressionAttributeValue representing the value 5 +// "ADD aPath :5" func (ub UpdateBuilder) Add(name NameBuilder, value ValueBuilder) UpdateBuilder { if ub.operationList == nil { ub.operationList = map[operationMode][]operationBuilder{} @@ -219,19 +219,19 @@ func (ub UpdateBuilder) Add(name NameBuilder, value ValueBuilder) UpdateBuilder // // Example: // -// // update represents the remove operation to remove the item attribute -// // "itemToRemove" -// update := expression.Remove(expression.Name("itemToRemove")) +// // update represents the remove operation to remove the item attribute +// // "itemToRemove" +// update := expression.Remove(expression.Name("itemToRemove")) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// expression.Remove(expression.Name("itemToRemove")) -// "REMOVE itemToRemove" +// expression.Remove(expression.Name("itemToRemove")) +// "REMOVE itemToRemove" func Remove(name NameBuilder) UpdateBuilder { emptyUpdateBuilder := UpdateBuilder{} return emptyUpdateBuilder.Remove(name) @@ -244,19 +244,19 @@ func Remove(name NameBuilder) UpdateBuilder { // // Example: // -// // Let update represent an already existing update expression. Remove() -// // adds the operation to remove the item attribute "itemToRemove" -// update := update.Remove(expression.Name("itemToRemove")) +// // Let update represent an already existing update expression. Remove() +// // adds the operation to remove the item attribute "itemToRemove" +// update := update.Remove(expression.Name("itemToRemove")) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// Remove(expression.Name("itemToRemove")) -// "REMOVE itemToRemove" +// Remove(expression.Name("itemToRemove")) +// "REMOVE itemToRemove" func (ub UpdateBuilder) Remove(name NameBuilder) UpdateBuilder { if ub.operationList == nil { ub.operationList = map[operationMode][]operationBuilder{} @@ -276,22 +276,22 @@ func (ub UpdateBuilder) Remove(name NameBuilder) UpdateBuilder { // // Example: // -// // update represents the set operation to set the item attribute -// // "itemToSet" to the value "setValue" if the item attribute does not -// // exist yet. 
(conditional write) -// update := expression.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) +// // update represents the set operation to set the item attribute +// // "itemToSet" to the value "setValue" if the item attribute does not +// // exist yet. (conditional write) +// update := expression.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// expression.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) -// // Let :val be an ExpressionAttributeValue representing the value -// // "setValue" -// "SET itemToSet = :val" +// expression.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) +// // Let :val be an ExpressionAttributeValue representing the value +// // "setValue" +// "SET itemToSet = :val" func Set(name NameBuilder, operandBuilder OperandBuilder) UpdateBuilder { emptyUpdateBuilder := UpdateBuilder{} return emptyUpdateBuilder.Set(name, operandBuilder) @@ -305,23 +305,23 @@ func Set(name NameBuilder, operandBuilder OperandBuilder) UpdateBuilder { // // Example: // -// // Let update represent an already existing update expression. Set() adds -// // the operation to to set the item attribute "itemToSet" to the value -// // "setValue" if the item attribute does not exist yet. (conditional -// // write) -// update := update.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) +// // Let update represent an already existing update expression. Set() adds +// // the operation to to set the item attribute "itemToSet" to the value +// // "setValue" if the item attribute does not exist yet. 
(conditional +// // write) +// update := update.Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) // -// // Adding more update methods -// anotherUpdate := update.Remove(expression.Name("someName")) -// // Creating a Builder -// builder := Update(update) +// // Adding more update methods +// anotherUpdate := update.Remove(expression.Name("someName")) +// // Creating a Builder +// builder := Update(update) // // Expression Equivalent: // -// Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) -// // Let :val be an ExpressionAttributeValue representing the value -// // "setValue" -// "SET itemToSet = :val" +// Set(expression.Name("itemToSet"), expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))) +// // Let :val be an ExpressionAttributeValue representing the value +// // "setValue" +// "SET itemToSet = :val" func (ub UpdateBuilder) Set(name NameBuilder, operandBuilder OperandBuilder) UpdateBuilder { if ub.operationList == nil { ub.operationList = map[operationMode][]operationBuilder{} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index 5ac20deea9..ce0ed74469 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -42,13 +42,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a DynamoDB client from just a session. -// svc := dynamodb.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a DynamoDB client with additional configuration -// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a DynamoDB client from just a session. +// svc := dynamodb.New(mySession) +// +// // Create a DynamoDB client with additional configuration +// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go deleted file mode 100644 index 62ca2705c6..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ /dev/null @@ -1,18963 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package kms - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opCancelKeyDeletion = "CancelKeyDeletion" - -// CancelKeyDeletionRequest generates a "aws/request.Request" representing the -// client's request for the CancelKeyDeletion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CancelKeyDeletion for more information on using the CancelKeyDeletion -// API call, and error handling. 
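The reformatted godoc examples above show the expression builders only in isolation. What follows is an editorial sketch, not part of the vendored diff, of how the Set/Remove builders and the dynamodb.New constructor are meant to be combined; the table name, key, and attribute names are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/dynamodb/expression"
)

func main() {
	// Set "itemToSet" only if it does not exist yet, and remove
	// "itemToRemove", mirroring the Set/Remove examples above.
	update := expression.
		Set(expression.Name("itemToSet"),
			expression.IfNotExists(expression.Name("itemToSet"), expression.Value("setValue"))).
		Remove(expression.Name("itemToRemove"))

	expr, err := expression.NewBuilder().WithUpdate(update).Build()
	if err != nil {
		panic(err)
	}

	// Client construction as in the service.go example above.
	svc := dynamodb.New(session.Must(session.NewSession()), aws.NewConfig().WithRegion("us-west-2"))

	_, err = svc.UpdateItem(&dynamodb.UpdateItemInput{
		TableName:                 aws.String("MyTable"), // assumed table name
		Key:                       map[string]*dynamodb.AttributeValue{"pk": {S: aws.String("id-1")}}, // assumed key
		UpdateExpression:          expr.Update(),
		ExpressionAttributeNames:  expr.Names(),
		ExpressionAttributeValues: expr.Values(),
	})
	fmt.Println(err)
}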
-// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CancelKeyDeletionRequest method. -// req, resp := client.CancelKeyDeletionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion -func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *request.Request, output *CancelKeyDeletionOutput) { - op := &request.Operation{ - Name: opCancelKeyDeletion, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CancelKeyDeletionInput{} - } - - output = &CancelKeyDeletionOutput{} - req = c.newRequest(op, input, output) - return -} - -// CancelKeyDeletion API operation for AWS Key Management Service. -// -// Cancels the deletion of a KMS key. When this operation succeeds, the key -// state of the KMS key is Disabled. To enable the KMS key, use EnableKey. -// -// For more information about scheduling and canceling deletion of a KMS key, -// see Deleting KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:CancelKeyDeletion (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: ScheduleKeyDeletion -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CancelKeyDeletion for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . 
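The request/Send split described in the deleted godoc above is common to every operation in this generated client. A minimal sketch of both calling styles, assuming an existing *kms.KMS client svc and a placeholder key ID:

input := &kms.CancelKeyDeletionInput{
	KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
}

// One-shot style: build the request, send it, and unwrap the output.
out, err := svc.CancelKeyDeletion(input)

// Request style: pre-build the request so it can be customized before Send;
// the output is not valid until Send returns without error.
req, out2 := svc.CancelKeyDeletionRequest(input)
req.HTTPRequest.Header.Set("X-Example-Trace", "demo") // illustrative custom header
err = req.Send()
_, _, _ = out, out2, err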
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CancelKeyDeletion -func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { - req, out := c.CancelKeyDeletionRequest(input) - return out, req.Send() -} - -// CancelKeyDeletionWithContext is the same as CancelKeyDeletion with the addition of -// the ability to pass a context and additional request options. -// -// See CancelKeyDeletion for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CancelKeyDeletionWithContext(ctx aws.Context, input *CancelKeyDeletionInput, opts ...request.Option) (*CancelKeyDeletionOutput, error) { - req, out := c.CancelKeyDeletionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opConnectCustomKeyStore = "ConnectCustomKeyStore" - -// ConnectCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the ConnectCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ConnectCustomKeyStore for more information on using the ConnectCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ConnectCustomKeyStoreRequest method. -// req, resp := client.ConnectCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore -func (c *KMS) ConnectCustomKeyStoreRequest(input *ConnectCustomKeyStoreInput) (req *request.Request, output *ConnectCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opConnectCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ConnectCustomKeyStoreInput{} - } - - output = &ConnectCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// ConnectCustomKeyStore API operation for AWS Key Management Service. -// -// Connects or reconnects a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// to its associated CloudHSM cluster. -// -// The custom key store must be connected before you can create KMS keys in -// the key store or use the KMS keys it contains. You can disconnect and reconnect -// a custom key store at any time. -// -// To connect a custom key store, its associated CloudHSM cluster must have -// at least one active HSM. To get the number of active HSMs in a cluster, use -// the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. 
To add HSMs to the cluster, use the CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. Also, the kmsuser crypto user (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) -// (CU) must not be logged into the cluster. This prevents KMS from using this -// account to log in. -// -// The connection process can take an extended amount of time to complete; up -// to 20 minutes. This operation starts the connection process, but it does -// not wait for it to complete. When it succeeds, this operation quickly returns -// an HTTP 200 response and a JSON object with no properties. However, this -// response does not indicate that the custom key store is connected. To get -// the connection state of the custom key store, use the DescribeCustomKeyStores -// operation. -// -// During the connection process, KMS finds the CloudHSM cluster that is associated -// with the custom key store, creates the connection infrastructure, connects -// to the cluster, logs into the CloudHSM client as the kmsuser CU, and rotates -// its password. -// -// The ConnectCustomKeyStore operation might fail for various reasons. To find -// the reason, use the DescribeCustomKeyStores operation and see the ConnectionErrorCode -// in the response. For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. -// -// To fix the failure, use the DisconnectCustomKeyStore operation to disconnect -// the custom key store, correct the error, use the UpdateCustomKeyStore operation -// if necessary, and then use ConnectCustomKeyStore again. -// -// If you are having trouble connecting or disconnecting a custom key store, -// see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:ConnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ConnectCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CloudHsmClusterNotActiveException -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. 
These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ConnectCustomKeyStore -func (c *KMS) ConnectCustomKeyStore(input *ConnectCustomKeyStoreInput) (*ConnectCustomKeyStoreOutput, error) { - req, out := c.ConnectCustomKeyStoreRequest(input) - return out, req.Send() -} - -// ConnectCustomKeyStoreWithContext is the same as ConnectCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See ConnectCustomKeyStore for details on how to use this API operation. 
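A sketch of the connect-then-poll flow the deleted docs describe: ConnectCustomKeyStore only starts the (up to 20 minute) connection process, so the state must be read back with DescribeCustomKeyStores. The key store ID is a placeholder:

id := aws.String("cks-1234567890abcdef0") // placeholder
_, err := svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{CustomKeyStoreId: id})
if err == nil {
	desc, derr := svc.DescribeCustomKeyStores(&kms.DescribeCustomKeyStoresInput{CustomKeyStoreId: id})
	if derr == nil {
		for _, ks := range desc.CustomKeyStores {
			// ConnectionState reports CONNECTING/CONNECTED/FAILED; on FAILED,
			// ConnectionErrorCode explains why (see CustomKeyStoresListEntry).
			fmt.Println(aws.StringValue(ks.ConnectionState), aws.StringValue(ks.ConnectionErrorCode))
		}
	}
}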
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ConnectCustomKeyStoreWithContext(ctx aws.Context, input *ConnectCustomKeyStoreInput, opts ...request.Option) (*ConnectCustomKeyStoreOutput, error) { - req, out := c.ConnectCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateAlias = "CreateAlias" - -// CreateAliasRequest generates a "aws/request.Request" representing the -// client's request for the CreateAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateAlias for more information on using the CreateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateAliasRequest method. -// req, resp := client.CreateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias -func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { - op := &request.Operation{ - Name: opCreateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateAliasInput{} - } - - output = &CreateAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// CreateAlias API operation for AWS Key Management Service. -// -// Creates a friendly name for a KMS key. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// You can use an alias to identify a KMS key in the KMS console, in the DescribeKey -// operation and in cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), -// such as Encrypt and GenerateDataKey. You can also change the KMS key that's -// associated with the alias (UpdateAlias) or delete the alias (DeleteAlias) -// at any time. These operations don't affect the underlying KMS key. -// -// You can associate the alias with any customer managed key in the same Amazon -// Web Services Region. Each alias is associated with only one KMS key at a -// time, but a KMS key can have multiple aliases. A valid KMS key is required. -// You can't create an alias without a KMS key. -// -// The alias must be unique in the account and Region, but you can have aliases -// with the same name in different Regions. For detailed information about aliases, -// see Using aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html) -// in the Key Management Service Developer Guide. -// -// This operation does not return a response. 
To get the alias that you created, -// use the ListAliases operation. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on an alias in a -// different Amazon Web Services account. -// -// Required permissions -// -// * kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:CreateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * DeleteAlias -// -// * ListAliases -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * AlreadyExistsException -// The request was rejected because it attempted to create a resource that already -// exists. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidAliasNameException -// The request was rejected because the specified alias name is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateAlias -func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - return out, req.Send() -} - -// CreateAliasWithContext is the same as CreateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See CreateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
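A sketch of the context-aware variant documented above, assuming an existing client svc; a plain context.Context satisfies aws.Context, and the target key ID is a placeholder:

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
_, err := svc.CreateAliasWithContext(ctx, &kms.CreateAliasInput{
	AliasName:   aws.String("alias/example"), // alias names must begin with "alias/"
	TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
})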
-func (c *KMS) CreateAliasWithContext(ctx aws.Context, input *CreateAliasInput, opts ...request.Option) (*CreateAliasOutput, error) { - req, out := c.CreateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateCustomKeyStore = "CreateCustomKeyStore" - -// CreateCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the CreateCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateCustomKeyStore for more information on using the CreateCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateCustomKeyStoreRequest method. -// req, resp := client.CreateCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore -func (c *KMS) CreateCustomKeyStoreRequest(input *CreateCustomKeyStoreInput) (req *request.Request, output *CreateCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opCreateCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateCustomKeyStoreInput{} - } - - output = &CreateCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateCustomKeyStore API operation for AWS Key Management Service. -// -// Creates a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// that is associated with an CloudHSM cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/clusters.html) -// that you own and manage. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Before you create the custom key store, you must assemble the required elements, -// including an CloudHSM cluster that fulfills the requirements for a custom -// key store. For details about the required elements, see Assemble the Prerequisites -// (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. -// -// When the operation completes successfully, it returns the ID of the new custom -// key store. Before you can use your new custom key store, you need to use -// the ConnectCustomKeyStore operation to connect the new key store to its CloudHSM -// cluster. Even if you are not going to use your custom key store immediately, -// you might want to connect it to verify that all settings are correct and -// then disconnect it until you are ready to use it. -// -// For help with failures, see Troubleshooting a Custom Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. 
You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:CreateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CloudHsmClusterInUseException -// The request was rejected because the specified CloudHSM cluster is already -// associated with a custom key store or it shares a backup history with a cluster -// that is associated with a custom key store. Each custom key store must be -// associated with a different CloudHSM cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -// -// * CustomKeyStoreNameInUseException -// The request was rejected because the specified custom key store name is already -// assigned to another custom key store in the account. Try again with a custom -// key store name that is unique in the account. -// -// * CloudHsmClusterNotFoundException -// The request was rejected because KMS cannot find the CloudHSM cluster with -// the specified cluster ID. Retry the request with a different cluster ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CloudHsmClusterNotActiveException -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -// -// * IncorrectTrustAnchorException -// The request was rejected because the trust anchor certificate in the request -// is not the trust anchor certificate for the specified CloudHSM cluster. -// -// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), -// you create the trust anchor certificate and save it in the customerCA.crt -// file. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster--sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. 
-// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM must contain at least one active HSM. -// -// For information about the requirements for an CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for an CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateCustomKeyStore -func (c *KMS) CreateCustomKeyStore(input *CreateCustomKeyStoreInput) (*CreateCustomKeyStoreOutput, error) { - req, out := c.CreateCustomKeyStoreRequest(input) - return out, req.Send() -} - -// CreateCustomKeyStoreWithContext is the same as CreateCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See CreateCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateCustomKeyStoreWithContext(ctx aws.Context, input *CreateCustomKeyStoreInput, opts ...request.Option) (*CreateCustomKeyStoreOutput, error) { - req, out := c.CreateCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateGrant = "CreateGrant" - -// CreateGrantRequest generates a "aws/request.Request" representing the -// client's request for the CreateGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateGrant for more information on using the CreateGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateGrantRequest method. 
-// req, resp := client.CreateGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant -func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, output *CreateGrantOutput) { - op := &request.Operation{ - Name: opCreateGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateGrantInput{} - } - - output = &CreateGrantOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateGrant API operation for AWS Key Management Service. -// -// Adds a grant to a KMS key. -// -// A grant is a policy instrument that allows Amazon Web Services principals -// to use KMS keys in cryptographic operations. It also can allow them to view -// a KMS key (DescribeKey) and create and manage grants. When authorizing access -// to a KMS key, grants are considered along with key policies and IAM policies. -// Grants are often used for temporary permissions because you can create one, -// use its permissions, and delete it without changing your key policies or -// IAM policies. -// -// For detailed information about grants, including grant terminology, see Grants -// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// The CreateGrant operation returns a GrantToken and a GrantId. -// -// * When you create, retire, or revoke a grant, there might be a brief delay, -// usually less than five minutes, until the grant is available throughout -// KMS. This state is known as eventual consistency. Once the grant has achieved -// eventual consistency, the grantee principal can use the permissions in -// the grant without identifying the grant. However, to use the permissions -// in the grant immediately, use the GrantToken that CreateGrant returns. -// For details, see Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) -// in the Key Management Service Developer Guide . -// -// * The CreateGrant operation also returns a GrantId. You can use the GrantId -// and a key identifier to identify the grant in the RetireGrant and RevokeGrant -// operations. To find the grant ID, use the ListGrants or ListRetirableGrants -// operations. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:CreateGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
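A sketch of the grant-token pattern the deleted docs describe: the GrantToken returned by CreateGrant lets the grantee use the permissions before the grant reaches eventual consistency. The key ID, principal ARN, and ciphertext are placeholders:

grant, err := svc.CreateGrant(&kms.CreateGrantInput{
	KeyId:            aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
	Operations:       []*string{aws.String(kms.GrantOperationDecrypt)},
})
if err == nil {
	// Present the token immediately; once the grant is consistent the
	// grantee no longer needs it.
	_, _ = svc.Decrypt(&kms.DecryptInput{
		CiphertextBlob: ciphertext, // previously produced by Encrypt or GenerateDataKey
		GrantTokens:    []*string{grant.GrantToken},
	})
}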
-// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateGrant for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateGrant -func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { - req, out := c.CreateGrantRequest(input) - return out, req.Send() -} - -// CreateGrantWithContext is the same as CreateGrant with the addition of -// the ability to pass a context and additional request options. -// -// See CreateGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateGrantWithContext(ctx aws.Context, input *CreateGrantInput, opts ...request.Option) (*CreateGrantOutput, error) { - req, out := c.CreateGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opCreateKey = "CreateKey" - -// CreateKeyRequest generates a "aws/request.Request" representing the -// client's request for the CreateKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See CreateKey for more information on using the CreateKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the CreateKeyRequest method. 
-// req, resp := client.CreateKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey -func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, output *CreateKeyOutput) { - op := &request.Operation{ - Name: opCreateKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateKeyInput{} - } - - output = &CreateKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// CreateKey API operation for AWS Key Management Service. -// -// Creates a unique customer managed KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms-keys) -// in your Amazon Web Services account and Region. -// -// In addition to the required parameters, you can use the optional parameters -// to specify a key policy, description, tags, and other useful elements for -// any key type. -// -// KMS is replacing the term customer master key (CMK) with KMS key and KMS -// key. The concept has not changed. To prevent breaking changes, KMS is keeping -// some variations of this term. -// -// To create different types of KMS keys, use the following guidance: -// -// Symmetric encryption KMS key -// -// To create a symmetric encryption KMS key, you aren't required to specify -// any parameters. The default value for KeySpec, SYMMETRIC_DEFAULT, and the -// default value for KeyUsage, ENCRYPT_DECRYPT, create a symmetric encryption -// KMS key. -// -// If you need a key for basic encryption and decryption or you are creating -// a KMS key to protect your resources in an Amazon Web Services service, create -// a symmetric encryption KMS key. The key material in a symmetric encryption -// key never leaves KMS unencrypted. You can use a symmetric encryption KMS -// key to encrypt and decrypt data up to 4,096 bytes, but they are typically -// used to generate data keys and data keys pairs. For details, see GenerateDataKey -// and GenerateDataKeyPair. -// -// Asymmetric KMS keys -// -// To create an asymmetric KMS key, use the KeySpec parameter to specify the -// type of key material in the KMS key. Then, use the KeyUsage parameter to -// determine whether the KMS key will be used to encrypt and decrypt or sign -// and verify. You can't change these properties after the KMS key is created. -// -// Asymmetric KMS keys contain an RSA key pair or an Elliptic Curve (ECC) key -// pair. The private key in an asymmetric KMS key never leaves AWS KMS unencrypted. -// However, you can use the GetPublicKey operation to download the public key -// so it can be used outside of AWS KMS. KMS keys with RSA key pairs can be -// used to encrypt or decrypt data or sign and verify messages (but not both). -// KMS keys with ECC key pairs can be used only to sign and verify messages. -// For information about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// HMAC KMS key -// -// To create an HMAC KMS key, set the KeySpec parameter to a key spec value -// for HMAC KMS keys. Then set the KeyUsage parameter to GENERATE_VERIFY_MAC. -// You must set the key usage even though GENERATE_VERIFY_MAC is the only valid -// key usage value for HMAC KMS keys. You can't change these properties after -// the KMS key is created. -// -// HMAC KMS keys are symmetric keys that never leave KMS unencrypted. 
You can -// use HMAC keys to generate (GenerateMac) and verify (VerifyMac) HMAC codes -// for messages up to 4096 bytes. -// -// HMAC KMS keys are not supported in all Amazon Web Services Regions. If you -// try to create an HMAC KMS key in an Amazon Web Services Region in which HMAC -// keys are not supported, the CreateKey operation returns an UnsupportedOperationException. -// For a list of Regions in which HMAC KMS keys are supported, see HMAC keys -// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) -// in the Key Management Service Developer Guide. -// -// Multi-Region primary keys -// -// Imported key material -// -// To create a multi-Region primary key in the local Amazon Web Services Region, -// use the MultiRegion parameter with a value of True. To create a multi-Region -// replica key, that is, a KMS key with the same key ID and key material as -// a primary key, but in a different Amazon Web Services Region, use the ReplicateKey -// operation. To change a replica key to a primary key, and its primary key -// to a replica key, use the UpdatePrimaryRegion operation. -// -// You can create multi-Region KMS keys for all supported KMS key types: symmetric -// encryption KMS keys, HMAC KMS keys, asymmetric encryption KMS keys, and asymmetric -// signing KMS keys. You can also create multi-Region keys with imported key -// material. However, you can't create multi-Region keys in a custom key store. -// -// This operation supports multi-Region keys, an KMS feature that lets you create -// multiple interoperable KMS keys in different Amazon Web Services Regions. -// Because these KMS keys have the same key ID, key material, and other metadata, -// you can use them interchangeably to encrypt data in one Amazon Web Services -// Region and decrypt it in a different Amazon Web Services Region without re-encrypting -// the data or making a cross-Region call. For more information about multi-Region -// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. -// -// To import your own key material, begin by creating a symmetric encryption -// KMS key with no key material. To do this, use the Origin parameter of CreateKey -// with a value of EXTERNAL. Next, use GetParametersForImport operation to get -// a public key and import token, and use the public key to encrypt your key -// material. Then, use ImportKeyMaterial with your import token to import the -// key material. For step-by-step instructions, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide . -// -// This feature supports only symmetric encryption KMS keys, including multi-Region -// symmetric encryption KMS keys. You cannot import key material into any other -// type of KMS key. -// -// To create a multi-Region primary key with imported key material, use the -// Origin parameter of CreateKey with a value of EXTERNAL and the MultiRegion -// parameter with a value of True. To create replicas of the multi-Region primary -// key, use the ReplicateKey operation. For more information about multi-Region -// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) -// in the Key Management Service Developer Guide. 
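A sketch of the CreateKey variants walked through above (symmetric default, HMAC, and a multi-Region primary prepared for imported key material); the enum constant names are the v1 SDK spellings of the values the docs mention and should be treated as assumptions:

sym, _ := svc.CreateKey(&kms.CreateKeyInput{}) // defaults: SYMMETRIC_DEFAULT, ENCRYPT_DECRYPT

hmacKey, _ := svc.CreateKey(&kms.CreateKeyInput{
	KeySpec:  aws.String(kms.KeySpecHmac256),                // HMAC_256
	KeyUsage: aws.String(kms.KeyUsageTypeGenerateVerifyMac), // required for HMAC keys
})

primary, _ := svc.CreateKey(&kms.CreateKeyInput{
	MultiRegion: aws.Bool(true),
	Origin:      aws.String(kms.OriginTypeExternal), // key material supplied later via ImportKeyMaterial
})
_, _, _ = sym, hmacKey, primary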
-// -// Custom key store -// -// To create a symmetric encryption KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), -// use the CustomKeyStoreId parameter to specify the custom key store. You must -// also use the Origin parameter with a value of AWS_CLOUDHSM. The CloudHSM -// cluster that is associated with the custom key store must have at least two -// active HSMs in different Availability Zones in the Amazon Web Services Region. -// -// Custom key stores support only symmetric encryption KMS keys. You cannot -// create an HMAC KMS key or an asymmetric KMS key in a custom key store. For -// information about custom key stores in KMS see Custom key stores in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in the Key Management Service Developer Guide . -// -// Cross-account use: No. You cannot use this operation to create a KMS key -// in a different Amazon Web Services account. -// -// Required permissions: kms:CreateKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). To use the Tags parameter, kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy). For examples and information about related permissions, see -// Allow a user to create KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policy-example-create-key) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * DescribeKey -// -// * ListKeys -// -// * ScheduleKeyDeletion -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation CreateKey for usage and error information. -// -// Returned Error Types: -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. 
-// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CloudHsmClusterInvalidConfigurationException -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. -// -// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound -// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound -// rules and the Destination in the outbound rules must match the security -// group ID. These rules are set by default when you create the cluster. -// Do not delete or change them. To get information about a particular security -// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) -// operation. -// -// * The cluster must contain at least as many HSMs as the operation requires. -// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) -// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey -// operations, the CloudHSM cluster must have at least two active HSMs, each -// in a different Availability Zone. For the ConnectCustomKeyStore operation, -// the CloudHSM cluster must contain at least one active HSM. -// -// For information about the requirements for a CloudHSM cluster that is associated -// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) -// in the Key Management Service Developer Guide. For information about creating -// a private subnet for a CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) -// in the CloudHSM User Guide. For information about cluster security groups, -// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) -// in the CloudHSM User Guide. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/CreateKey -func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { - req, out := c.CreateKeyRequest(input) - return out, req.Send() -} - -// CreateKeyWithContext is the same as CreateKey with the addition of -// the ability to pass a context and additional request options. -// -// See CreateKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) CreateKeyWithContext(ctx aws.Context, input *CreateKeyInput, opts ...request.Option) (*CreateKeyOutput, error) { - req, out := c.CreateKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDecrypt = "Decrypt" - -// DecryptRequest generates a "aws/request.Request" representing the -// client's request for the Decrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Decrypt for more information on using the Decrypt -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DecryptRequest method. -// req, resp := client.DecryptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt -func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output *DecryptOutput) { - op := &request.Operation{ - Name: opDecrypt, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DecryptInput{} - } - - output = &DecryptOutput{} - req = c.newRequest(op, input, output) - return -} - -// Decrypt API operation for AWS Key Management Service. -// -// Decrypts ciphertext that was encrypted by a KMS key using any of the following -// operations: -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyWithoutPlaintext -// -// * GenerateDataKeyPairWithoutPlaintext -// -// You can use this operation to decrypt ciphertext that was encrypted under -// a symmetric encryption KMS key or an asymmetric encryption KMS key. When -// the KMS key is asymmetric, you must specify the KMS key and the encryption -// algorithm that was used to encrypt the ciphertext. For information about -// asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the Key Management Service Developer Guide. -// -// The Decrypt operation also decrypts ciphertext that was encrypted outside -// of KMS by the public key in a KMS asymmetric KMS key. However, it cannot -// decrypt symmetric ciphertext produced by other libraries, such as the Amazon -// Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) -// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). -// These libraries return a ciphertext format that is incompatible with KMS. -// -// If the ciphertext was encrypted under a symmetric encryption KMS key, the -// KeyId parameter is optional. KMS can get this information from metadata that -// it adds to the symmetric ciphertext blob. This feature adds durability to -// your implementation by ensuring that authorized users can decrypt ciphertext -// decades after it was encrypted, even if they've lost track of the key ID. -// However, specifying the KMS key is always recommended as a best practice.
-// When you use the KeyId parameter to specify a KMS key, KMS only uses the -// KMS key you specify. If the ciphertext was encrypted under a different KMS -// key, the Decrypt operation fails. This practice ensures that you use the -// KMS key that you intend. -// -// Whenever possible, use key policies to give users permission to call the -// Decrypt operation on a particular KMS key, instead of using IAM policies. -// Otherwise, you might create an IAM user policy that gives the user Decrypt -// permission on all KMS keys. This user could decrypt ciphertext that was encrypted -// by KMS keys in other accounts if the key policy for the cross-account KMS -// key permits it. If you must use an IAM policy for Decrypt permissions, limit -// the user to particular KMS keys or particular trusted accounts. For details, -// see Best practices for IAM policies (https://docs.aws.amazon.com/kms/latest/developerguide/iam-policies.html#iam-policies-best-practices) -// in the Key Management Service Developer Guide. -// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Decrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * ReEncrypt -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Decrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidCiphertextException -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * IncorrectKeyException -// The request was rejected because the specified KMS key cannot decrypt the -// data. 
The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request -// must identify the same KMS key that was used to encrypt the ciphertext. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Decrypt -func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { - req, out := c.DecryptRequest(input) - return out, req.Send() -} - -// DecryptWithContext is the same as Decrypt with the addition of -// the ability to pass a context and additional request options. -// -// See Decrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DecryptWithContext(ctx aws.Context, input *DecryptInput, opts ...request.Option) (*DecryptOutput, error) { - req, out := c.DecryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteAlias = "DeleteAlias" - -// DeleteAliasRequest generates a "aws/request.Request" representing the -// client's request for the DeleteAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteAlias for more information on using the DeleteAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteAliasRequest method. 
-// req, resp := client.DeleteAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias -func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { - op := &request.Operation{ - Name: opDeleteAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteAliasInput{} - } - - output = &DeleteAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteAlias API operation for AWS Key Management Service. -// -// Deletes the specified alias. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// Because an alias is not a property of a KMS key, you can delete and change -// the aliases of a KMS key without affecting the KMS key. Also, aliases do -// not appear in the response from the DescribeKey operation. To get the aliases -// of all KMS keys, use the ListAliases operation. -// -// Each KMS key can have multiple aliases. To change the alias of a KMS key, -// use DeleteAlias to delete the current alias and CreateAlias to create a new -// alias. To associate an existing alias with a different KMS key, call UpdateAlias. -// -// Cross-account use: No. You cannot perform this operation on an alias in a -// different Amazon Web Services account. -// -// Required permissions -// -// * kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:DeleteAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the KMS key (key policy). -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * ListAliases -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . 
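A hedged sketch of the delete-then-recreate alias flow described above, against this vendored aws-sdk-go v1 client; the alias name and target key ID are hypothetical, and credentials/region are assumed to come from the environment.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Delete the current alias, then recreate it pointing at another KMS key,
	// as the comment above describes (UpdateAlias is the one-step alternative).
	if _, err := svc.DeleteAlias(&kms.DeleteAliasInput{
		AliasName: aws.String("alias/example"),
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.CreateAlias(&kms.CreateAliasInput{
		AliasName:   aws.String("alias/example"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	}); err != nil {
		log.Fatal(err)
	}
}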
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteAlias -func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - return out, req.Send() -} - -// DeleteAliasWithContext is the same as DeleteAlias with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteAliasWithContext(ctx aws.Context, input *DeleteAliasInput, opts ...request.Option) (*DeleteAliasOutput, error) { - req, out := c.DeleteAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteCustomKeyStore = "DeleteCustomKeyStore" - -// DeleteCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteCustomKeyStore for more information on using the DeleteCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteCustomKeyStoreRequest method. -// req, resp := client.DeleteCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStore -func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req *request.Request, output *DeleteCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opDeleteCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteCustomKeyStoreInput{} - } - - output = &DeleteCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteCustomKeyStore API operation for AWS Key Management Service. -// -// Deletes a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// This operation does not delete the CloudHSM cluster that is associated with -// the custom key store, or affect any users or keys in the cluster. -// -// The custom key store that you delete cannot contain any KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys). -// Before deleting the key store, verify that you will never need to use any -// of the KMS keys in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// Then, use ScheduleKeyDeletion to delete the KMS keys from the key store. -// When the scheduled waiting period expires, the ScheduleKeyDeletion operation -// deletes the KMS keys.
Then it makes a best effort to delete the key material -// from the associated cluster. However, you might need to manually delete the -// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) -// from the cluster and its backups. -// -// After all KMS keys are deleted from KMS, use DisconnectCustomKeyStore to -// disconnect the key store from KMS. Then, you can delete the custom key store. -// -// Instead of deleting the custom key store, consider using DisconnectCustomKeyStore -// to disconnect it from KMS. While the key store is disconnected, you cannot -// create or use the KMS keys in the key store. But, you do not need to delete -// KMS keys and you can reconnect a disconnected custom key store at any time. -// -// If the operation succeeds, it returns a JSON object with no properties. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:DeleteCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreHasCMKsException -// The request was rejected because the custom key store contains KMS keys. -// After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion -// operation to delete the KMS keys. After they are deleted, you can delete -// the custom key store. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried.
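As a caller-side illustration of the teardown sequence described above (empty the store, disconnect it, then delete it), a minimal sketch against this vendored client. The store ID is hypothetical, and ScheduleKeyDeletion of any remaining keys is assumed to have completed already.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	storeID := aws.String("cks-1234567890abcdef0") // hypothetical ID

	// Per the comment above: the store must hold no KMS keys and must be
	// disconnected before it can be deleted.
	if _, err := svc.DisconnectCustomKeyStore(&kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.DeleteCustomKeyStore(&kms.DeleteCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err) // e.g. CustomKeyStoreHasCMKsException if keys remain
	}
}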
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteCustomKeyStore -func (c *KMS) DeleteCustomKeyStore(input *DeleteCustomKeyStoreInput) (*DeleteCustomKeyStoreOutput, error) { - req, out := c.DeleteCustomKeyStoreRequest(input) - return out, req.Send() -} - -// DeleteCustomKeyStoreWithContext is the same as DeleteCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteCustomKeyStoreWithContext(ctx aws.Context, input *DeleteCustomKeyStoreInput, opts ...request.Option) (*DeleteCustomKeyStoreOutput, error) { - req, out := c.DeleteCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteImportedKeyMaterial = "DeleteImportedKeyMaterial" - -// DeleteImportedKeyMaterialRequest generates a "aws/request.Request" representing the -// client's request for the DeleteImportedKeyMaterial operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteImportedKeyMaterial for more information on using the DeleteImportedKeyMaterial -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteImportedKeyMaterialRequest method. -// req, resp := client.DeleteImportedKeyMaterialRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial -func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialInput) (req *request.Request, output *DeleteImportedKeyMaterialOutput) { - op := &request.Operation{ - Name: opDeleteImportedKeyMaterial, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteImportedKeyMaterialInput{} - } - - output = &DeleteImportedKeyMaterialOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteImportedKeyMaterial API operation for AWS Key Management Service. -// -// Deletes key material that you previously imported. This operation makes the -// specified KMS key unusable. For more information about importing key material -// into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide. -// -// When the specified KMS key is in the PendingDeletion state, this operation -// does not change the KMS key's state. Otherwise, it changes the KMS key's -// state to PendingImport. -// -// After you delete key material, you can use ImportKeyMaterial to reimport -// the same key material into the KMS key. 
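A minimal sketch of the operation documented above, using this vendored aws-sdk-go v1 client; the key ID is hypothetical and must refer to a key with imported (EXTERNAL) key material.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Removes the imported material; the key becomes unusable and moves to
	// the PendingImport state, as described above.
	_, err := svc.DeleteImportedKeyMaterial(&kms.DeleteImportedKeyMaterialInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
}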
-// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DeleteImportedKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetParametersForImport -// -// * ImportKeyMaterial -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DeleteImportedKeyMaterial for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DeleteImportedKeyMaterial -func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) { - req, out := c.DeleteImportedKeyMaterialRequest(input) - return out, req.Send() -} - -// DeleteImportedKeyMaterialWithContext is the same as DeleteImportedKeyMaterial with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteImportedKeyMaterial for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DeleteImportedKeyMaterialWithContext(ctx aws.Context, input *DeleteImportedKeyMaterialInput, opts ...request.Option) (*DeleteImportedKeyMaterialOutput, error) { - req, out := c.DeleteImportedKeyMaterialRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeCustomKeyStores = "DescribeCustomKeyStores" - -// DescribeCustomKeyStoresRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCustomKeyStores operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeCustomKeyStores for more information on using the DescribeCustomKeyStores -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeCustomKeyStoresRequest method. -// req, resp := client.DescribeCustomKeyStoresRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStores -func (c *KMS) DescribeCustomKeyStoresRequest(input *DescribeCustomKeyStoresInput) (req *request.Request, output *DescribeCustomKeyStoresOutput) { - op := &request.Operation{ - Name: opDescribeCustomKeyStores, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeCustomKeyStoresInput{} - } - - output = &DescribeCustomKeyStoresOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeCustomKeyStores API operation for AWS Key Management Service. -// -// Gets information about custom key stores (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in the account and Region. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// feature in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// By default, this operation returns information about all custom key stores -// in the account and Region. To get only information about a particular custom -// key store, use either the CustomKeyStoreName or CustomKeyStoreId parameter -// (but not both). -// -// To determine whether the custom key store is connected to its CloudHSM cluster, -// use the ConnectionState element in the response. If an attempt to connect -// the custom key store failed, the ConnectionState value is FAILED and the -// ConnectionErrorCode element in the response indicates the cause of the failure. -// For help interpreting the ConnectionErrorCode, see CustomKeyStoresListEntry. -// -// Custom key stores have a DISCONNECTED connection state if the key store has -// never been connected or you use the DisconnectCustomKeyStore operation to -// disconnect it. If your custom key store state is CONNECTED but you are having -// trouble using it, make sure that its associated CloudHSM cluster is active -// and contains the minimum number of HSMs required for the operation, if any. -// -// For help repairing your custom key store, see the Troubleshooting Custom -// Key Stores (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html) -// topic in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. 
-// -// Required permissions: kms:DescribeCustomKeyStores (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DisconnectCustomKeyStore -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DescribeCustomKeyStores for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeCustomKeyStores -func (c *KMS) DescribeCustomKeyStores(input *DescribeCustomKeyStoresInput) (*DescribeCustomKeyStoresOutput, error) { - req, out := c.DescribeCustomKeyStoresRequest(input) - return out, req.Send() -} - -// DescribeCustomKeyStoresWithContext is the same as DescribeCustomKeyStores with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeCustomKeyStores for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DescribeCustomKeyStoresWithContext(ctx aws.Context, input *DescribeCustomKeyStoresInput, opts ...request.Option) (*DescribeCustomKeyStoresOutput, error) { - req, out := c.DescribeCustomKeyStoresRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeKey = "DescribeKey" - -// DescribeKeyRequest generates a "aws/request.Request" representing the -// client's request for the DescribeKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeKey for more information on using the DescribeKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeKeyRequest method. 
-// req, resp := client.DescribeKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey -func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, output *DescribeKeyOutput) { - op := &request.Operation{ - Name: opDescribeKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeKeyInput{} - } - - output = &DescribeKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeKey API operation for AWS Key Management Service. -// -// Provides detailed information about a KMS key. You can run DescribeKey on -// a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) -// or an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). -// -// This detailed information includes the key ARN, creation date (and deletion -// date, if applicable), the key state, and the origin and expiration date (if -// any) of the key material. It includes fields, like KeySpec, that help you -// distinguish different types of KMS keys. It also displays the key usage (encryption, -// signing, or generating and verifying MACs) and the algorithms that the KMS -// key supports. For KMS keys in custom key stores, it includes information -// about the custom key store, such as the key store ID and the CloudHSM cluster -// ID. For multi-Region keys, it displays the primary key and all related replica -// keys. -// -// DescribeKey does not return the following information: -// -// * Aliases associated with the KMS key. To get this information, use ListAliases. -// -// * Whether automatic key rotation is enabled on the KMS key. To get this -// information, use GetKeyRotationStatus. Also, some key states prevent a -// KMS key from being automatically rotated. For details, see How Automatic -// Key Rotation Works (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-how-it-works) -// in Key Management Service Developer Guide. -// -// * Tags on the KMS key. To get this information, use ListResourceTags. -// -// * Key policies and grants on the KMS key. To get this information, use -// GetKeyPolicy and ListGrants. -// -// In general, DescribeKey is a non-mutating operation. It returns data about -// KMS keys, but doesn't change them. However, Amazon Web Services services -// use DescribeKey to create Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) -// from a predefined Amazon Web Services alias with no key ID. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:DescribeKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetKeyPolicy -// -// * GetKeyRotationStatus -// -// * ListAliases -// -// * ListGrants -// -// * ListKeys -// -// * ListResourceTags -// -// * ListRetirableGrants -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
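A minimal DescribeKey sketch that also demonstrates the awserr.Error runtime type assertion just mentioned; the alias is hypothetical, and credentials/region come from the environment.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// DescribeKey accepts a key ID, key ARN, alias name, or alias ARN.
	out, err := svc.DescribeKey(&kms.DescribeKeyInput{KeyId: aws.String("alias/example")})
	if err != nil {
		// Inspect the error code via awserr.Error, as the comment above suggests.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == kms.ErrCodeNotFoundException {
			log.Fatal("no such key or alias")
		}
		log.Fatal(err)
	}
	md := out.KeyMetadata
	fmt.Println(aws.StringValue(md.KeySpec), aws.StringValue(md.KeyUsage), aws.StringValue(md.KeyState))
}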
-// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DescribeKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DescribeKey -func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { - req, out := c.DescribeKeyRequest(input) - return out, req.Send() -} - -// DescribeKeyWithContext is the same as DescribeKey with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DescribeKeyWithContext(ctx aws.Context, input *DescribeKeyInput, opts ...request.Option) (*DescribeKeyOutput, error) { - req, out := c.DescribeKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisableKey = "DisableKey" - -// DisableKeyRequest generates a "aws/request.Request" representing the -// client's request for the DisableKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisableKey for more information on using the DisableKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisableKeyRequest method. -// req, resp := client.DisableKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey -func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, output *DisableKeyOutput) { - op := &request.Operation{ - Name: opDisableKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisableKeyInput{} - } - - output = &DisableKeyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisableKey API operation for AWS Key Management Service. -// -// Sets the state of a KMS key to disabled. This change temporarily prevents -// use of the KMS key for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). 
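Caller-side, the operation documented above is a single call; a minimal sketch with a hypothetical key ID follows.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Disable the key; EnableKey (documented below) reverses this.
	_, err := svc.DisableKey(&kms.DisableKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
}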
-// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DisableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: EnableKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisableKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKey -func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { - req, out := c.DisableKeyRequest(input) - return out, req.Send() -} - -// DisableKeyWithContext is the same as DisableKey with the addition of -// the ability to pass a context and additional request options. -// -// See DisableKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisableKeyWithContext(ctx aws.Context, input *DisableKeyInput, opts ...request.Option) (*DisableKeyOutput, error) { - req, out := c.DisableKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisableKeyRotation = "DisableKeyRotation" - -// DisableKeyRotationRequest generates a "aws/request.Request" representing the -// client's request for the DisableKeyRotation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
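A sketch of the *WithContext variants shown above, which take a non-nil context for request cancellation; here the call is abandoned after five seconds. The key ID is hypothetical.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// aws.Context is an alias for context.Context, so a standard context
	// with a deadline can be passed straight through.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	_, err := svc.DisableKeyWithContext(ctx, &kms.DisableKeyInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	})
	if err != nil {
		log.Fatal(err)
	}
}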
-// -// See DisableKeyRotation for more information on using the DisableKeyRotation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisableKeyRotationRequest method. -// req, resp := client.DisableKeyRotationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation -func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *request.Request, output *DisableKeyRotationOutput) { - op := &request.Operation{ - Name: opDisableKeyRotation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisableKeyRotationInput{} - } - - output = &DisableKeyRotationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisableKeyRotation API operation for AWS Key Management Service. -// -// Disables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// for the specified symmetric encryption KMS key. -// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:DisableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * EnableKeyRotation -// -// * GetKeyRotationStatus -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisableKeyRotation for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisableKeyRotation -func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRotationOutput, error) { - req, out := c.DisableKeyRotationRequest(input) - return out, req.Send() -} - -// DisableKeyRotationWithContext is the same as DisableKeyRotation with the addition of -// the ability to pass a context and additional request options. -// -// See DisableKeyRotation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisableKeyRotationWithContext(ctx aws.Context, input *DisableKeyRotationInput, opts ...request.Option) (*DisableKeyRotationOutput, error) { - req, out := c.DisableKeyRotationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDisconnectCustomKeyStore = "DisconnectCustomKeyStore" - -// DisconnectCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the DisconnectCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DisconnectCustomKeyStore for more information on using the DisconnectCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DisconnectCustomKeyStoreRequest method. -// req, resp := client.DisconnectCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStore -func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInput) (req *request.Request, output *DisconnectCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opDisconnectCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DisconnectCustomKeyStoreInput{} - } - - output = &DisconnectCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DisconnectCustomKeyStore API operation for AWS Key Management Service. 
-// -// Disconnects the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// from its associated CloudHSM cluster. While a custom key store is disconnected, -// you can manage the custom key store and its KMS keys, but you cannot create -// or use KMS keys in the custom key store. You can reconnect the custom key -// store at any time. -// -// While a custom key store is disconnected, all attempts to create KMS keys -// in the custom key store or to use existing KMS keys in cryptographic operations -// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) -// will fail. This action can prevent users from storing and accessing sensitive -// data. -// -// To find the connection state of a custom key store, use the DescribeCustomKeyStores -// operation. To reconnect a custom key store, use the ConnectCustomKeyStore -// operation. -// -// If the operation succeeds, it returns a JSON object with no properties. -// -// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) -// in KMS, which combines the convenience and extensive integration -// of KMS with the isolation and control of a single-tenant key store. -// -// Cross-account use: No. You cannot perform this operation on a custom key -// store in a different Amazon Web Services account. -// -// Required permissions: kms:DisconnectCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * ConnectCustomKeyStore -// -// * CreateCustomKeyStore -// -// * DeleteCustomKeyStore -// -// * DescribeCustomKeyStores -// -// * UpdateCustomKeyStore -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation DisconnectCustomKeyStore for usage and error information. -// -// Returned Error Types: -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried.
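A minimal disconnect/reconnect sketch matching the description above; the custom key store ID is hypothetical, and credentials/region come from the environment.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	storeID := aws.String("cks-1234567890abcdef0") // hypothetical ID

	// Disconnect for maintenance; keys in the store stay manageable but
	// cannot be used until ConnectCustomKeyStore reconnects the store.
	if _, err := svc.DisconnectCustomKeyStore(&kms.DisconnectCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: storeID,
	}); err != nil {
		log.Fatal(err)
	}
}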
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/DisconnectCustomKeyStore -func (c *KMS) DisconnectCustomKeyStore(input *DisconnectCustomKeyStoreInput) (*DisconnectCustomKeyStoreOutput, error) { - req, out := c.DisconnectCustomKeyStoreRequest(input) - return out, req.Send() -} - -// DisconnectCustomKeyStoreWithContext is the same as DisconnectCustomKeyStore with the addition of -// the ability to pass a context and additional request options. -// -// See DisconnectCustomKeyStore for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) DisconnectCustomKeyStoreWithContext(ctx aws.Context, input *DisconnectCustomKeyStoreInput, opts ...request.Option) (*DisconnectCustomKeyStoreOutput, error) { - req, out := c.DisconnectCustomKeyStoreRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEnableKey = "EnableKey" - -// EnableKeyRequest generates a "aws/request.Request" representing the -// client's request for the EnableKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See EnableKey for more information on using the EnableKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the EnableKeyRequest method. -// req, resp := client.EnableKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey -func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, output *EnableKeyOutput) { - op := &request.Operation{ - Name: opEnableKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EnableKeyInput{} - } - - output = &EnableKeyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// EnableKey API operation for AWS Key Management Service. -// -// Sets the key state of a KMS key to enabled. This allows you to use the KMS -// key for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:EnableKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: DisableKey -// -// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation EnableKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKey -func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { - req, out := c.EnableKeyRequest(input) - return out, req.Send() -} - -// EnableKeyWithContext is the same as EnableKey with the addition of -// the ability to pass a context and additional request options. -// -// See EnableKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EnableKeyWithContext(ctx aws.Context, input *EnableKeyInput, opts ...request.Option) (*EnableKeyOutput, error) { - req, out := c.EnableKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEnableKeyRotation = "EnableKeyRotation" - -// EnableKeyRotationRequest generates a "aws/request.Request" representing the -// client's request for the EnableKeyRotation operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See EnableKeyRotation for more information on using the EnableKeyRotation -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the EnableKeyRotationRequest method. 
-// req, resp := client.EnableKeyRotationRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation -func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *request.Request, output *EnableKeyRotationOutput) { - op := &request.Operation{ - Name: opEnableKeyRotation, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EnableKeyRotationInput{} - } - - output = &EnableKeyRotationOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// EnableKeyRotation API operation for AWS Key Management Service. -// -// Enables automatic rotation of the key material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// for the specified symmetric encryption KMS key. -// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:EnableKeyRotation (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * DisableKeyRotation -// -// * GetKeyRotationStatus -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation EnableKeyRotation for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/EnableKeyRotation -func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotationOutput, error) { - req, out := c.EnableKeyRotationRequest(input) - return out, req.Send() -} - -// EnableKeyRotationWithContext is the same as EnableKeyRotation with the addition of -// the ability to pass a context and additional request options. -// -// See EnableKeyRotation for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EnableKeyRotationWithContext(ctx aws.Context, input *EnableKeyRotationInput, opts ...request.Option) (*EnableKeyRotationOutput, error) { - req, out := c.EnableKeyRotationRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opEncrypt = "Encrypt" - -// EncryptRequest generates a "aws/request.Request" representing the -// client's request for the Encrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See Encrypt for more information on using the Encrypt -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the EncryptRequest method. -// req, resp := client.EncryptRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt -func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output *EncryptOutput) { - op := &request.Operation{ - Name: opEncrypt, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &EncryptInput{} - } - - output = &EncryptOutput{} - req = c.newRequest(op, input, output) - return -} - -// Encrypt API operation for AWS Key Management Service. -// -// Encrypts plaintext of up to 4,096 bytes using a KMS key. You can use a symmetric -// or asymmetric KMS key with a KeyUsage of ENCRYPT_DECRYPT. -// -// You can use this operation to encrypt small amounts of arbitrary data, such -// as a personal identifier or database password, or other sensitive information. -// You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey -// and GenerateDataKeyPair operations return a plaintext data key and an encrypted -// copy of that data key. -// -// If you use a symmetric encryption KMS key, you can use an encryption context -// to add additional security to your encryption operation. 
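A short sketch tying together the EnableKey and EnableKeyRotation operations documented above (the key ID is a placeholder; both calls return empty outputs on success):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	keyID := aws.String("1234abcd-12ab-34cd-56ef-1234567890ab") // placeholder key ID

	// Re-enable a disabled symmetric encryption KMS key for cryptographic use...
	if _, err := svc.EnableKey(&kms.EnableKeyInput{KeyId: keyID}); err != nil {
		log.Fatal(err)
	}
	// ...then turn on automatic rotation of its key material.
	if _, err := svc.EnableKeyRotation(&kms.EnableKeyRotationInput{KeyId: keyID}); err != nil {
		log.Fatal(err)
	}
}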
If you specify an -// EncryptionContext when encrypting data, you must specify the same encryption -// context (a case-sensitive exact match) when decrypting the data. Otherwise, -// the request to decrypt fails with an InvalidCiphertextException. For more -// information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// If you specify an asymmetric KMS key, you must also specify the encryption -// algorithm. The algorithm must be compatible with the KMS key type. -// -// When you use an asymmetric KMS key to encrypt or reencrypt data, be sure -// to record the KMS key and encryption algorithm that you choose. You will -// be required to provide the same KMS key and encryption algorithm when you -// decrypt the data. If the KMS key and algorithm do not match the values used -// to encrypt the data, the decrypt operation fails. -// -// You are not required to supply the key ID and encryption algorithm when you -// decrypt with symmetric encryption KMS keys because KMS stores this information -// in the ciphertext blob. KMS cannot store metadata in ciphertext generated -// with asymmetric keys. The standard format for asymmetric key ciphertext does -// not include configurable fields. -// -// The maximum size of the data that you can encrypt varies with the type of -// KMS key and the encryption algorithm that you choose. -// -// * Symmetric encryption KMS keys SYMMETRIC_DEFAULT: 4096 bytes -// -// * RSA_2048 RSAES_OAEP_SHA_1: 214 bytes RSAES_OAEP_SHA_256: 190 bytes -// -// * RSA_3072 RSAES_OAEP_SHA_1: 342 bytes RSAES_OAEP_SHA_256: 318 bytes -// -// * RSA_4096 RSAES_OAEP_SHA_1: 470 bytes RSAES_OAEP_SHA_256: 446 bytes -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:Encrypt (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Encrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. 
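As a hedged illustration of the Encrypt call and the encryption-context rule described above (the key alias and context values are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.Encrypt(&kms.EncryptInput{
		KeyId:     aws.String("alias/my-app-key"), // placeholder alias
		Plaintext: []byte("database password"),    // at most 4,096 bytes for SYMMETRIC_DEFAULT
		// Decrypt must later supply this exact, case-sensitive context.
		EncryptionContext: aws.StringMap(map[string]string{"purpose": "db-credentials"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("ciphertext blob is %d bytes\n", len(out.CiphertextBlob))
}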
-// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Encrypt -func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { - req, out := c.EncryptRequest(input) - return out, req.Send() -} - -// EncryptWithContext is the same as Encrypt with the addition of -// the ability to pass a context and additional request options. -// -// See Encrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) EncryptWithContext(ctx aws.Context, input *EncryptInput, opts ...request.Option) (*EncryptOutput, error) { - req, out := c.EncryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKey = "GenerateDataKey" - -// GenerateDataKeyRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKey for more information on using the GenerateDataKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyRequest method. 
-// req, resp := client.GenerateDataKeyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey -func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.Request, output *GenerateDataKeyOutput) { - op := &request.Operation{ - Name: opGenerateDataKey, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyInput{} - } - - output = &GenerateDataKeyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKey API operation for AWS Key Management Service. -// -// Returns a unique symmetric data key for use outside of KMS. This operation -// returns a plaintext copy of the data key and a copy that is encrypted under -// a symmetric encryption KMS key that you specify. The bytes in the plaintext -// key are random; they are not related to the caller or the KMS key. You can -// use the plaintext key to encrypt your data outside of KMS and store the encrypted -// data key with the encrypted data. -// -// To generate a data key, specify the symmetric encryption KMS key that will -// be used to encrypt the data key. You cannot use an asymmetric KMS key to -// encrypt data keys. To get the type of your KMS key, use the DescribeKey operation. -// You must also specify the length of the data key. Use either the KeySpec -// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data -// keys, use the KeySpec parameter. -// -// To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. -// To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext -// operation. To get a cryptographically secure random byte string, use GenerateRandom. -// -// You can use an optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// How to use your data key -// -// We recommend that you use the following pattern to encrypt data locally in -// your application. 
You can write your own code or use a client-side encryption -// library, such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), -// the Amazon DynamoDB Encryption Client (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), -// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) -// to do these tasks for you. -// -// To encrypt data outside of KMS: -// -// Use the GenerateDataKey operation to get a data key. -// -// Use the plaintext data key (in the Plaintext field of the response) to encrypt -// your data outside of KMS. Then erase the plaintext data key from memory. -// -// Store the encrypted data key (in the CiphertextBlob field of the response) -// with the encrypted data. -// -// To decrypt data outside of KMS: -// -// Use the Decrypt operation to decrypt the encrypted data key. The operation -// returns a plaintext copy of the data key. -// -// Use the plaintext data key to decrypt data outside of KMS, then erase the -// plaintext data key from memory. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKey for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. 
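The local-encryption pattern recommended above, sketched with GenerateDataKey and the Go standard library's AES-GCM (the alias is a placeholder; production code would more likely use the Encryption SDK or another vetted client-side library):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// 1. Get a 256-bit data key: a plaintext copy plus a KMS-encrypted copy.
	dk, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/my-app-key"), // placeholder alias
		KeySpec: aws.String(kms.DataKeySpecAes256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Encrypt locally with AES-GCM using the plaintext key.
	block, err := aes.NewCipher(dk.Plaintext)
	if err != nil {
		log.Fatal(err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		log.Fatal(err)
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		log.Fatal(err)
	}
	sealed := gcm.Seal(nonce, nonce, []byte("secret payload"), nil)

	// 3. Store sealed alongside dk.CiphertextBlob, then erase the plaintext
	// key from memory, as the guidance above says.
	for i := range dk.Plaintext {
		dk.Plaintext[i] = 0
	}
	fmt.Printf("stored %d ciphertext bytes with a %d-byte wrapped key\n",
		len(sealed), len(dk.CiphertextBlob))
}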
-// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKey -func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutput, error) { - req, out := c.GenerateDataKeyRequest(input) - return out, req.Send() -} - -// GenerateDataKeyWithContext is the same as GenerateDataKey with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyWithContext(ctx aws.Context, input *GenerateDataKeyInput, opts ...request.Option) (*GenerateDataKeyOutput, error) { - req, out := c.GenerateDataKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKeyPair = "GenerateDataKeyPair" - -// GenerateDataKeyPairRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyPair operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyPair for more information on using the GenerateDataKeyPair -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyPairRequest method. -// req, resp := client.GenerateDataKeyPairRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair -func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req *request.Request, output *GenerateDataKeyPairOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyPair, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyPairInput{} - } - - output = &GenerateDataKeyPairOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyPair API operation for AWS Key Management Service. -// -// Returns a unique asymmetric data key pair for use outside of KMS. This operation -// returns a plaintext public key, a plaintext private key, and a copy of the -// private key that is encrypted under the symmetric encryption KMS key you -// specify. You can use the data key pair to perform asymmetric cryptography -// and implement digital signatures outside of KMS. 
The bytes in the keys are -// random; they are not related to the caller or to the KMS key that is used to -// encrypt the private key. -// -// You can use the public key that GenerateDataKeyPair returns to encrypt data -// or verify a signature outside of KMS. Then, store the encrypted private key -// with the data. When you are ready to decrypt data or sign a message, you -// can use the Decrypt operation to decrypt the encrypted private key. -// -// To generate a data key pair, you must specify a symmetric encryption KMS -// key to encrypt the private key in a data key pair. You cannot use an asymmetric -// KMS key or a KMS key in a custom key store. To get the type and origin of -// your KMS key, use the DescribeKey operation. -// -// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data -// key pair. KMS recommends that you use ECC key pairs for signing, and use -// RSA key pairs for either encryption or signing, but not both. However, KMS -// cannot enforce any restrictions on the use of data key pairs outside of KMS. -// -// If you are using the data key pair to encrypt data, or for any operation -// where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext -// operation. GenerateDataKeyPairWithoutPlaintext returns a plaintext public -// key and an encrypted private key, but omits the plaintext private key that -// you need only to decrypt ciphertext or sign a message. Later, when you need -// to decrypt the data or sign a message, use the Decrypt operation to decrypt -// the encrypted private key in the data key pair. -// -// GenerateDataKeyPair returns a unique data key pair for each request. The -// bytes in the keys are random; they are not related to the caller or the KMS -// key that is used to encrypt the private key. The public key is a DER-encoded -// X.509 SubjectPublicKeyInfo, as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280). -// The private key is a DER-encoded PKCS8 PrivateKeyInfo, as specified in RFC -// 5958 (https://tools.ietf.org/html/rfc5958). -// -// You can use an optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyPair (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPairWithoutPlaintext -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyPair for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair -func (c *KMS) GenerateDataKeyPair(input *GenerateDataKeyPairInput) (*GenerateDataKeyPairOutput, error) { - req, out := c.GenerateDataKeyPairRequest(input) - return out, req.Send() -} - -// GenerateDataKeyPairWithContext is the same as GenerateDataKeyPair with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyPair for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyPairWithContext(ctx aws.Context, input *GenerateDataKeyPairInput, opts ...request.Option) (*GenerateDataKeyPairOutput, error) { - req, out := c.GenerateDataKeyPairRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() -} - -const opGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" - -// GenerateDataKeyPairWithoutPlaintextRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyPairWithoutPlaintext operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyPairWithoutPlaintext for more information on using the GenerateDataKeyPairWithoutPlaintext -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyPairWithoutPlaintextRequest method. -// req, resp := client.GenerateDataKeyPairWithoutPlaintextRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext -func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyPairWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyPairWithoutPlaintextOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyPairWithoutPlaintext, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyPairWithoutPlaintextInput{} - } - - output = &GenerateDataKeyPairWithoutPlaintextOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyPairWithoutPlaintext API operation for AWS Key Management Service. -// -// Returns a unique asymmetric data key pair for use outside of KMS. This operation -// returns a plaintext public key and a copy of the private key that is encrypted -// under the symmetric encryption KMS key you specify. Unlike GenerateDataKeyPair, -// this operation does not return a plaintext private key. The bytes in the -// keys are random; they are not related to the caller or to the KMS key that -// is used to encrypt the private key. -// -// You can use the public key that GenerateDataKeyPairWithoutPlaintext returns -// to encrypt data or verify a signature outside of KMS. Then, store the encrypted -// private key with the data. When you are ready to decrypt data or sign a message, -// you can use the Decrypt operation to decrypt the encrypted private key. -// -// To generate a data key pair, you must specify a symmetric encryption KMS -// key to encrypt the private key in a data key pair. You cannot use an asymmetric -// KMS key or a KMS key in a custom key store. To get the type and origin of -// your KMS key, use the DescribeKey operation. -// -// Use the KeyPairSpec parameter to choose an RSA or Elliptic Curve (ECC) data -// key pair. KMS recommends that you use ECC key pairs for signing, and use -// RSA key pairs for either encryption or signing, but not both. However, KMS -// cannot enforce any restrictions on the use of data key pairs outside of KMS. -// -// GenerateDataKeyPairWithoutPlaintext returns a unique data key pair for each -// request. The bytes in the key are not related to the caller or KMS key that -// is used to encrypt the private key.
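A sketch of the GenerateDataKeyPair operation whose implementation closes just above: the DER-encoded public key parses directly with the standard library, and the wrapped private key is kept for a later Decrypt (the alias and key-pair spec are illustrative):

package main

import (
	"crypto/ecdsa"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String("alias/my-app-key"), // placeholder alias
		KeyPairSpec: aws.String(kms.DataKeyPairSpecEccNistP256),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The public key is DER-encoded SubjectPublicKeyInfo (RFC 5280), so the
	// standard library can parse it directly.
	pub, err := x509.ParsePKIXPublicKey(out.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	if ec, ok := pub.(*ecdsa.PublicKey); ok {
		fmt.Println("got ECC public key on curve", ec.Curve.Params().Name)
	}
	// Store out.PrivateKeyCiphertextBlob with your data; decrypt it with the
	// Decrypt operation only when the private key is actually needed.
	_ = out.PrivateKeyCiphertextBlob
}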
The public key is a DER-encoded X.509 -// SubjectPublicKeyInfo, as specified in RFC 5280 (https://tools.ietf.org/html/rfc5280). -// -// You can use an optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyPairWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyPairWithoutPlaintext for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext -func (c *KMS) GenerateDataKeyPairWithoutPlaintext(input *GenerateDataKeyPairWithoutPlaintextInput) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) - return out, req.Send() -} - -// GenerateDataKeyPairWithoutPlaintextWithContext is the same as GenerateDataKeyPairWithoutPlaintext with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyPairWithoutPlaintext for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyPairWithoutPlaintextWithContext(ctx aws.Context, input *GenerateDataKeyPairWithoutPlaintextInput, opts ...request.Option) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" - -// GenerateDataKeyWithoutPlaintextRequest generates a "aws/request.Request" representing the -// client's request for the GenerateDataKeyWithoutPlaintext operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateDataKeyWithoutPlaintext for more information on using the GenerateDataKeyWithoutPlaintext -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateDataKeyWithoutPlaintextRequest method. -// req, resp := client.GenerateDataKeyWithoutPlaintextRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext -func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyWithoutPlaintextOutput) { - op := &request.Operation{ - Name: opGenerateDataKeyWithoutPlaintext, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateDataKeyWithoutPlaintextInput{} - } - - output = &GenerateDataKeyWithoutPlaintextOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateDataKeyWithoutPlaintext API operation for AWS Key Management Service. -// -// Returns a unique symmetric data key for use outside of KMS. 
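A hedged sketch of the GenerateDataKeyPairWithoutPlaintext flow documented above, deferring private-key recovery to a later Decrypt call (the alias is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Get only the public key and the wrapped private key; no plaintext
	// private key is returned, so nothing sensitive needs scrubbing here.
	out, err := svc.GenerateDataKeyPairWithoutPlaintext(&kms.GenerateDataKeyPairWithoutPlaintextInput{
		KeyId:       aws.String("alias/my-app-key"), // placeholder alias
		KeyPairSpec: aws.String(kms.DataKeyPairSpecRsa2048),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("public key: %d bytes, wrapped private key: %d bytes\n",
		len(out.PublicKey), len(out.PrivateKeyCiphertextBlob))

	// Later, when a signature or decryption is actually needed:
	priv, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: out.PrivateKeyCiphertextBlob})
	if err != nil {
		log.Fatal(err)
	}
	_ = priv.Plaintext // DER-encoded PKCS #8 private key
}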
This operation -// returns a data key that is encrypted under a symmetric encryption KMS key -// that you specify. The bytes in the key are random; they are not related to -// the caller or to the KMS key. -// -// GenerateDataKeyWithoutPlaintext is identical to the GenerateDataKey operation -// except that it does not return a plaintext copy of the data key. -// -// This operation is useful for systems that need to encrypt data at some point, -// but not immediately. When you need to encrypt the data, you call the Decrypt -// operation on the encrypted copy of the key. It's also useful in distributed -// systems with different levels of trust. For example, you might store encrypted -// data in containers. One component of your system creates new containers and -// stores an encrypted data key with each container. Then, a different component -// puts the data into the containers. That component first decrypts the data -// key, uses the plaintext data key to encrypt data, puts the encrypted data -// into the container, and then destroys the plaintext data key. In this system, -// the component that creates the containers never sees the plaintext data key. -// -// To request an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext -// operations. -// -// To generate a data key, you must specify the symmetric encryption KMS key -// that is used to encrypt the data key. You cannot use an asymmetric KMS key -// or a key in a custom key store to generate a data key. To get the type of -// your KMS key, use the DescribeKey operation. -// -// If the operation succeeds, you will find the encrypted copy of the data key -// in the CiphertextBlob field. -// -// You can use an optional encryption context to add additional security to -// the encryption operation. If you specify an EncryptionContext, you must specify -// the same encryption context (a case-sensitive exact match) when decrypting -// the encrypted data key. Otherwise, the request to decrypt fails with an InvalidCiphertextException. -// For more information, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateDataKeyWithoutPlaintext (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// * GenerateDataKeyPairWithoutPlaintext -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateDataKeyWithoutPlaintext for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. 
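The split-trust container pattern described above, sketched in two steps against the v1 client (the alias is a placeholder; in a real system the two components would run in separate processes):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Component A: create a container and stash only the encrypted data key;
	// no plaintext copy of the key ever exists in this component.
	out, err := svc.GenerateDataKeyWithoutPlaintext(&kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String("alias/my-app-key"), // placeholder alias
		KeySpec: aws.String(kms.DataKeySpecAes256),
	})
	if err != nil {
		log.Fatal(err)
	}
	wrappedKey := out.CiphertextBlob

	// Component B: only when data must be written, unwrap the key.
	dk, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: wrappedKey})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("recovered a %d-byte data key for local encryption\n", len(dk.Plaintext))
}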
-// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyWithoutPlaintext -func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlaintextInput) (*GenerateDataKeyWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) - return out, req.Send() -} - -// GenerateDataKeyWithoutPlaintextWithContext is the same as GenerateDataKeyWithoutPlaintext with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateDataKeyWithoutPlaintext for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateDataKeyWithoutPlaintextWithContext(ctx aws.Context, input *GenerateDataKeyWithoutPlaintextInput, opts ...request.Option) (*GenerateDataKeyWithoutPlaintextOutput, error) { - req, out := c.GenerateDataKeyWithoutPlaintextRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateMac = "GenerateMac" - -// GenerateMacRequest generates a "aws/request.Request" representing the -// client's request for the GenerateMac operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See GenerateMac for more information on using the GenerateMac -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateMacRequest method. -// req, resp := client.GenerateMacRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMac -func (c *KMS) GenerateMacRequest(input *GenerateMacInput) (req *request.Request, output *GenerateMacOutput) { - op := &request.Operation{ - Name: opGenerateMac, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateMacInput{} - } - - output = &GenerateMacOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateMac API operation for AWS Key Management Service. -// -// Generates a hash-based message authentication code (HMAC) for a message using -// an HMAC KMS key and a MAC algorithm that the key supports. The MAC algorithm -// computes the HMAC for the message and the key as described in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104). -// -// You can use the HMAC that this operation generates with the VerifyMac operation -// to demonstrate that the original message has not changed. Also, because a -// secret key is used to create the hash, you can verify that the party that -// generated the hash has the required secret key. This operation is part of -// KMS support for HMAC KMS keys. For details, see HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) -// in the Key Management Service Developer Guide . -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. -// -// Required permissions: kms:GenerateMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: VerifyMac -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateMac for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). 
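A minimal GenerateMac sketch for the HMAC flow documented above (the HMAC key alias is a placeholder; the algorithm constant is one of the SDK's MacAlgorithmSpec values):

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        aws.String("alias/my-hmac-key"), // placeholder HMAC key alias
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
		Message:      []byte("message to authenticate"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("HMAC:", base64.StdEncoding.EncodeToString(out.Mac))
	// A receiver with kms:VerifyMac permission can check the tag with the
	// VerifyMac operation rather than recomputing it locally.
}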
-// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateMac -func (c *KMS) GenerateMac(input *GenerateMacInput) (*GenerateMacOutput, error) { - req, out := c.GenerateMacRequest(input) - return out, req.Send() -} - -// GenerateMacWithContext is the same as GenerateMac with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateMac for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateMacWithContext(ctx aws.Context, input *GenerateMacInput, opts ...request.Option) (*GenerateMacOutput, error) { - req, out := c.GenerateMacRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGenerateRandom = "GenerateRandom" - -// GenerateRandomRequest generates a "aws/request.Request" representing the -// client's request for the GenerateRandom operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GenerateRandom for more information on using the GenerateRandom -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GenerateRandomRequest method. 
-// req, resp := client.GenerateRandomRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom -func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Request, output *GenerateRandomOutput) { - op := &request.Operation{ - Name: opGenerateRandom, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GenerateRandomInput{} - } - - output = &GenerateRandomOutput{} - req = c.newRequest(op, input, output) - return -} - -// GenerateRandom API operation for AWS Key Management Service. -// -// Returns a random byte string that is cryptographically secure. -// -// By default, the random byte string is generated in KMS. To generate the byte -// string in the CloudHSM cluster that is associated with a custom key store -// (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), -// specify the custom key store ID. -// -// Applications in Amazon Web Services Nitro Enclaves can call this operation -// by using the Amazon Web Services Nitro Enclaves Development Kit (https://github.com/aws/aws-nitro-enclaves-sdk-c). -// For information about the supporting parameters, see How Amazon Web Services -// Nitro Enclaves use KMS (https://docs.aws.amazon.com/kms/latest/developerguide/services-nitro-enclaves.html) -// in the Key Management Service Developer Guide. -// -// For more information about entropy and random number generation, see Key -// Management Service Cryptographic Details (https://docs.aws.amazon.com/kms/latest/cryptographic-details/). -// -// Required permissions: kms:GenerateRandom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GenerateRandom for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * CustomKeyStoreNotFoundException -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -// -// * CustomKeyStoreInvalidStateException -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. 
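-//
-// As an editorial sketch (assuming svc is a *kms.KMS client), requesting 32
-// bytes of cryptographically secure random data might look like:
-//
-// out, err := svc.GenerateRandom(&kms.GenerateRandomInput{
-// NumberOfBytes: aws.Int64(32), // 1 to 1024 bytes per request
-// })
-// if err == nil {
-// fmt.Printf("received %d random bytes\n", len(out.Plaintext))
-// }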
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateRandom -func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, error) { - req, out := c.GenerateRandomRequest(input) - return out, req.Send() -} - -// GenerateRandomWithContext is the same as GenerateRandom with the addition of -// the ability to pass a context and additional request options. -// -// See GenerateRandom for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GenerateRandomWithContext(ctx aws.Context, input *GenerateRandomInput, opts ...request.Option) (*GenerateRandomOutput, error) { - req, out := c.GenerateRandomRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetKeyPolicy = "GetKeyPolicy" - -// GetKeyPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetKeyPolicy for more information on using the GetKeyPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetKeyPolicyRequest method. -// req, resp := client.GetKeyPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy -func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Request, output *GetKeyPolicyOutput) { - op := &request.Operation{ - Name: opGetKeyPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetKeyPolicyInput{} - } - - output = &GetKeyPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetKeyPolicy API operation for AWS Key Management Service. -// -// Gets a key policy attached to the specified KMS key. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:GetKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: PutKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetKeyPolicy for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. 
-// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyPolicy -func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error) { - req, out := c.GetKeyPolicyRequest(input) - return out, req.Send() -} - -// GetKeyPolicyWithContext is the same as GetKeyPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See GetKeyPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetKeyPolicyWithContext(ctx aws.Context, input *GetKeyPolicyInput, opts ...request.Option) (*GetKeyPolicyOutput, error) { - req, out := c.GetKeyPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetKeyRotationStatus = "GetKeyRotationStatus" - -// GetKeyRotationStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyRotationStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetKeyRotationStatus for more information on using the GetKeyRotationStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetKeyRotationStatusRequest method. -// req, resp := client.GetKeyRotationStatusRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus -func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req *request.Request, output *GetKeyRotationStatusOutput) { - op := &request.Operation{ - Name: opGetKeyRotationStatus, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetKeyRotationStatusInput{} - } - - output = &GetKeyRotationStatusOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetKeyRotationStatus API operation for AWS Key Management Service. -// -// Gets a Boolean value that indicates whether automatic rotation of the key -// material (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) -// is enabled for the specified KMS key. 
-// -// You cannot enable automatic rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), -// HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), -// KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), -// or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). -// To enable or disable automatic rotation of a set of related multi-Region -// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), -// set the property on the primary key. The key rotation status for these KMS -// keys is always false. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// * Disabled: The key rotation status does not change when you disable a -// KMS key. However, while the KMS key is disabled, KMS does not rotate the -// key material. -// -// * Pending deletion: While a KMS key is pending deletion, its key rotation -// status is false and KMS does not rotate the key material. If you cancel -// the deletion, the original key rotation status is restored. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:GetKeyRotationStatus (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * DisableKeyRotation -// -// * EnableKeyRotation -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetKeyRotationStatus for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. 
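-//
-// As an editorial sketch (placeholder key ID; svc is assumed to be a *kms.KMS
-// client), checking the rotation status might look like:
-//
-// out, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
-// KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
-// })
-// if err == nil && aws.BoolValue(out.KeyRotationEnabled) {
-// fmt.Println("automatic key rotation is enabled")
-// }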
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetKeyRotationStatus -func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRotationStatusOutput, error) { - req, out := c.GetKeyRotationStatusRequest(input) - return out, req.Send() -} - -// GetKeyRotationStatusWithContext is the same as GetKeyRotationStatus with the addition of -// the ability to pass a context and additional request options. -// -// See GetKeyRotationStatus for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetKeyRotationStatusWithContext(ctx aws.Context, input *GetKeyRotationStatusInput, opts ...request.Option) (*GetKeyRotationStatusOutput, error) { - req, out := c.GetKeyRotationStatusRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opGetParametersForImport = "GetParametersForImport" - -// GetParametersForImportRequest generates a "aws/request.Request" representing the -// client's request for the GetParametersForImport operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetParametersForImport for more information on using the GetParametersForImport -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetParametersForImportRequest method. -// req, resp := client.GetParametersForImportRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport -func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) (req *request.Request, output *GetParametersForImportOutput) { - op := &request.Operation{ - Name: opGetParametersForImport, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &GetParametersForImportInput{} - } - - output = &GetParametersForImportOutput{} - req = c.newRequest(op, input, output) - return -} - -// GetParametersForImport API operation for AWS Key Management Service. -// -// Returns the items you need to import key material into a symmetric encryption -// KMS key. For more information about importing key material into KMS, see -// Importing key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) -// in the Key Management Service Developer Guide. -// -// This operation returns a public key and an import token. Use the public key -// to encrypt the symmetric key material. Store the import token to send with -// a subsequent ImportKeyMaterial request. -// -// You must specify the key ID of the symmetric encryption KMS key into which -// you will import key material. This KMS key's Origin must be EXTERNAL. You -// must also specify the wrapping algorithm and type of wrapping key (public -// key) that you will use to encrypt the key material. 
You cannot perform this -// operation on an asymmetric KMS key, an HMAC KMS key, or on any KMS key in -// a different Amazon Web Services account. -// -// To import key material, you must use the public key and import token from -// the same response. These items are valid for 24 hours. The expiration date -// and time appear in the GetParametersForImport response. You cannot use an -// expired token in an ImportKeyMaterial request. If your key and token expire, -// send another GetParametersForImport request. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:GetParametersForImport (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * ImportKeyMaterial -// -// * DeleteImportedKeyMaterial -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation GetParametersForImport for usage and error information. -// -// Returned Error Types: -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetParametersForImport -func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) { - req, out := c.GetParametersForImportRequest(input) - return out, req.Send() -} - -// GetParametersForImportWithContext is the same as GetParametersForImport with the addition of -// the ability to pass a context and additional request options. -// -// See GetParametersForImport for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *KMS) GetParametersForImportWithContext(ctx aws.Context, input *GetParametersForImportInput, opts ...request.Option) (*GetParametersForImportOutput, error) {
- req, out := c.GetParametersForImportRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetPublicKey = "GetPublicKey"
-
-// GetPublicKeyRequest generates a "aws/request.Request" representing the
-// client's request for the GetPublicKey operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetPublicKey for more information on using the GetPublicKey
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-// // Example sending a request using the GetPublicKeyRequest method.
-// req, resp := client.GetPublicKeyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey
-func (c *KMS) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Request, output *GetPublicKeyOutput) {
- op := &request.Operation{
- Name: opGetPublicKey,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetPublicKeyInput{}
- }
-
- output = &GetPublicKeyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetPublicKey API operation for AWS Key Management Service.
-//
-// Returns the public key of an asymmetric KMS key. Unlike the private key of
-// an asymmetric KMS key, which never leaves KMS unencrypted, callers with kms:GetPublicKey
-// permission can download the public key of an asymmetric KMS key. You can
-// share the public key to allow others to encrypt messages and verify signatures
-// outside of KMS. For information about asymmetric KMS keys, see Asymmetric
-// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
-// in the Key Management Service Developer Guide.
-//
-// You do not need to download the public key. Instead, you can use the public
-// key within KMS by calling the Encrypt, ReEncrypt, or Verify operations with
-// the identifier of an asymmetric KMS key. When you use the public key within
-// KMS, you benefit from the authentication, authorization, and logging that
-// are part of every KMS operation. You also reduce the risk of encrypting data
-// that cannot be decrypted. These features are not effective outside of KMS.
-// For details, see Special Considerations for Downloading Public Keys (https://docs.aws.amazon.com/kms/latest/developerguide/download-public-key.html#download-public-key-considerations).
-//
-// To help you use the public key safely outside of KMS, GetPublicKey returns
-// important information about the public key in the response, including:
-//
-// * KeySpec (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeySpec):
-// The type of key material in the public key, such as RSA_4096 or ECC_NIST_P521.
-//
-// * KeyUsage (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-KeyUsage):
-// Whether the key is used for encryption or signing.
-//
-// * EncryptionAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-EncryptionAlgorithms)
-// or SigningAlgorithms (https://docs.aws.amazon.com/kms/latest/APIReference/API_GetPublicKey.html#KMS-GetPublicKey-response-SigningAlgorithms):
-// A list of the encryption algorithms or the signing algorithms for the
-// key.
-//
-// Although KMS cannot enforce these restrictions on external operations, it
-// is crucial that you use this information to prevent the public key from being
-// used improperly. For example, you can prevent a public signing key from being
-// used to encrypt data, or prevent a public key from being used with an encryption
-// algorithm that is not supported by KMS. You can also avoid errors, such as
-// using the wrong signing algorithm in a verification operation.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: Yes. To perform this operation with a KMS key in a different
-// Amazon Web Services account, specify the key ARN or alias ARN in the value
-// of the KeyId parameter.
-//
-// Required permissions: kms:GetPublicKey (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy)
-//
-// Related operations: CreateKey
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation GetPublicKey for usage and error information.
-//
-// Returned Error Types:
-// * NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// * DisabledException
-// The request was rejected because the specified KMS key is not enabled.
-//
-// * KeyUnavailableException
-// The request was rejected because the specified KMS key was not available.
-// You can retry the request.
-//
-// * DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
-//
-// * UnsupportedOperationException
-// The request was rejected because a specified parameter is not supported or
-// a specified resource is not valid for this operation.
-//
-// * InvalidArnException
-// The request was rejected because a specified ARN, or an ARN in a key policy,
-// is not valid.
-//
-// * InvalidGrantTokenException
-// The request was rejected because the specified grant token is not valid.
-//
-// * InvalidKeyUsageException
-// The request was rejected for one of the following reasons:
-//
-// * The KeyUsage value of the KMS key is incompatible with the API operation.
-//
-// * The encryption algorithm or signing algorithm specified for the operation
-// is incompatible with the type of key material in the KMS key (KeySpec).
-//
-// For encrypting, decrypting, re-encrypting, and generating data keys, the
-// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
-// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
-// of a KMS key, use the DescribeKey operation.
-// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GetPublicKey -func (c *KMS) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) - return out, req.Send() -} - -// GetPublicKeyWithContext is the same as GetPublicKey with the addition of -// the ability to pass a context and additional request options. -// -// See GetPublicKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) GetPublicKeyWithContext(ctx aws.Context, input *GetPublicKeyInput, opts ...request.Option) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opImportKeyMaterial = "ImportKeyMaterial" - -// ImportKeyMaterialRequest generates a "aws/request.Request" representing the -// client's request for the ImportKeyMaterial operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ImportKeyMaterial for more information on using the ImportKeyMaterial -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ImportKeyMaterialRequest method. -// req, resp := client.ImportKeyMaterialRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial -func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *request.Request, output *ImportKeyMaterialOutput) { - op := &request.Operation{ - Name: opImportKeyMaterial, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ImportKeyMaterialInput{} - } - - output = &ImportKeyMaterialOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// ImportKeyMaterial API operation for AWS Key Management Service. -// -// Imports key material into an existing symmetric encryption KMS key that was -// created without key material. 
After you successfully import key material
-// into a KMS key, you can reimport the same key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#reimport-key-material)
-// into that KMS key, but you cannot import different key material.
-//
-// You cannot perform this operation on an asymmetric KMS key, an HMAC KMS key,
-// or on any KMS key in a different Amazon Web Services account. For more information
-// about creating KMS keys with no key material and then importing key material,
-// see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
-// in the Key Management Service Developer Guide.
-//
-// Before using this operation, call GetParametersForImport. Its response includes
-// a public key and an import token. Use the public key to encrypt the key material.
-// Then, submit the import token from the same GetParametersForImport response.
-//
-// When calling this operation, you must specify the following values:
-//
-// * The key ID or key ARN of a KMS key with no key material. Its Origin
-// must be EXTERNAL. To create a KMS key with no key material, call CreateKey
-// and set the value of its Origin parameter to EXTERNAL. To get the Origin
-// of a KMS key, call DescribeKey.
-//
-// * The encrypted key material. To get the public key to encrypt the key
-// material, call GetParametersForImport.
-//
-// * The import token that GetParametersForImport returned. You must use
-// a public key and token from the same GetParametersForImport response.
-//
-// * Whether the key material expires and if so, when. If you set an expiration
-// date, KMS deletes the key material from the KMS key on the specified date,
-// and the KMS key becomes unusable. To use the KMS key again, you must reimport
-// the same key material. The only way to change an expiration date is by
-// reimporting the same key material and specifying a new expiration date.
-//
-// When this operation is successful, the key state of the KMS key changes from
-// PendingImport to Enabled, and you can use the KMS key.
-//
-// If this operation fails, use the exception to help determine the problem.
-// If the error is related to the key material, the import token, or wrapping
-// key, use GetParametersForImport to get a new public key and import token
-// for the KMS key and repeat the import procedure. For help, see How To Import
-// Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html#importing-keys-overview)
-// in the Key Management Service Developer Guide.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: No. You cannot perform this operation on a KMS key in
-// a different Amazon Web Services account.
-//
-// Required permissions: kms:ImportKeyMaterial (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy)
-//
-// Related operations:
-//
-// * DeleteImportedKeyMaterial
-//
-// * GetParametersForImport
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation ImportKeyMaterial for usage and error information.
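-//
-// As an editorial sketch of the flow described above (placeholder key ID; svc
-// is assumed to be a *kms.KMS client; wrapping the raw key material with
-// params.PublicKey happens outside of KMS and is elided):
-//
-// params, err := svc.GetParametersForImport(&kms.GetParametersForImportInput{
-// KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder
-// WrappingAlgorithm: aws.String("RSAES_OAEP_SHA_256"),
-// WrappingKeySpec: aws.String("RSA_2048"),
-// })
-// // ... handle err; wrap the key material with params.PublicKey outside KMS ...
-// _, err = svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
-// KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
-// ImportToken: params.ImportToken,
-// EncryptedKeyMaterial: wrappedKeyMaterial, // produced by the elided step
-// ExpirationModel: aws.String("KEY_MATERIAL_DOES_NOT_EXPIRE"),
-// })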
-//
-// Returned Error Types:
-// * InvalidArnException
-// The request was rejected because a specified ARN, or an ARN in a key policy,
-// is not valid.
-//
-// * UnsupportedOperationException
-// The request was rejected because a specified parameter is not supported or
-// a specified resource is not valid for this operation.
-//
-// * DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
-//
-// * NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// * InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
-//
-// * InvalidStateException
-// The request was rejected because the state of the specified resource is not
-// valid for this request.
-//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
-//
-// * InvalidCiphertextException
-// From the Decrypt or ReEncrypt operation, the request was rejected because
-// the specified ciphertext, or additional authenticated data incorporated into
-// the ciphertext, such as the encryption context, is corrupted, missing, or
-// otherwise invalid.
-//
-// From the ImportKeyMaterial operation, the request was rejected because KMS
-// could not decrypt the encrypted (wrapped) key material.
-//
-// * IncorrectKeyMaterialException
-// The request was rejected because the key material in the request is expired,
-// invalid, or is not the same key material that was previously imported into
-// this KMS key.
-//
-// * ExpiredImportTokenException
-// The request was rejected because the specified import token is expired. Use
-// GetParametersForImport to get a new import token and public key, use the
-// new public key to encrypt the key material, and then try the request again.
-//
-// * InvalidImportTokenException
-// The request was rejected because the provided import token is invalid or
-// is associated with a different KMS key.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ImportKeyMaterial
-func (c *KMS) ImportKeyMaterial(input *ImportKeyMaterialInput) (*ImportKeyMaterialOutput, error) {
- req, out := c.ImportKeyMaterialRequest(input)
- return out, req.Send()
-}
-
-// ImportKeyMaterialWithContext is the same as ImportKeyMaterial with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ImportKeyMaterial for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *KMS) ImportKeyMaterialWithContext(ctx aws.Context, input *ImportKeyMaterialInput, opts ...request.Option) (*ImportKeyMaterialOutput, error) {
- req, out := c.ImportKeyMaterialRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListAliases = "ListAliases"
-
-// ListAliasesRequest generates a "aws/request.Request" representing the
-// client's request for the ListAliases operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
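-//
-// As an editorial sketch, the request-style variant allows the request to be
-// customized before it is sent (svc is assumed to be a *kms.KMS client and
-// ctx a context.Context):
-//
-// req, resp := svc.ListAliasesRequest(&kms.ListAliasesInput{
-// Limit: aws.Int64(10),
-// })
-// req.SetContext(ctx)
-// if err := req.Send(); err == nil {
-// fmt.Println(len(resp.Aliases), "aliases on the first page")
-// }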
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListAliases for more information on using the ListAliases -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListAliasesRequest method. -// req, resp := client.ListAliasesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases -func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { - op := &request.Operation{ - Name: opListAliases, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListAliasesInput{} - } - - output = &ListAliasesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListAliases API operation for AWS Key Management Service. -// -// Gets a list of aliases in the caller's Amazon Web Services account and region. -// For more information about aliases, see CreateAlias. -// -// By default, the ListAliases operation returns all aliases in the account -// and region. To get only the aliases associated with a particular KMS key, -// use the KeyId parameter. -// -// The ListAliases response can include aliases that you created and associated -// with your customer managed keys, and aliases that Amazon Web Services created -// and associated with Amazon Web Services managed keys in your account. You -// can recognize Amazon Web Services aliases because their names have the format -// aws/, such as aws/dynamodb. -// -// The response might also include aliases that have no TargetKeyId field. These -// are predefined aliases that Amazon Web Services has created but has not yet -// associated with a KMS key. Aliases that Amazon Web Services creates in your -// account, including predefined aliases, do not count against your KMS aliases -// quota (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#aliases-limit). -// -// Cross-account use: No. ListAliases does not return aliases in other Amazon -// Web Services accounts. -// -// Required permissions: kms:ListAliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * DeleteAlias -// -// * UpdateAlias -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListAliases for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListAliases -func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - return out, req.Send() -} - -// ListAliasesWithContext is the same as ListAliases with the addition of -// the ability to pass a context and additional request options. -// -// See ListAliases for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListAliasesPages iterates over the pages of a ListAliases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListAliases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAliases operation. -// pageNum := 0 -// err := client.ListAliasesPages(params, -// func(page *kms.ListAliasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool) error { - return c.ListAliasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAliasesPagesWithContext same as ListAliasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAliasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAliasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListGrants = "ListGrants" - -// ListGrantsRequest generates a "aws/request.Request" representing the -// client's request for the ListGrants operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListGrants for more information on using the ListGrants -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListGrantsRequest method. -// req, resp := client.ListGrantsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants -func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, output *ListGrantsResponse) { - op := &request.Operation{ - Name: opListGrants, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListGrantsInput{} - } - - output = &ListGrantsResponse{} - req = c.newRequest(op, input, output) - return -} - -// ListGrants API operation for AWS Key Management Service. -// -// Gets a list of all grants for the specified KMS key. -// -// You must specify the KMS key in all requests. You can filter the grant list -// by grant ID or grantee principal. -// -// For detailed information about grants, including grant terminology, see Grants -// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// The GranteePrincipal field in the ListGrants response usually contains the -// user or role designated as the grantee principal in the grant. However, when -// the grantee principal in the grant is an Amazon Web Services service, the -// GranteePrincipal field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), -// which might represent several different grantee principals. -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. -// -// Required permissions: kms:ListGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * CreateGrant -// -// * ListRetirableGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListGrants for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. 
-// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InvalidGrantIdException -// The request was rejected because the specified GrantId is not valid. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListGrants -func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { - req, out := c.ListGrantsRequest(input) - return out, req.Send() -} - -// ListGrantsWithContext is the same as ListGrants with the addition of -// the ability to pass a context and additional request options. -// -// See ListGrants for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListGrantsWithContext(ctx aws.Context, input *ListGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { - req, out := c.ListGrantsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListGrantsPages iterates over the pages of a ListGrants operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListGrants method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListGrants operation. -// pageNum := 0 -// err := client.ListGrantsPages(params, -// func(page *kms.ListGrantsResponse, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool) error { - return c.ListGrantsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListGrantsPagesWithContext same as ListGrantsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListGrantsPagesWithContext(ctx aws.Context, input *ListGrantsInput, fn func(*ListGrantsResponse, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListGrantsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListGrantsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListGrantsResponse), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListKeyPolicies = "ListKeyPolicies" - -// ListKeyPoliciesRequest generates a "aws/request.Request" representing the -// client's request for the ListKeyPolicies operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListKeyPolicies for more information on using the ListKeyPolicies -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListKeyPoliciesRequest method. -// req, resp := client.ListKeyPoliciesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies -func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.Request, output *ListKeyPoliciesOutput) { - op := &request.Operation{ - Name: opListKeyPolicies, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListKeyPoliciesInput{} - } - - output = &ListKeyPoliciesOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListKeyPolicies API operation for AWS Key Management Service. -// -// Gets the names of the key policies that are attached to a KMS key. This operation -// is designed to get policy names that you can use in a GetKeyPolicy operation. -// However, the only valid policy name is default. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListKeyPolicies (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * GetKeyPolicy -// -// * PutKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListKeyPolicies for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeyPolicies -func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutput, error) { - req, out := c.ListKeyPoliciesRequest(input) - return out, req.Send() -} - -// ListKeyPoliciesWithContext is the same as ListKeyPolicies with the addition of -// the ability to pass a context and additional request options. -// -// See ListKeyPolicies for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeyPoliciesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, opts ...request.Option) (*ListKeyPoliciesOutput, error) { - req, out := c.ListKeyPoliciesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListKeyPoliciesPages iterates over the pages of a ListKeyPolicies operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListKeyPolicies method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListKeyPolicies operation. -// pageNum := 0 -// err := client.ListKeyPoliciesPages(params, -// func(page *kms.ListKeyPoliciesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool) error { - return c.ListKeyPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListKeyPoliciesPagesWithContext same as ListKeyPoliciesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeyPoliciesPagesWithContext(ctx aws.Context, input *ListKeyPoliciesInput, fn func(*ListKeyPoliciesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListKeyPoliciesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListKeyPoliciesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListKeyPoliciesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListKeys = "ListKeys" - -// ListKeysRequest generates a "aws/request.Request" representing the -// client's request for the ListKeys operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See ListKeys for more information on using the ListKeys -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListKeysRequest method. -// req, resp := client.ListKeysRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys -func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) { - op := &request.Operation{ - Name: opListKeys, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "Truncated", - }, - } - - if input == nil { - input = &ListKeysInput{} - } - - output = &ListKeysOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListKeys API operation for AWS Key Management Service. -// -// Gets a list of all KMS keys in the caller's Amazon Web Services account and -// Region. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListKeys (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) -// -// Related operations: -// -// * CreateKey -// -// * DescribeKey -// -// * ListAliases -// -// * ListResourceTags -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListKeys for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListKeys -func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { - req, out := c.ListKeysRequest(input) - return out, req.Send() -} - -// ListKeysWithContext is the same as ListKeys with the addition of -// the ability to pass a context and additional request options. -// -// See ListKeys for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeysWithContext(ctx aws.Context, input *ListKeysInput, opts ...request.Option) (*ListKeysOutput, error) { - req, out := c.ListKeysRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// ListKeysPages iterates over the pages of a ListKeys operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
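-// The second argument passed to fn reports whether that page is the last
-// page of results.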
-// -// See ListKeys method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListKeys operation. -// pageNum := 0 -// err := client.ListKeysPages(params, -// func(page *kms.ListKeysOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(*ListKeysOutput, bool) bool) error { - return c.ListKeysPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListKeysPagesWithContext same as ListKeysPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListKeysPagesWithContext(ctx aws.Context, input *ListKeysInput, fn func(*ListKeysOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListKeysInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListKeysRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListKeysOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListResourceTags = "ListResourceTags" - -// ListResourceTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListResourceTags operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListResourceTags for more information on using the ListResourceTags -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListResourceTagsRequest method. -// req, resp := client.ListResourceTagsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags -func (c *KMS) ListResourceTagsRequest(input *ListResourceTagsInput) (req *request.Request, output *ListResourceTagsOutput) { - op := &request.Operation{ - Name: opListResourceTags, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListResourceTagsInput{} - } - - output = &ListResourceTagsOutput{} - req = c.newRequest(op, input, output) - return -} - -// ListResourceTags API operation for AWS Key Management Service. -// -// Returns all tags on the specified KMS key. -// -// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. For information about using -// tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// -// Cross-account use: No. 
You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:ListResourceTags (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: -// -// * CreateKey -// -// * ReplicateKey -// -// * TagResource -// -// * UntagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListResourceTags for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListResourceTags -func (c *KMS) ListResourceTags(input *ListResourceTagsInput) (*ListResourceTagsOutput, error) { - req, out := c.ListResourceTagsRequest(input) - return out, req.Send() -} - -// ListResourceTagsWithContext is the same as ListResourceTags with the addition of -// the ability to pass a context and additional request options. -// -// See ListResourceTags for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListResourceTagsWithContext(ctx aws.Context, input *ListResourceTagsInput, opts ...request.Option) (*ListResourceTagsOutput, error) { - req, out := c.ListResourceTagsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opListRetirableGrants = "ListRetirableGrants" - -// ListRetirableGrantsRequest generates a "aws/request.Request" representing the -// client's request for the ListRetirableGrants operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListRetirableGrants for more information on using the ListRetirableGrants -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListRetirableGrantsRequest method. 
-// req, resp := client.ListRetirableGrantsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants -func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *request.Request, output *ListGrantsResponse) { - op := &request.Operation{ - Name: opListRetirableGrants, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListRetirableGrantsInput{} - } - - output = &ListGrantsResponse{} - req = c.newRequest(op, input, output) - return -} - -// ListRetirableGrants API operation for AWS Key Management Service. -// -// Returns information about all grants in the Amazon Web Services account and -// Region that have the specified retiring principal. -// -// You can specify any principal in your Amazon Web Services account. The grants -// that are returned include grants for KMS keys in your Amazon Web Services -// account and other Amazon Web Services accounts. You might use this operation -// to determine which grants you may retire. To retire a grant, use the RetireGrant -// operation. -// -// For detailed information about grants, including grant terminology, see Grants -// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// Cross-account use: You must specify a principal in your Amazon Web Services -// account. However, this operation can return grants in any Amazon Web Services -// account. You do not need kms:ListRetirableGrants permission (or any other -// additional permission) in any Amazon Web Services account other than your -// own. -// -// Required permissions: kms:ListRetirableGrants (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (IAM policy) in your Amazon Web Services account. -// -// Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * RetireGrant -// -// * RevokeGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ListRetirableGrants for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidMarkerException -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. 
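-//
-// A minimal usage sketch (the principal ARN is illustrative; any principal
-// in your Amazon Web Services account can be specified):
-//
-//    resp, err := client.ListRetirableGrants(&kms.ListRetirableGrantsInput{
-//        RetiringPrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
-//    })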
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ListRetirableGrants -func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsResponse, error) { - req, out := c.ListRetirableGrantsRequest(input) - return out, req.Send() -} - -// ListRetirableGrantsWithContext is the same as ListRetirableGrants with the addition of -// the ability to pass a context and additional request options. -// -// See ListRetirableGrants for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ListRetirableGrantsWithContext(ctx aws.Context, input *ListRetirableGrantsInput, opts ...request.Option) (*ListGrantsResponse, error) { - req, out := c.ListRetirableGrantsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutKeyPolicy = "PutKeyPolicy" - -// PutKeyPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutKeyPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutKeyPolicy for more information on using the PutKeyPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutKeyPolicyRequest method. -// req, resp := client.PutKeyPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy -func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Request, output *PutKeyPolicyOutput) { - op := &request.Operation{ - Name: opPutKeyPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutKeyPolicyInput{} - } - - output = &PutKeyPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutKeyPolicy API operation for AWS Key Management Service. -// -// Attaches a key policy to the specified KMS key. -// -// For more information about key policies, see Key Policies (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) -// in the Key Management Service Developer Guide. For help writing and formatting -// a JSON policy document, see the IAM JSON Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html) -// in the Identity and Access Management User Guide . For examples of adding -// a key policy in multiple programming languages, see Setting a key policy -// (https://docs.aws.amazon.com/kms/latest/developerguide/programming-key-policies.html#put-policy) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. 
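-//
-// A minimal usage sketch (the key ID is illustrative, and policyJSON is
-// assumed to hold a complete JSON key policy document):
-//
-//    _, err := client.PutKeyPolicy(&kms.PutKeyPolicyInput{
-//        KeyId:      aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
-//        PolicyName: aws.String("default"),
-//        Policy:     aws.String(policyJSON),
-//    })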
-// -// Required permissions: kms:PutKeyPolicy (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: GetKeyPolicy -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation PutKeyPolicy for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/PutKeyPolicy -func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { - req, out := c.PutKeyPolicyRequest(input) - return out, req.Send() -} - -// PutKeyPolicyWithContext is the same as PutKeyPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutKeyPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) PutKeyPolicyWithContext(ctx aws.Context, input *PutKeyPolicyInput, opts ...request.Option) (*PutKeyPolicyOutput, error) { - req, out := c.PutKeyPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opReEncrypt = "ReEncrypt" - -// ReEncryptRequest generates a "aws/request.Request" representing the -// client's request for the ReEncrypt operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ReEncrypt for more information on using the ReEncrypt -// API call, and error handling. 
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-//    // Example sending a request using the ReEncryptRequest method.
-//    req, resp := client.ReEncryptRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt
-func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, output *ReEncryptOutput) {
-	op := &request.Operation{
-		Name:       opReEncrypt,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &ReEncryptInput{}
-	}
-
-	output = &ReEncryptOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// ReEncrypt API operation for AWS Key Management Service.
-//
-// Decrypts ciphertext and then reencrypts it entirely within KMS. You can use
-// this operation to change the KMS key under which data is encrypted, such
-// as when you manually rotate (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually)
-// a KMS key or change the KMS key that protects a ciphertext. You can also
-// use it to reencrypt ciphertext under the same KMS key, such as to change
-// the encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
-// of a ciphertext.
-//
-// The ReEncrypt operation can decrypt ciphertext that was encrypted by using
-// a KMS key in a KMS operation, such as Encrypt or GenerateDataKey. It can
-// also decrypt ciphertext that was encrypted by using the public key of an
-// asymmetric KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks)
-// outside of KMS. However, it cannot decrypt ciphertext produced by other libraries,
-// such as the Amazon Web Services Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/)
-// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html).
-// These libraries return a ciphertext format that is incompatible with KMS.
-//
-// When you use the ReEncrypt operation, you need to provide information for
-// the decrypt operation and the subsequent encrypt operation.
-//
-//    * If your ciphertext was encrypted under an asymmetric KMS key, you must
-//    use the SourceKeyId parameter to identify the KMS key that encrypted the
-//    ciphertext. You must also supply the encryption algorithm that was used.
-//    This information is required to decrypt the data.
-//
-//    * If your ciphertext was encrypted under a symmetric encryption KMS key,
-//    the SourceKeyId parameter is optional. KMS can get this information from
-//    metadata that it adds to the symmetric ciphertext blob. This feature adds
-//    durability to your implementation by ensuring that authorized users can
-//    decrypt ciphertext decades after it was encrypted, even if they've lost
-//    track of the key ID. However, specifying the source KMS key is always
-//    recommended as a best practice. When you use the SourceKeyId parameter
-//    to specify a KMS key, KMS uses only the KMS key you specify. If the ciphertext
-//    was encrypted under a different KMS key, the ReEncrypt operation fails.
-//    This practice ensures that you use the KMS key that you intend.
-//
-//    * To reencrypt the data, you must use the DestinationKeyId parameter to
-//    specify the KMS key that re-encrypts the data after it is decrypted.
If the destination -// KMS key is an asymmetric KMS key, you must also provide the encryption -// algorithm. The algorithm that you choose must be compatible with the KMS -// key. When you use an asymmetric KMS key to encrypt or reencrypt data, -// be sure to record the KMS key and encryption algorithm that you choose. -// You will be required to provide the same KMS key and encryption algorithm -// when you decrypt the data. If the KMS key and algorithm do not match the -// values used to encrypt the data, the decrypt operation fails. You are -// not required to supply the key ID and encryption algorithm when you decrypt -// with symmetric encryption KMS keys because KMS stores this information -// in the ciphertext blob. KMS cannot store metadata in ciphertext generated -// with asymmetric keys. The standard format for asymmetric key ciphertext -// does not include configurable fields. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. The source KMS key and destination KMS key can be -// in different Amazon Web Services accounts. Either or both KMS keys can be -// in a different account than the caller. To specify a KMS key in a different -// account, you must use its key ARN or alias ARN. -// -// Required permissions: -// -// * kms:ReEncryptFrom (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the source KMS key (key policy) -// -// * kms:ReEncryptTo (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// permission on the destination KMS key (key policy) -// -// To permit reencryption from or to a KMS key, include the "kms:ReEncrypt*" -// permission in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). -// This permission is automatically included in the key policy when you use -// the console to create a KMS key. But you must include it manually when you -// create a KMS key programmatically or when you use the PutKeyPolicy operation -// to set a key policy. -// -// Related operations: -// -// * Decrypt -// -// * Encrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyPair -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation ReEncrypt for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * InvalidCiphertextException -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. 
-// -// * IncorrectKeyException -// The request was rejected because the specified KMS key cannot decrypt the -// data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request -// must identify the same KMS key that was used to encrypt the ciphertext. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReEncrypt -func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { - req, out := c.ReEncryptRequest(input) - return out, req.Send() -} - -// ReEncryptWithContext is the same as ReEncrypt with the addition of -// the ability to pass a context and additional request options. -// -// See ReEncrypt for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ReEncryptWithContext(ctx aws.Context, input *ReEncryptInput, opts ...request.Option) (*ReEncryptOutput, error) { - req, out := c.ReEncryptRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opReplicateKey = "ReplicateKey" - -// ReplicateKeyRequest generates a "aws/request.Request" representing the -// client's request for the ReplicateKey operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ReplicateKey for more information on using the ReplicateKey -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-//
-//
-//    // Example sending a request using the ReplicateKeyRequest method.
-//    req, resp := client.ReplicateKeyRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKey
-func (c *KMS) ReplicateKeyRequest(input *ReplicateKeyInput) (req *request.Request, output *ReplicateKeyOutput) {
-	op := &request.Operation{
-		Name:       opReplicateKey,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &ReplicateKeyInput{}
-	}
-
-	output = &ReplicateKeyOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// ReplicateKey API operation for AWS Key Management Service.
-//
-// Replicates a multi-Region key into the specified Region. This operation creates
-// a multi-Region replica key based on a multi-Region primary key in a different
-// Region of the same Amazon Web Services partition. You can create multiple
-// replicas of a primary key, but each must be in a different Region. To create
-// a multi-Region primary key, use the CreateKey operation.
-//
-// This operation supports multi-Region keys, a KMS feature that lets you create
-// multiple interoperable KMS keys in different Amazon Web Services Regions.
-// Because these KMS keys have the same key ID, key material, and other metadata,
-// you can use them interchangeably to encrypt data in one Amazon Web Services
-// Region and decrypt it in a different Amazon Web Services Region without re-encrypting
-// the data or making a cross-Region call. For more information about multi-Region
-// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
-// in the Key Management Service Developer Guide.
-//
-// A replica key is a fully-functional KMS key that can be used independently
-// of its primary and peer replica keys. A primary key and its replica keys
-// share properties that make them interoperable. They have the same key ID
-// (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id)
-// and key material. They also have the same key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec),
-// key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage),
-// key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin),
-// and automatic key rotation status (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html).
-// KMS automatically synchronizes these shared properties among related multi-Region
-// keys. All other properties of a replica key can differ, including its key
-// policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html),
-// tags (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html),
-// aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html),
-// and Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html).
-// KMS pricing and quotas for KMS keys apply to each primary key and replica
-// key.
-//
-// When this operation completes, the new replica key has a transient key state
-// of Creating. This key state changes to Enabled (or PendingImport) after a
-// few seconds when the process of creating the new replica key is complete.
-// While the key state is Creating, you can manage the key, but you cannot yet
-// use it in cryptographic operations.
If you are creating and using the replica
-// key programmatically, retry on KMSInvalidStateException or call DescribeKey
-// to check its KeyState value before using it. For details about the Creating
-// key state, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// You cannot create more than one replica of a primary key in any Region. If
-// the Region already includes a replica of the key you're trying to replicate,
-// ReplicateKey returns an AlreadyExistsException error. If the key state of
-// the existing replica is PendingDeletion, you can cancel the scheduled key
-// deletion (CancelKeyDeletion) or wait for the key to be deleted. The new replica
-// key you create will have the same shared properties (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html#mrk-sync-properties)
-// as the original replica key.
-//
-// The CloudTrail log of a ReplicateKey operation records a ReplicateKey operation
-// in the primary key's Region and a CreateKey operation in the replica key's
-// Region.
-//
-// If you replicate a multi-Region primary key with imported key material, the
-// replica key is created with no key material. You must import the same key
-// material that you imported into the primary key. For details, see Importing
-// key material into multi-Region keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-import.html)
-// in the Key Management Service Developer Guide.
-//
-// To convert a replica key to a primary key, use the UpdatePrimaryRegion operation.
-//
-// ReplicateKey uses different default values for the KeyPolicy and Tags parameters
-// than those used in the KMS console. For details, see the parameter descriptions.
-//
-// Cross-account use: No. You cannot use this operation to create a replica
-// key in a different Amazon Web Services account.
-//
-// Required permissions:
-//
-//    * kms:ReplicateKey on the primary key (in the primary key's Region). Include
-//    this permission in the primary key's key policy.
-//
-//    * kms:CreateKey in an IAM policy in the replica Region.
-//
-//    * To use the Tags parameter, kms:TagResource in an IAM policy in the replica
-//    Region.
-//
-// Related operations
-//
-//    * CreateKey
-//
-//    * UpdatePrimaryRegion
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation ReplicateKey for usage and error information.
-//
-// Returned Error Types:
-//   * AlreadyExistsException
-//   The request was rejected because it attempted to create a resource that already
-//   exists.
-//
-//   * DisabledException
-//   The request was rejected because the specified KMS key is not enabled.
-//
-//   * InvalidArnException
-//   The request was rejected because a specified ARN, or an ARN in a key policy,
-//   is not valid.
-//
-//   * InvalidStateException
-//   The request was rejected because the state of the specified resource is not
-//   valid for this request.
-//
-//   For more information about how key state affects the use of a KMS key, see
-//   Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-//   in the Key Management Service Developer Guide.
-//
-//   * InternalException
-//   The request was rejected because an internal exception occurred. The request
-//   can be retried.
-// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * MalformedPolicyDocumentException -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// * UnsupportedOperationException -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ReplicateKey -func (c *KMS) ReplicateKey(input *ReplicateKeyInput) (*ReplicateKeyOutput, error) { - req, out := c.ReplicateKeyRequest(input) - return out, req.Send() -} - -// ReplicateKeyWithContext is the same as ReplicateKey with the addition of -// the ability to pass a context and additional request options. -// -// See ReplicateKey for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) ReplicateKeyWithContext(ctx aws.Context, input *ReplicateKeyInput, opts ...request.Option) (*ReplicateKeyOutput, error) { - req, out := c.ReplicateKeyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRetireGrant = "RetireGrant" - -// RetireGrantRequest generates a "aws/request.Request" representing the -// client's request for the RetireGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RetireGrant for more information on using the RetireGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RetireGrantRequest method. -// req, resp := client.RetireGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant -func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, output *RetireGrantOutput) { - op := &request.Operation{ - Name: opRetireGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RetireGrantInput{} - } - - output = &RetireGrantOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RetireGrant API operation for AWS Key Management Service. -// -// Deletes a grant. Typically, you retire a grant when you no longer need its -// permissions. 
To identify the grant to retire, use a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token),
-// or both the grant ID and a key identifier (key ID or key ARN) of the KMS
-// key. The CreateGrant operation returns both values.
-//
-// This operation can be called by the retiring principal for a grant, by the
-// grantee principal if the grant allows the RetireGrant operation, and by the
-// Amazon Web Services account in which the grant is created. It can also be
-// called by principals to whom permission for retiring a grant is delegated.
-// For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
-// in the Key Management Service Developer Guide.
-//
-// For detailed information about grants, including grant terminology, see Grants
-// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html)
-// in the Key Management Service Developer Guide. For examples of working with
-// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html).
-//
-// Cross-account use: Yes. You can retire a grant on a KMS key in a different
-// Amazon Web Services account.
-//
-// Required permissions: Permission to retire a grant is determined primarily
-// by the grant. For details, see Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete)
-// in the Key Management Service Developer Guide.
-//
-// Related operations:
-//
-//    * CreateGrant
-//
-//    * ListGrants
-//
-//    * ListRetirableGrants
-//
-//    * RevokeGrant
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation RetireGrant for usage and error information.
-//
-// Returned Error Types:
-//   * InvalidArnException
-//   The request was rejected because a specified ARN, or an ARN in a key policy,
-//   is not valid.
-//
-//   * InvalidGrantTokenException
-//   The request was rejected because the specified grant token is not valid.
-//
-//   * InvalidGrantIdException
-//   The request was rejected because the specified GrantId is not valid.
-//
-//   * NotFoundException
-//   The request was rejected because the specified entity or resource could not
-//   be found.
-//
-//   * DependencyTimeoutException
-//   The system timed out while trying to fulfill the request. The request can
-//   be retried.
-//
-//   * InternalException
-//   The request was rejected because an internal exception occurred. The request
-//   can be retried.
-//
-//   * InvalidStateException
-//   The request was rejected because the state of the specified resource is not
-//   valid for this request.
-//
-//   For more information about how key state affects the use of a KMS key, see
-//   Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-//   in the Key Management Service Developer Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RetireGrant
-func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) {
-	req, out := c.RetireGrantRequest(input)
-	return out, req.Send()
-}
-
-// RetireGrantWithContext is the same as RetireGrant with the addition of
-// the ability to pass a context and additional request options.
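-//
-// A minimal usage sketch (the timeout and grant token are illustrative; aws.Context
-// is satisfied by a standard context.Context):
-//
-//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-//    defer cancel()
-//    _, err := client.RetireGrantWithContext(ctx, &kms.RetireGrantInput{
-//        GrantToken: aws.String(grantToken), // token returned by CreateGrant
-//    })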
-// -// See RetireGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) RetireGrantWithContext(ctx aws.Context, input *RetireGrantInput, opts ...request.Option) (*RetireGrantOutput, error) { - req, out := c.RetireGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRevokeGrant = "RevokeGrant" - -// RevokeGrantRequest generates a "aws/request.Request" representing the -// client's request for the RevokeGrant operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RevokeGrant for more information on using the RevokeGrant -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RevokeGrantRequest method. -// req, resp := client.RevokeGrantRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant -func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, output *RevokeGrantOutput) { - op := &request.Operation{ - Name: opRevokeGrant, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RevokeGrantInput{} - } - - output = &RevokeGrantOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RevokeGrant API operation for AWS Key Management Service. -// -// Deletes the specified grant. You revoke a grant to terminate the permissions -// that the grant allows. For more information, see Retiring and revoking grants -// (https://docs.aws.amazon.com/kms/latest/developerguide/managing-grants.html#grant-delete) -// in the Key Management Service Developer Guide . -// -// When you create, retire, or revoke a grant, there might be a brief delay, -// usually less than five minutes, until the grant is available throughout KMS. -// This state is known as eventual consistency. For details, see Eventual consistency -// (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) -// in the Key Management Service Developer Guide . -// -// For detailed information about grants, including grant terminology, see Grants -// in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html) -// in the Key Management Service Developer Guide . For examples of working with -// grants in several programming languages, see Programming grants (https://docs.aws.amazon.com/kms/latest/developerguide/programming-grants.html). -// -// Cross-account use: Yes. To perform this operation on a KMS key in a different -// Amazon Web Services account, specify the key ARN in the value of the KeyId -// parameter. 
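-//
-// A minimal usage sketch (the key ARN and grant ID are illustrative):
-//
-//    _, err := client.RevokeGrant(&kms.RevokeGrantInput{
-//        KeyId:   aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
-//        GrantId: aws.String(grantID), // ID returned by CreateGrant or ListGrants
-//    })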
-// -// Required permissions: kms:RevokeGrant (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy). -// -// Related operations: -// -// * CreateGrant -// -// * ListGrants -// -// * ListRetirableGrants -// -// * RetireGrant -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation RevokeGrant for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidGrantIdException -// The request was rejected because the specified GrantId is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/RevokeGrant -func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) { - req, out := c.RevokeGrantRequest(input) - return out, req.Send() -} - -// RevokeGrantWithContext is the same as RevokeGrant with the addition of -// the ability to pass a context and additional request options. -// -// See RevokeGrant for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) RevokeGrantWithContext(ctx aws.Context, input *RevokeGrantInput, opts ...request.Option) (*RevokeGrantOutput, error) { - req, out := c.RevokeGrantRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opScheduleKeyDeletion = "ScheduleKeyDeletion" - -// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the -// client's request for the ScheduleKeyDeletion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ScheduleKeyDeletion for more information on using the ScheduleKeyDeletion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ScheduleKeyDeletionRequest method. 
-//    req, resp := client.ScheduleKeyDeletionRequest(params)
-//
-//    err := req.Send()
-//    if err == nil { // resp is now filled
-//        fmt.Println(resp)
-//    }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
-func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) {
-	op := &request.Operation{
-		Name:       opScheduleKeyDeletion,
-		HTTPMethod: "POST",
-		HTTPPath:   "/",
-	}
-
-	if input == nil {
-		input = &ScheduleKeyDeletionInput{}
-	}
-
-	output = &ScheduleKeyDeletionOutput{}
-	req = c.newRequest(op, input, output)
-	return
-}
-
-// ScheduleKeyDeletion API operation for AWS Key Management Service.
-//
-// Schedules the deletion of a KMS key. By default, KMS applies a waiting period
-// of 30 days, but you can specify a waiting period of 7-30 days. When this
-// operation is successful, the key state of the KMS key changes to PendingDeletion
-// and the key can't be used in any cryptographic operations. It remains in
-// this state for the duration of the waiting period. Before the waiting period
-// ends, you can use CancelKeyDeletion to cancel the deletion of the KMS key.
-// After the waiting period ends, KMS deletes the KMS key, its key material,
-// and all KMS data associated with it, including all aliases that refer to
-// it.
-//
-// Deleting a KMS key is a destructive and potentially dangerous operation.
-// When a KMS key is deleted, all data that was encrypted under the KMS key
-// is unrecoverable. (The only exception is a multi-Region replica key.) To
-// prevent the use of a KMS key without deleting it, use DisableKey.
-//
-// If you schedule deletion of a KMS key from a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html),
-// when the waiting period expires, ScheduleKeyDeletion deletes the KMS key
-// from KMS. Then KMS makes a best effort to delete the key material from the
-// associated CloudHSM cluster. However, you might need to manually delete the
-// orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key)
-// from the cluster and its backups.
-//
-// You can schedule the deletion of a multi-Region primary key and its replica
-// keys at any time. However, KMS will not delete a multi-Region primary key
-// with existing replica keys. If you schedule the deletion of a primary key
-// with replicas, its key state changes to PendingReplicaDeletion and it cannot
-// be replicated or used in cryptographic operations. This status can continue
-// indefinitely. When the last of its replica keys is deleted (not just scheduled),
-// the key state of the primary key changes to PendingDeletion and its waiting
-// period (PendingWindowInDays) begins. For details, see Deleting multi-Region
-// keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-delete.html)
-// in the Key Management Service Developer Guide.
-//
-// For more information about scheduling a KMS key for deletion, see Deleting
-// KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html)
-// in the Key Management Service Developer Guide.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: No.
-//
-// Required permissions: kms:ScheduleKeyDeletion (key policy)
-//
-// Related operations
-//
-// * CancelKeyDeletion
-//
-// * DisableKey
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation ScheduleKeyDeletion for usage and error information.
-//
-// Returned Error Types:
-// * NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// * InvalidArnException
-// The request was rejected because a specified ARN, or an ARN in a key policy,
-// is not valid.
-//
-// * DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
-//
-// * InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
-//
-// * InvalidStateException
-// The request was rejected because the state of the specified resource is not
-// valid for this request.
-//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/ScheduleKeyDeletion
-func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) {
- req, out := c.ScheduleKeyDeletionRequest(input)
- return out, req.Send()
-}
-
-// ScheduleKeyDeletionWithContext is the same as ScheduleKeyDeletion with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ScheduleKeyDeletion for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *KMS) ScheduleKeyDeletionWithContext(ctx aws.Context, input *ScheduleKeyDeletionInput, opts ...request.Option) (*ScheduleKeyDeletionOutput, error) {
- req, out := c.ScheduleKeyDeletionRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opSign = "Sign"
-
-// SignRequest generates a "aws/request.Request" representing the
-// client's request for the Sign operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See Sign for more information on using the Sign
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-// // Example sending a request using the SignRequest method.
-// req, resp := client.SignRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign
-func (c *KMS) SignRequest(input *SignInput) (req *request.Request, output *SignOutput) {
- op := &request.Operation{
- Name: opSign,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &SignInput{}
- }
-
- output = &SignOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// Sign API operation for AWS Key Management Service.
-//
-// Creates a digital signature (https://en.wikipedia.org/wiki/Digital_signature)
-// for a message or message digest by using the private key in an asymmetric
-// signing KMS key. To verify the signature, use the Verify operation, or use
-// the public key in the same asymmetric KMS key outside of KMS. For information
-// about asymmetric KMS keys, see Asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
-// in the Key Management Service Developer Guide.
-//
-// Digital signatures are generated and verified by using an asymmetric key pair,
-// such as an RSA or ECC pair that is represented by an asymmetric KMS key.
-// The key owner (or an authorized user) uses their private key to sign a message.
-// Anyone with the public key can verify that the message was signed with that
-// particular private key and that the message hasn't changed since it was signed.
-//
-// To use the Sign operation, provide the following information:
-//
-// * Use the KeyId parameter to identify an asymmetric KMS key with a KeyUsage
-// value of SIGN_VERIFY. To get the KeyUsage value of a KMS key, use the
-// DescribeKey operation. The caller must have kms:Sign permission on the
-// KMS key.
-//
-// * Use the Message parameter to specify the message or message digest to
-// sign. You can submit messages of up to 4096 bytes. To sign a larger message,
-// generate a hash digest of the message, and then provide the hash digest
-// in the Message parameter. To indicate whether the message is a full message
-// or a digest, use the MessageType parameter.
-//
-// * Choose a signing algorithm that is compatible with the KMS key.
-//
-// When signing a message, be sure to record the KMS key and the signing algorithm.
-// This information is required to verify the signature.
-//
-// To verify the signature that this operation generates, use the Verify operation.
-// Or use the GetPublicKey operation to download the public key and then use
-// the public key to verify the signature outside of KMS.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: Yes. To perform this operation with a KMS key in a different
-// Amazon Web Services account, specify the key ARN or alias ARN in the value
-// of the KeyId parameter.
-//
-// Required permissions: kms:Sign (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy)
-//
-// Related operations: Verify
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
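A hedged sketch of the signing flow just described (the alias is a placeholder; the key's KeyUsage must be SIGN_VERIFY): sign a short raw message and record the algorithm, since Verify later needs the same key and algorithm.

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	out, err := client.Sign(&kms.SignInput{
		KeyId:            aws.String("alias/example-signing-key"), // placeholder alias
		Message:          []byte("important message"),             // raw message, up to 4096 bytes
		MessageType:      aws.String(kms.MessageTypeRaw),          // "RAW"; use "DIGEST" for a hash
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature:", base64.StdEncoding.EncodeToString(out.Signature))
}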
-// -// See the AWS API reference guide for AWS Key Management Service's -// API operation Sign for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Sign -func (c *KMS) Sign(input *SignInput) (*SignOutput, error) { - req, out := c.SignRequest(input) - return out, req.Send() -} - -// SignWithContext is the same as Sign with the addition of -// the ability to pass a context and additional request options. -// -// See Sign for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) SignWithContext(ctx aws.Context, input *SignInput, opts ...request.Option) (*SignOutput, error) { - req, out := c.SignRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opTagResource = "TagResource" - -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See TagResource for more information on using the TagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource -func (c *KMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { - op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TagResourceInput{} - } - - output = &TagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// TagResource API operation for AWS Key Management Service. -// -// Adds or edits tags on a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// -// Tagging or untagging a KMS key can allow or deny permission to the KMS key. -// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// Each tag consists of a tag key and a tag value, both of which are case-sensitive -// strings. The tag value can be an empty (null) string. To add a tag, specify -// a new tag key and a tag value. To edit a tag, specify an existing tag key -// and a new tag value. -// -// You can use this operation to tag a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk), -// but you cannot tag an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk), -// an Amazon Web Services owned key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-owned-cmk), -// a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#keystore-concept), -// or an alias (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#alias-concept). -// -// You can also add tags to a KMS key while creating it (CreateKey) or replicating -// it (ReplicateKey). -// -// For information about using tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. 
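A brief sketch of the tagging semantics above (placeholder key ID and tag names; credentials assumed from the environment): specifying a new tag key adds a tag, and re-specifying an existing key overwrites its value.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	_, err := client.TagResource(&kms.TagResourceInput{
		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // customer managed key only
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("Alpha")},
			{TagKey: aws.String("Owner"), TagValue: aws.String("")}, // an empty tag value is allowed
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tags applied; confirm with ListResourceTags")
}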
-// -// Required permissions: kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * ReplicateKey -// -// * UntagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation TagResource for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/TagResource -func (c *KMS) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - return out, req.Send() -} - -// TagResourceWithContext is the same as TagResource with the addition of -// the ability to pass a context and additional request options. -// -// See TagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUntagResource = "UntagResource" - -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UntagResource for more information on using the UntagResource -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UntagResourceRequest method. 
-// req, resp := client.UntagResourceRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource -func (c *KMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { - op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UntagResourceInput{} - } - - output = &UntagResourceOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UntagResource API operation for AWS Key Management Service. -// -// Deletes tags from a customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). -// To delete a tag, specify the tag key and the KMS key. -// -// Tagging or untagging a KMS key can allow or deny permission to the KMS key. -// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// When it succeeds, the UntagResource operation doesn't return any output. -// Also, if the specified tag key isn't found on the KMS key, it doesn't throw -// an exception or return a response. To confirm that the operation worked, -// use the ListResourceTags operation. -// -// For information about using tags in KMS, see Tagging keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). -// For general information about tags, including the format and syntax, see -// Tagging Amazon Web Services resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) -// in the Amazon Web Services General Reference. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions: kms:UntagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * ListResourceTags -// -// * ReplicateKey -// -// * TagResource -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UntagResource for usage and error information. -// -// Returned Error Types: -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. 
-// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// * TagException -// The request was rejected because one or more tags are not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UntagResource -func (c *KMS) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateAlias = "UpdateAlias" - -// UpdateAliasRequest generates a "aws/request.Request" representing the -// client's request for the UpdateAlias operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateAlias for more information on using the UpdateAlias -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateAliasRequest method. -// req, resp := client.UpdateAliasRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias -func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { - op := &request.Operation{ - Name: opUpdateAlias, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateAliasInput{} - } - - output = &UpdateAliasOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateAlias API operation for AWS Key Management Service. -// -// Associates an existing KMS alias with a different KMS key. Each alias is -// associated with only one KMS key at a time, although a KMS key can have multiple -// aliases. The alias and the KMS key must be in the same Amazon Web Services -// account and Region. -// -// Adding, deleting, or updating an alias can allow or deny permission to the -// KMS key. For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) -// in the Key Management Service Developer Guide. -// -// The current and new KMS key must be the same type (both symmetric or both -// asymmetric), and they must have the same key usage (ENCRYPT_DECRYPT or SIGN_VERIFY). 
-// This restriction prevents errors in code that uses aliases. If you must assign -// an alias to a different type of KMS key, use DeleteAlias to delete the old -// alias and CreateAlias to create a new alias. -// -// You cannot use UpdateAlias to change an alias name. To change an alias name, -// use DeleteAlias to delete the old alias and CreateAlias to create a new alias. -// -// Because an alias is not a property of a KMS key, you can create, update, -// and delete the aliases of a KMS key without affecting the KMS key. Also, -// aliases do not appear in the response from the DescribeKey operation. To -// get the aliases of all KMS keys in the account, use the ListAliases operation. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: No. You cannot perform this operation on a KMS key in -// a different Amazon Web Services account. -// -// Required permissions -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the alias (IAM policy). -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the current KMS key (key policy). -// -// * kms:UpdateAlias (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// on the new KMS key (key policy). -// -// For details, see Controlling access to aliases (https://docs.aws.amazon.com/kms/latest/developerguide/kms-alias.html#alias-access) -// in the Key Management Service Developer Guide. -// -// Related operations: -// -// * CreateAlias -// -// * DeleteAlias -// -// * ListAliases -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdateAlias for usage and error information. -// -// Returned Error Types: -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * LimitExceededException -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . 
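A minimal sketch of repointing an alias as described above (alias name and key ID are placeholders); the old and new target keys must have the same type and key usage.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	_, err := client.UpdateAlias(&kms.UpdateAliasInput{
		AliasName:   aws.String("alias/example"),                        // existing alias; the name itself cannot change
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder new target key
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("alias now points at the new KMS key")
}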
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateAlias -func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - return out, req.Send() -} - -// UpdateAliasWithContext is the same as UpdateAlias with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateAlias for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdateAliasWithContext(ctx aws.Context, input *UpdateAliasInput, opts ...request.Option) (*UpdateAliasOutput, error) { - req, out := c.UpdateAliasRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateCustomKeyStore = "UpdateCustomKeyStore" - -// UpdateCustomKeyStoreRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCustomKeyStore operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateCustomKeyStore for more information on using the UpdateCustomKeyStore -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdateCustomKeyStoreRequest method. -// req, resp := client.UpdateCustomKeyStoreRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore -func (c *KMS) UpdateCustomKeyStoreRequest(input *UpdateCustomKeyStoreInput) (req *request.Request, output *UpdateCustomKeyStoreOutput) { - op := &request.Operation{ - Name: opUpdateCustomKeyStore, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateCustomKeyStoreInput{} - } - - output = &UpdateCustomKeyStoreOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// UpdateCustomKeyStore API operation for AWS Key Management Service. -// -// Changes the properties of a custom key store. Use the CustomKeyStoreId parameter -// to identify the custom key store you want to edit. Use the remaining parameters -// to change the properties of the custom key store. -// -// You can only update a custom key store that is disconnected. To disconnect -// the custom key store, use DisconnectCustomKeyStore. To reconnect the custom -// key store after the update completes, use ConnectCustomKeyStore. To find -// the connection state of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// The CustomKeyStoreId parameter is required in all commands. Use the other -// parameters of UpdateCustomKeyStore to edit your key store settings. -// -// * Use the NewCustomKeyStoreName parameter to change the friendly name -// of the custom key store to the value that you specify. 
-//
-// * Use the KeyStorePassword parameter to tell KMS the current password of
-// the kmsuser crypto user (CU) (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser)
-// in the associated CloudHSM cluster. You can use this parameter to fix
-// connection failures (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-password)
-// that occur when KMS cannot log into the associated cluster because the
-// kmsuser password has changed. This value does not change the password
-// in the CloudHSM cluster.
-//
-// * Use the CloudHsmClusterId parameter to associate the custom key store
-// with a different, but related, CloudHSM cluster. You can use this parameter
-// to repair a custom key store if its CloudHSM cluster becomes corrupted
-// or is deleted, or when you need to create or restore a cluster from a
-// backup.
-//
-// If the operation succeeds, it returns a JSON object with no properties.
-//
-// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-// in KMS, which combines the convenience and extensive integration
-// of KMS with the isolation and control of a single-tenant key store.
-//
-// Cross-account use: No. You cannot perform this operation on a custom key
-// store in a different Amazon Web Services account.
-//
-// Required permissions: kms:UpdateCustomKeyStore (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (IAM policy)
-//
-// Related operations:
-//
-// * ConnectCustomKeyStore
-//
-// * CreateCustomKeyStore
-//
-// * DeleteCustomKeyStore
-//
-// * DescribeCustomKeyStores
-//
-// * DisconnectCustomKeyStore
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation UpdateCustomKeyStore for usage and error information.
-//
-// Returned Error Types:
-// * CustomKeyStoreNotFoundException
-// The request was rejected because KMS cannot find a custom key store with
-// the specified key store name or ID.
-//
-// * CustomKeyStoreNameInUseException
-// The request was rejected because the specified custom key store name is already
-// assigned to another custom key store in the account. Try again with a custom
-// key store name that is unique in the account.
-//
-// * CloudHsmClusterNotFoundException
-// The request was rejected because KMS cannot find the CloudHSM cluster with
-// the specified cluster ID. Retry the request with a different cluster ID.
-//
-// * CloudHsmClusterNotRelatedException
-// The request was rejected because the specified CloudHSM cluster has a different
-// cluster certificate than the original cluster. You cannot use the operation
-// to specify an unrelated cluster.
-//
-// Specify a cluster that shares a backup history with the original cluster.
-// This includes clusters that were created from a backup of the current cluster,
-// and clusters that were created from the same backup that produced the current
-// cluster.
-//
-// Clusters that share a backup history have the same cluster certificate. To
-// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html)
-// operation.
-//
-// * CustomKeyStoreInvalidStateException
-// The request was rejected because of the ConnectionState of the custom key
-// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores
-// operation.
-//
-// This exception is thrown under the following conditions:
-//
-// * You requested the CreateKey or GenerateRandom operation in a custom
-// key store that is not connected. These operations are valid only when
-// the custom key store ConnectionState is CONNECTED.
-//
-// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation
-// on a custom key store that is not disconnected. This operation is valid
-// only when the custom key store ConnectionState is DISCONNECTED.
-//
-// * You requested the ConnectCustomKeyStore operation on a custom key store
-// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid
-// for all other ConnectionState values.
-//
-// * InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
-//
-// * CloudHsmClusterNotActiveException
-// The request was rejected because the CloudHSM cluster that is associated
-// with the custom key store is not active. Initialize and activate the cluster
-// and try the command again. For detailed instructions, see Getting Started
-// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html)
-// in the CloudHSM User Guide.
-//
-// * CloudHsmClusterInvalidConfigurationException
-// The request was rejected because the associated CloudHSM cluster did not
-// meet the configuration requirements for a custom key store.
-//
-// * The cluster must be configured with private subnets in at least two
-// different Availability Zones in the Region.
-//
-// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
-// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
-// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
-// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
-// operation.
-//
-// * The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
-// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
-// operations, the CloudHSM cluster must have at least two active HSMs, each
-// in a different Availability Zone. For the ConnectCustomKeyStore operation,
-// the CloudHSM cluster must contain at least one active HSM.
-//
-// For information about the requirements for a CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
-// in the Key Management Service Developer Guide. For information about creating
-// a private subnet for a CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
-// in the CloudHSM User Guide. For information about cluster security groups,
-// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
-// in the CloudHSM User Guide .
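As a sketch of the update flow documented above (the store ID and name are placeholders), rename a custom key store; the store must already be disconnected via DisconnectCustomKeyStore, or the call fails with CustomKeyStoreInvalidStateException.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	// The key store must be disconnected before it can be updated.
	_, err := client.UpdateCustomKeyStore(&kms.UpdateCustomKeyStoreInput{
		CustomKeyStoreId:      aws.String("cks-1234567890abcdef0"), // placeholder store ID (always required)
		NewCustomKeyStoreName: aws.String("ExampleKeyStoreV2"),     // optional friendly-name change
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key store updated; reconnect with ConnectCustomKeyStore")
}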
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateCustomKeyStore
-func (c *KMS) UpdateCustomKeyStore(input *UpdateCustomKeyStoreInput) (*UpdateCustomKeyStoreOutput, error) {
- req, out := c.UpdateCustomKeyStoreRequest(input)
- return out, req.Send()
-}
-
-// UpdateCustomKeyStoreWithContext is the same as UpdateCustomKeyStore with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateCustomKeyStore for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *KMS) UpdateCustomKeyStoreWithContext(ctx aws.Context, input *UpdateCustomKeyStoreInput, opts ...request.Option) (*UpdateCustomKeyStoreOutput, error) {
- req, out := c.UpdateCustomKeyStoreRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateKeyDescription = "UpdateKeyDescription"
-
-// UpdateKeyDescriptionRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateKeyDescription operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateKeyDescription for more information on using the UpdateKeyDescription
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-// // Example sending a request using the UpdateKeyDescriptionRequest method.
-// req, resp := client.UpdateKeyDescriptionRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription
-func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req *request.Request, output *UpdateKeyDescriptionOutput) {
- op := &request.Operation{
- Name: opUpdateKeyDescription,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateKeyDescriptionInput{}
- }
-
- output = &UpdateKeyDescriptionOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// UpdateKeyDescription API operation for AWS Key Management Service.
-//
-// Updates the description of a KMS key. To see the description of a KMS key,
-// use DescribeKey.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: No. You cannot perform this operation on a KMS key in
-// a different Amazon Web Services account.
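A one-call sketch of the operation just described, with a placeholder key ID and description:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	_, err := client.UpdateKeyDescription(&kms.UpdateKeyDescriptionInput{
		KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"), // placeholder key ID
		Description: aws.String("Key for encrypting build artifacts"),   // new description
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("description updated; confirm with DescribeKey")
}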
-// -// Required permissions: kms:UpdateKeyDescription (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations -// -// * CreateKey -// -// * DescribeKey -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation UpdateKeyDescription for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * InvalidArnException -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. -// -// * DependencyTimeoutException -// The system timed out while trying to fulfill the request. The request can -// be retried. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdateKeyDescription -func (c *KMS) UpdateKeyDescription(input *UpdateKeyDescriptionInput) (*UpdateKeyDescriptionOutput, error) { - req, out := c.UpdateKeyDescriptionRequest(input) - return out, req.Send() -} - -// UpdateKeyDescriptionWithContext is the same as UpdateKeyDescription with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateKeyDescription for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) UpdateKeyDescriptionWithContext(ctx aws.Context, input *UpdateKeyDescriptionInput, opts ...request.Option) (*UpdateKeyDescriptionOutput, error) { - req, out := c.UpdateKeyDescriptionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdatePrimaryRegion = "UpdatePrimaryRegion" - -// UpdatePrimaryRegionRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePrimaryRegion operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdatePrimaryRegion for more information on using the UpdatePrimaryRegion -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the UpdatePrimaryRegionRequest method. 
-// req, resp := client.UpdatePrimaryRegionRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegion
-func (c *KMS) UpdatePrimaryRegionRequest(input *UpdatePrimaryRegionInput) (req *request.Request, output *UpdatePrimaryRegionOutput) {
- op := &request.Operation{
- Name: opUpdatePrimaryRegion,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdatePrimaryRegionInput{}
- }
-
- output = &UpdatePrimaryRegionOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// UpdatePrimaryRegion API operation for AWS Key Management Service.
-//
-// Changes the primary key of a multi-Region key.
-//
-// This operation changes the replica key in the specified Region to a primary
-// key and changes the former primary key to a replica key. For example, suppose
-// you have a primary key in us-east-1 and a replica key in eu-west-2. If you
-// run UpdatePrimaryRegion with a PrimaryRegion value of eu-west-2, the primary
-// key is now the key in eu-west-2, and the key in us-east-1 becomes a replica
-// key. For details, see Updating the primary Region (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-update)
-// in the Key Management Service Developer Guide.
-//
-// This operation supports multi-Region keys, a KMS feature that lets you create
-// multiple interoperable KMS keys in different Amazon Web Services Regions.
-// Because these KMS keys have the same key ID, key material, and other metadata,
-// you can use them interchangeably to encrypt data in one Amazon Web Services
-// Region and decrypt it in a different Amazon Web Services Region without re-encrypting
-// the data or making a cross-Region call. For more information about multi-Region
-// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
-// in the Key Management Service Developer Guide.
-//
-// The primary key of a multi-Region key is the source for properties that are
-// always shared by primary and replica keys, including the key material, key
-// ID (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-id),
-// key spec (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-spec),
-// key usage (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-usage),
-// key material origin (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-origin),
-// and automatic key rotation (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html).
-// It's the only key that can be replicated. You cannot delete the primary key
-// (https://docs.aws.amazon.com/kms/latest/APIReference/API_ScheduleKeyDeletion.html)
-// until all replica keys are deleted.
-//
-// The key ID and primary Region that you specify uniquely identify the replica
-// key that will become the primary key. The primary Region must already have
-// a replica key. This operation does not create a KMS key in the specified
-// Region. To find the replica keys, use the DescribeKey operation on the primary
-// key or any replica key. To create a replica key, use the ReplicateKey operation.
-//
-// You can run this operation while using the affected multi-Region keys in
-// cryptographic operations. This operation should not delay, interrupt, or
-// cause failures in cryptographic operations.
-//
-// Even after this operation completes, the process of updating the primary
-// Region might still be in progress for a few more seconds. Operations such
-// as DescribeKey might display both the old and new primary keys as replicas.
-// The old and new primary keys have a transient key state of Updating. The
-// original key state is restored when the update is complete. While the key
-// state is Updating, you can use the keys in cryptographic operations, but
-// you cannot replicate the new primary key or perform certain management operations,
-// such as enabling or disabling these keys. For details about the Updating
-// key state, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// This operation does not return any output. To verify that the primary key is
-// changed, use the DescribeKey operation.
-//
-// Cross-account use: No. You cannot use this operation in a different Amazon
-// Web Services account.
-//
-// Required permissions:
-//
-// * kms:UpdatePrimaryRegion on the current primary key (in the primary key's
-// Region). Include this permission in the primary key's key policy.
-//
-// * kms:UpdatePrimaryRegion on the current replica key (in the replica key's
-// Region). Include this permission in the replica key's key policy.
-//
-// Related operations
-//
-// * CreateKey
-//
-// * ReplicateKey
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation UpdatePrimaryRegion for usage and error information.
-//
-// Returned Error Types:
-// * DisabledException
-// The request was rejected because the specified KMS key is not enabled.
-//
-// * InvalidArnException
-// The request was rejected because a specified ARN, or an ARN in a key policy,
-// is not valid.
-//
-// * InvalidStateException
-// The request was rejected because the state of the specified resource is not
-// valid for this request.
-//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
-//
-// * InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
-//
-// * NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// * UnsupportedOperationException
-// The request was rejected because a specified parameter is not supported or
-// a specified resource is not valid for this operation.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/UpdatePrimaryRegion
-func (c *KMS) UpdatePrimaryRegion(input *UpdatePrimaryRegionInput) (*UpdatePrimaryRegionOutput, error) {
- req, out := c.UpdatePrimaryRegionRequest(input)
- return out, req.Send()
-}
-
-// UpdatePrimaryRegionWithContext is the same as UpdatePrimaryRegion with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdatePrimaryRegion for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *KMS) UpdatePrimaryRegionWithContext(ctx aws.Context, input *UpdatePrimaryRegionInput, opts ...request.Option) (*UpdatePrimaryRegionOutput, error) {
- req, out := c.UpdatePrimaryRegionRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
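A sketch using the context-aware variant defined above (the multi-Region key ID and Region are placeholders); the named Region must already contain a replica of the key.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	client := kms.New(session.Must(session.NewSession()))
	// aws.BackgroundContext returns a non-nil aws.Context, as the comment above requires.
	_, err := client.UpdatePrimaryRegionWithContext(aws.BackgroundContext(), &kms.UpdatePrimaryRegionInput{
		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"), // placeholder multi-Region key ID
		PrimaryRegion: aws.String("eu-west-2"),                            // Region that already holds a replica
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("primary Region update started; confirm with DescribeKey")
}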
-const opVerify = "Verify"
-
-// VerifyRequest generates a "aws/request.Request" representing the
-// client's request for the Verify operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See Verify for more information on using the Verify
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-//
-// // Example sending a request using the VerifyRequest method.
-// req, resp := client.VerifyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify
-func (c *KMS) VerifyRequest(input *VerifyInput) (req *request.Request, output *VerifyOutput) {
- op := &request.Operation{
- Name: opVerify,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &VerifyInput{}
- }
-
- output = &VerifyOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// Verify API operation for AWS Key Management Service.
-//
-// Verifies a digital signature that was generated by the Sign operation.
-//
-// Verification confirms that an authorized user signed the message with the
-// specified KMS key and signing algorithm, and the message hasn't changed since
-// it was signed. If the signature is verified, the value of the SignatureValid
-// field in the response is True. If the signature verification fails, the Verify
-// operation fails with a KMSInvalidSignatureException exception.
-//
-// A digital signature is generated by using the private key in an asymmetric
-// KMS key. The signature is verified by using the public key in the same asymmetric
-// KMS key. For information about asymmetric KMS keys, see Asymmetric KMS keys
-// (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
-// in the Key Management Service Developer Guide.
-//
-// To verify a digital signature, you can use the Verify operation. Specify
-// the same asymmetric KMS key, message, and signing algorithm that were used
-// to produce the signature.
-//
-// You can also verify the digital signature by using the public key of the
-// KMS key outside of KMS. Use the GetPublicKey operation to download the public
-// key in the asymmetric KMS key and then use the public key to verify the signature
-// outside of KMS. The advantage of using the Verify operation is that it is
-// performed within KMS. As a result, it's easy to call, the operation is performed
-// within the FIPS boundary, it is logged in CloudTrail, and you can use key
-// policy and IAM policy to determine who is authorized to use the KMS key to
-// verify signatures.
-//
-// The KMS key that you use for this operation must be in a compatible key state.
-// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide.
-//
-// Cross-account use: Yes. To perform this operation with a KMS key in a different
-// Amazon Web Services account, specify the key ARN or alias ARN in the value
-// of the KeyId parameter.
-//
-// Required permissions: kms:Verify (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-// (key policy)
-//
-// Related operations: Sign
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Key Management Service's
-// API operation Verify for usage and error information.
-//
-// Returned Error Types:
-// * NotFoundException
-// The request was rejected because the specified entity or resource could not
-// be found.
-//
-// * DisabledException
-// The request was rejected because the specified KMS key is not enabled.
-//
-// * KeyUnavailableException
-// The request was rejected because the specified KMS key was not available.
-// You can retry the request.
-//
-// * DependencyTimeoutException
-// The system timed out while trying to fulfill the request. The request can
-// be retried.
-//
-// * InvalidKeyUsageException
-// The request was rejected for one of the following reasons:
-//
-// * The KeyUsage value of the KMS key is incompatible with the API operation.
-//
-// * The encryption algorithm or signing algorithm specified for the operation
-// is incompatible with the type of key material in the KMS key (KeySpec).
-//
-// For encrypting, decrypting, re-encrypting, and generating data keys, the
-// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the
-// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication
-// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage
-// of a KMS key, use the DescribeKey operation.
-//
-// To find the encryption or signing algorithms supported for a particular KMS
-// key, use the DescribeKey operation.
-//
-// * InvalidGrantTokenException
-// The request was rejected because the specified grant token is not valid.
-//
-// * InternalException
-// The request was rejected because an internal exception occurred. The request
-// can be retried.
-//
-// * InvalidStateException
-// The request was rejected because the state of the specified resource is not
-// valid for this request.
-//
-// For more information about how key state affects the use of a KMS key, see
-// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
-// in the Key Management Service Developer Guide .
-//
-// * KMSInvalidSignatureException
-// The request was rejected because the signature verification failed. Signature
-// verification fails when it cannot confirm that the signature was produced by
-// signing the specified message with the specified KMS key and signing algorithm.
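A matching sketch for the Sign example earlier (same placeholder alias and message; sig stands in for the bytes returned by Sign): on success SignatureValid is true, while a mismatch makes the call itself fail with KMSInvalidSignatureException.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	var sig []byte // signature bytes produced earlier by Sign (placeholder)
	client := kms.New(session.Must(session.NewSession()))
	out, err := client.Verify(&kms.VerifyInput{
		KeyId:            aws.String("alias/example-signing-key"), // same key that signed
		Message:          []byte("important message"),             // same message that was signed
		MessageType:      aws.String(kms.MessageTypeRaw),
		Signature:        sig,
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256), // same algorithm
	})
	if err != nil {
		log.Fatal(err) // a bad signature surfaces here as KMSInvalidSignatureException
	}
	fmt.Println("signature valid:", aws.BoolValue(out.SignatureValid))
}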
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/Verify -func (c *KMS) Verify(input *VerifyInput) (*VerifyOutput, error) { - req, out := c.VerifyRequest(input) - return out, req.Send() -} - -// VerifyWithContext is the same as Verify with the addition of -// the ability to pass a context and additional request options. -// -// See Verify for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *KMS) VerifyWithContext(ctx aws.Context, input *VerifyInput, opts ...request.Option) (*VerifyOutput, error) { - req, out := c.VerifyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opVerifyMac = "VerifyMac" - -// VerifyMacRequest generates a "aws/request.Request" representing the -// client's request for the VerifyMac operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See VerifyMac for more information on using the VerifyMac -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the VerifyMacRequest method. -// req, resp := client.VerifyMacRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMac -func (c *KMS) VerifyMacRequest(input *VerifyMacInput) (req *request.Request, output *VerifyMacOutput) { - op := &request.Operation{ - Name: opVerifyMac, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &VerifyMacInput{} - } - - output = &VerifyMacOutput{} - req = c.newRequest(op, input, output) - return -} - -// VerifyMac API operation for AWS Key Management Service. -// -// Verifies the hash-based message authentication code (HMAC) for a specified -// message, HMAC KMS key, and MAC algorithm. To verify the HMAC, VerifyMac computes -// an HMAC using the message, HMAC KMS key, and MAC algorithm that you specify, -// and compares the computed HMAC to the HMAC that you specify. If the HMACs -// are identical, the verification succeeds; otherwise, it fails. -// -// Verification indicates that the message hasn't changed since the HMAC was -// calculated, and the specified key was used to generate and verify the HMAC. -// -// This operation is part of KMS support for HMAC KMS keys. For details, see -// HMAC keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) -// in the Key Management Service Developer Guide. -// -// The KMS key that you use for this operation must be in a compatible key state. -// For details, see Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide. -// -// Cross-account use: Yes. To perform this operation with a KMS key in a different -// Amazon Web Services account, specify the key ARN or alias ARN in the value -// of the KeyId parameter. 
-// -// Required permissions: kms:VerifyMac (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) -// (key policy) -// -// Related operations: GenerateMac -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for AWS Key Management Service's -// API operation VerifyMac for usage and error information. -// -// Returned Error Types: -// * NotFoundException -// The request was rejected because the specified entity or resource could not -// be found. -// -// * DisabledException -// The request was rejected because the specified KMS key is not enabled. -// -// * KeyUnavailableException -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -// -// * InvalidKeyUsageException -// The request was rejected for one of the following reasons: -// -// * The KeyUsage value of the KMS key is incompatible with the API operation. -// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -// -// * InvalidGrantTokenException -// The request was rejected because the specified grant token is not valid. -// -// * InternalException -// The request was rejected because an internal exception occurred. The request -// can be retried. -// -// * KMSInvalidMacException -// The request was rejected because the HMAC verification failed. HMAC verification -// fails when the HMAC computed by using the specified message, HMAC KMS key, -// and MAC algorithm does not match the HMAC specified in the request. -// -// * InvalidStateException -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/VerifyMac -func (c *KMS) VerifyMac(input *VerifyMacInput) (*VerifyMacOutput, error) { - req, out := c.VerifyMacRequest(input) - return out, req.Send() -} - -// VerifyMacWithContext is the same as VerifyMac with the addition of -// the ability to pass a context and additional request options. -// -// See VerifyMac for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
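A minimal sketch (not part of this change) of VerifyMacWithContext with a cancellable context, as the comment above suggests; the key ID, message, and MAC bytes are placeholders.

// Sketch: HMAC verification with request cancellation via a context.
// Key ID, message, and MAC bytes are placeholders.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// aws.Context is satisfied by a standard context.Context.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := svc.VerifyMacWithContext(ctx, &kms.VerifyMacInput{
		KeyId:        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Message:      []byte("hello"),
		Mac:          []byte{ /* from GenerateMac */ },
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
	})
	if err != nil {
		log.Fatal(err) // includes KMSInvalidMacException when the HMACs differ
	}
	fmt.Println("MacValid:", aws.BoolValue(out.MacValid))
}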
-func (c *KMS) VerifyMacWithContext(ctx aws.Context, input *VerifyMacInput, opts ...request.Option) (*VerifyMacOutput, error) { - req, out := c.VerifyMacRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Contains information about an alias. -type AliasListEntry struct { - _ struct{} `type:"structure"` - - // String that contains the key ARN. - AliasArn *string `min:"20" type:"string"` - - // String that contains the alias. This value begins with alias/. - AliasName *string `min:"1" type:"string"` - - // Date and time that the alias was most recently created in the account and - // Region. Formatted as Unix time. - CreationDate *time.Time `type:"timestamp"` - - // Date and time that the alias was most recently associated with a KMS key - // in the account and Region. Formatted as Unix time. - LastUpdatedDate *time.Time `type:"timestamp"` - - // String that contains the key identifier of the KMS key associated with the - // alias. - TargetKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AliasListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AliasListEntry) GoString() string { - return s.String() -} - -// SetAliasArn sets the AliasArn field's value. -func (s *AliasListEntry) SetAliasArn(v string) *AliasListEntry { - s.AliasArn = &v - return s -} - -// SetAliasName sets the AliasName field's value. -func (s *AliasListEntry) SetAliasName(v string) *AliasListEntry { - s.AliasName = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *AliasListEntry) SetCreationDate(v time.Time) *AliasListEntry { - s.CreationDate = &v - return s -} - -// SetLastUpdatedDate sets the LastUpdatedDate field's value. -func (s *AliasListEntry) SetLastUpdatedDate(v time.Time) *AliasListEntry { - s.LastUpdatedDate = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *AliasListEntry) SetTargetKeyId(v string) *AliasListEntry { - s.TargetKeyId = &v - return s -} - -// The request was rejected because it attempted to create a resource that already -// exists. -type AlreadyExistsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s AlreadyExistsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s AlreadyExistsException) GoString() string { - return s.String() -} - -func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { - return &AlreadyExistsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *AlreadyExistsException) Code() string { - return "AlreadyExistsException" -} - -// Message returns the exception's message. -func (s *AlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *AlreadyExistsException) OrigErr() error { - return nil -} - -func (s *AlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *AlreadyExistsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *AlreadyExistsException) RequestID() string { - return s.RespMetadata.RequestID -} - -type CancelKeyDeletionInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key whose deletion is being canceled. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CancelKeyDeletionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelKeyDeletionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *CancelKeyDeletionInput) SetKeyId(v string) *CancelKeyDeletionInput { - s.KeyId = &v - return s -} - -type CancelKeyDeletionOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is canceled. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s CancelKeyDeletionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CancelKeyDeletionOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput { - s.KeyId = &v - return s -} - -// The request was rejected because the specified CloudHSM cluster is already -// associated with a custom key store or it shares a backup history with a cluster -// that is associated with a custom key store. Each custom key store must be -// associated with a different CloudHSM cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -type CloudHsmClusterInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterInUseException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterInUseException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterInUseException) Code() string { - return "CloudHsmClusterInUseException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterInUseException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the associated CloudHSM cluster did not -// meet the configuration requirements for a custom key store. -// -// * The cluster must be configured with private subnets in at least two -// different Availability Zones in the Region. 
-//
-// * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
-// (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound
-// rules that allow TCP traffic on ports 2223-2225. The Source in the inbound
-// rules and the Destination in the outbound rules must match the security
-// group ID. These rules are set by default when you create the cluster.
-// Do not delete or change them. To get information about a particular security
-// group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html)
-// operation.
-//
-// * The cluster must contain at least as many HSMs as the operation requires.
-// To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html)
-// operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey
-// operations, the CloudHSM cluster must have at least two active HSMs, each
-// in a different Availability Zone. For the ConnectCustomKeyStore operation,
-// the CloudHSM cluster must contain at least one active HSM.
-//
-// For information about the requirements for a CloudHSM cluster that is associated
-// with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore)
-// in the Key Management Service Developer Guide. For information about creating
-// a private subnet for a CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html)
-// in the CloudHSM User Guide. For information about cluster security groups,
-// see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html)
-// in the CloudHSM User Guide.
-type CloudHsmClusterInvalidConfigurationException struct {
-	_ struct{} `type:"structure"`
-	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
-	Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CloudHsmClusterInvalidConfigurationException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CloudHsmClusterInvalidConfigurationException) GoString() string {
-	return s.String()
-}
-
-func newErrorCloudHsmClusterInvalidConfigurationException(v protocol.ResponseMetadata) error {
-	return &CloudHsmClusterInvalidConfigurationException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *CloudHsmClusterInvalidConfigurationException) Code() string {
-	return "CloudHsmClusterInvalidConfigurationException"
-}
-
-// Message returns the exception's message.
-func (s *CloudHsmClusterInvalidConfigurationException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *CloudHsmClusterInvalidConfigurationException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterInvalidConfigurationException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterInvalidConfigurationException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the CloudHSM cluster that is associated -// with the custom key store is not active. Initialize and activate the cluster -// and try the command again. For detailed instructions, see Getting Started -// (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) -// in the CloudHSM User Guide. -type CloudHsmClusterNotActiveException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotActiveException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotActiveException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotActiveException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotActiveException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterNotActiveException) Code() string { - return "CloudHsmClusterNotActiveException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotActiveException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotActiveException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotActiveException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotActiveException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotActiveException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because KMS cannot find the CloudHSM cluster with -// the specified cluster ID. Retry the request with a different cluster ID. -type CloudHsmClusterNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotFoundException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotFoundException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CloudHsmClusterNotFoundException) Code() string { - return "CloudHsmClusterNotFoundException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotFoundException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified CloudHSM cluster has a different -// cluster certificate than the original cluster. You cannot use the operation -// to specify an unrelated cluster. -// -// Specify a cluster that shares a backup history with the original cluster. -// This includes clusters that were created from a backup of the current cluster, -// and clusters that were created from the same backup that produced the current -// cluster. -// -// Clusters that share a backup history have the same cluster certificate. To -// view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) -// operation. -type CloudHsmClusterNotRelatedException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotRelatedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CloudHsmClusterNotRelatedException) GoString() string { - return s.String() -} - -func newErrorCloudHsmClusterNotRelatedException(v protocol.ResponseMetadata) error { - return &CloudHsmClusterNotRelatedException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. 
-func (s *CloudHsmClusterNotRelatedException) Code() string { - return "CloudHsmClusterNotRelatedException" -} - -// Message returns the exception's message. -func (s *CloudHsmClusterNotRelatedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CloudHsmClusterNotRelatedException) OrigErr() error { - return nil -} - -func (s *CloudHsmClusterNotRelatedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CloudHsmClusterNotRelatedException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CloudHsmClusterNotRelatedException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ConnectCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Enter the key store ID of the custom key store that you want to connect. - // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ConnectCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConnectCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *ConnectCustomKeyStoreInput) SetCustomKeyStoreId(v string) *ConnectCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type ConnectCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ConnectCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
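A minimal sketch (not part of this change) of ConnectCustomKeyStore; the key store ID is a placeholder. The call returns before the connection process completes, so the connection state must be checked separately with DescribeCustomKeyStores.

// Sketch: connect a custom key store. The ID is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The connection process continues in the background after this returns;
	// poll DescribeCustomKeyStores for the resulting connection state.
	_, err := svc.ConnectCustomKeyStore(&kms.ConnectCustomKeyStoreInput{
		CustomKeyStoreId: aws.String("cks-1234567890abcdef0"),
	})
	if err != nil {
		log.Fatal(err)
	}
}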
-func (s ConnectCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type CreateAliasInput struct { - _ struct{} `type:"structure"` - - // Specifies the alias name. This value must begin with alias/ followed by a - // name, such as alias/ExampleAlias. - // - // The AliasName value must be string of 1-256 characters. It can contain only - // alphanumeric characters, forward slashes (/), underscores (_), and dashes - // (-). The alias name cannot begin with alias/aws/. The alias/aws/ prefix is - // reserved for Amazon Web Services managed keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). - // - // AliasName is a required field - AliasName *string `min:"1" type:"string" required:"true"` - - // Associates the alias with the specified customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk). - // The KMS key must be in the same Amazon Web Services Region. - // - // A valid key ID is required. If you supply a null or empty string value, this - // operation returns an error. - // - // For help finding the key ID and ARN, see Finding the Key ID and ARN (https://docs.aws.amazon.com/kms/latest/developerguide/viewing-keys.html#find-cmk-id-arn) - // in the Key Management Service Developer Guide . - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // TargetKeyId is a required field - TargetKeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.TargetKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) - } - if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasName sets the AliasName field's value. -func (s *CreateAliasInput) SetAliasName(v string) *CreateAliasInput { - s.AliasName = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *CreateAliasInput) SetTargetKeyId(v string) *CreateAliasInput { - s.TargetKeyId = &v - return s -} - -type CreateAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateAliasOutput) GoString() string { - return s.String() -} - -type CreateCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Identifies the CloudHSM cluster for the custom key store. Enter the cluster - // ID of any active CloudHSM cluster that is not already associated with a custom - // key store. To find the cluster ID, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - // - // CloudHsmClusterId is a required field - CloudHsmClusterId *string `min:"19" type:"string" required:"true"` - - // Specifies a friendly name for the custom key store. The name must be unique - // in your Amazon Web Services account. - // - // CustomKeyStoreName is a required field - CustomKeyStoreName *string `min:"1" type:"string" required:"true"` - - // Enter the password of the kmsuser crypto user (CU) account (https://docs.aws.amazon.com/kms/latest/developerguide/key-store-concepts.html#concept-kmsuser) - // in the specified CloudHSM cluster. KMS logs into the cluster as this user - // to manage key material on your behalf. - // - // The password must be a string of 7 to 32 characters. Its value is case sensitive. - // - // This parameter tells KMS the kmsuser account password; it does not change - // the password in the CloudHSM cluster. - // - // KeyStorePassword is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by CreateCustomKeyStoreInput's - // String and GoString methods. - // - // KeyStorePassword is a required field - KeyStorePassword *string `min:"7" type:"string" required:"true" sensitive:"true"` - - // Enter the content of the trust anchor certificate for the cluster. This is - // the content of the customerCA.crt file that you created when you initialized - // the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html). - // - // TrustAnchorCertificate is a required field - TrustAnchorCertificate *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCustomKeyStoreInput"} - if s.CloudHsmClusterId == nil { - invalidParams.Add(request.NewErrParamRequired("CloudHsmClusterId")) - } - if s.CloudHsmClusterId != nil && len(*s.CloudHsmClusterId) < 19 { - invalidParams.Add(request.NewErrParamMinLen("CloudHsmClusterId", 19)) - } - if s.CustomKeyStoreName == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreName")) - } - if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1)) - } - if s.KeyStorePassword == nil { - invalidParams.Add(request.NewErrParamRequired("KeyStorePassword")) - } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) - } - if s.TrustAnchorCertificate == nil { - invalidParams.Add(request.NewErrParamRequired("TrustAnchorCertificate")) - } - if s.TrustAnchorCertificate != nil && len(*s.TrustAnchorCertificate) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TrustAnchorCertificate", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *CreateCustomKeyStoreInput) SetCloudHsmClusterId(v string) *CreateCustomKeyStoreInput { - s.CloudHsmClusterId = &v - return s -} - -// SetCustomKeyStoreName sets the CustomKeyStoreName field's value. -func (s *CreateCustomKeyStoreInput) SetCustomKeyStoreName(v string) *CreateCustomKeyStoreInput { - s.CustomKeyStoreName = &v - return s -} - -// SetKeyStorePassword sets the KeyStorePassword field's value. -func (s *CreateCustomKeyStoreInput) SetKeyStorePassword(v string) *CreateCustomKeyStoreInput { - s.KeyStorePassword = &v - return s -} - -// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value. -func (s *CreateCustomKeyStoreInput) SetTrustAnchorCertificate(v string) *CreateCustomKeyStoreInput { - s.TrustAnchorCertificate = &v - return s -} - -type CreateCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` - - // A unique identifier for the new custom key store. - CustomKeyStoreId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateCustomKeyStoreOutput) GoString() string { - return s.String() -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *CreateCustomKeyStoreOutput) SetCustomKeyStoreId(v string) *CreateCustomKeyStoreOutput { - s.CustomKeyStoreId = &v - return s -} - -type CreateGrantInput struct { - _ struct{} `type:"structure"` - - // Specifies a grant constraint. - // - // KMS supports the EncryptionContextEquals and EncryptionContextSubset grant - // constraints. Each constraint value can include up to 8 encryption context - // pairs. The encryption context value in each constraint cannot exceed 384 - // characters. 
For information about grant constraints, see Using grant constraints - // (https://docs.aws.amazon.com/kms/latest/developerguide/create-grant-overview.html#grant-constraints) - // in the Key Management Service Developer Guide. For more information about - // encryption context, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide . - // - // The encryption context grant constraints allow the permissions in the grant - // only when the encryption context in the request matches (EncryptionContextEquals) - // or includes (EncryptionContextSubset) the encryption context specified in - // this structure. - // - // The encryption context grant constraints are supported only on grant operations - // (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) - // that include an EncryptionContext parameter, such as cryptographic operations - // on symmetric encryption KMS keys. Grants with grant constraints can include - // the DescribeKey and RetireGrant operations, but the constraint doesn't apply - // to these operations. If a grant with a grant constraint includes the CreateGrant - // operation, the constraint requires that any grants created with the CreateGrant - // permission have an equally strict or stricter encryption context constraint. - // - // You cannot use an encryption context grant constraint for cryptographic operations - // with asymmetric KMS keys or HMAC KMS keys. These keys don't support an encryption - // context. - Constraints *GrantConstraints `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // The identity that gets the permissions specified in the grant. - // - // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, IAM roles, federated - // users, and assumed role users. For examples of the ARN syntax to use for - // specifying a principal, see Amazon Web Services Identity and Access Management - // (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // GranteePrincipal is a required field - GranteePrincipal *string `min:"1" type:"string" required:"true"` - - // Identifies the KMS key for the grant. The grant gives principals permission - // to use this KMS key. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. 
- // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // A friendly name for the grant. Use this value to prevent the unintended creation - // of duplicate grants when retrying this request. - // - // When this value is absent, all CreateGrant requests result in a new grant - // with a unique GrantId even if all the supplied parameters are identical. - // This can result in unintended duplicates when you retry the CreateGrant request. - // - // When this value is present, you can retry a CreateGrant request with identical - // parameters; if the grant already exists, the original GrantId is returned - // without creating a new grant. Note that the returned grant token is unique - // with every CreateGrant request, even when a duplicate GrantId is returned. - // All grant tokens for the same grant ID can be used interchangeably. - Name *string `min:"1" type:"string"` - - // A list of operations that the grant permits. - // - // This list must include only operations that are permitted in a grant. Also, - // the operation must be supported on the KMS key. For example, you cannot create - // a grant for a symmetric encryption KMS key that allows the Sign operation, - // or a grant for an asymmetric KMS key that allows the GenerateDataKey operation. - // If you try, KMS returns a ValidationError exception. For details, see Grant - // operations (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-grant-operations) - // in the Key Management Service Developer Guide. - // - // Operations is a required field - Operations []*string `type:"list" required:"true" enum:"GrantOperation"` - - // The principal that has permission to use the RetireGrant operation to retire - // the grant. - // - // To specify the principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, federated users, - // and assumed role users. For examples of the ARN syntax to use for specifying - // a principal, see Amazon Web Services Identity and Access Management (IAM) - // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // The grant determines the retiring principal. Other principals might have - // permission to retire the grant or revoke the grant. For details, see RevokeGrant - // and Retiring and revoking grants (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#grant-delete) - // in the Key Management Service Developer Guide. - RetiringPrincipal *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGrantInput"} - if s.GranteePrincipal == nil { - invalidParams.Add(request.NewErrParamRequired("GranteePrincipal")) - } - if s.GranteePrincipal != nil && len(*s.GranteePrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GranteePrincipal", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Operations == nil { - invalidParams.Add(request.NewErrParamRequired("Operations")) - } - if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetConstraints sets the Constraints field's value. -func (s *CreateGrantInput) SetConstraints(v *GrantConstraints) *CreateGrantInput { - s.Constraints = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *CreateGrantInput) SetGrantTokens(v []*string) *CreateGrantInput { - s.GrantTokens = v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. -func (s *CreateGrantInput) SetGranteePrincipal(v string) *CreateGrantInput { - s.GranteePrincipal = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *CreateGrantInput) SetKeyId(v string) *CreateGrantInput { - s.KeyId = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateGrantInput) SetName(v string) *CreateGrantInput { - s.Name = &v - return s -} - -// SetOperations sets the Operations field's value. -func (s *CreateGrantInput) SetOperations(v []*string) *CreateGrantInput { - s.Operations = v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *CreateGrantInput) SetRetiringPrincipal(v string) *CreateGrantInput { - s.RetiringPrincipal = &v - return s -} - -type CreateGrantOutput struct { - _ struct{} `type:"structure"` - - // The unique identifier for the grant. - // - // You can use the GrantId in a ListGrants, RetireGrant, or RevokeGrant operation. - GrantId *string `min:"1" type:"string"` - - // The grant token. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
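A minimal sketch (not part of this change) of CreateGrant tying together the fields above: an encryption-context constraint, an idempotency Name, and a permitted-operations list. The ARNs and the grant name are placeholders.

// Sketch: create a grant that allows Encrypt/Decrypt only when the request's
// encryption context includes the given pair. ARNs and name are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	out, err := svc.CreateGrant(&kms.CreateGrantInput{
		KeyId:            aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleRole"),
		Name:             aws.String("example-grant"), // makes retries idempotent
		Operations: []*string{
			aws.String(kms.GrantOperationEncrypt),
			aws.String(kms.GrantOperationDecrypt),
		},
		Constraints: &kms.GrantConstraints{
			EncryptionContextSubset: map[string]*string{
				"purpose": aws.String("test"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("GrantId:", aws.StringValue(out.GrantId))
}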
-func (s CreateGrantOutput) GoString() string {
-	return s.String()
-}
-
-// SetGrantId sets the GrantId field's value.
-func (s *CreateGrantOutput) SetGrantId(v string) *CreateGrantOutput {
-	s.GrantId = &v
-	return s
-}
-
-// SetGrantToken sets the GrantToken field's value.
-func (s *CreateGrantOutput) SetGrantToken(v string) *CreateGrantOutput {
-	s.GrantToken = &v
-	return s
-}
-
-type CreateKeyInput struct {
-	_ struct{} `type:"structure"`
-
-	// A flag to indicate whether to bypass the key policy lockout safety check.
-	//
-	// Setting this value to true increases the risk that the KMS key becomes unmanageable.
-	// Do not set this value to true indiscriminately.
-	//
-	// For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
-	// section in the Key Management Service Developer Guide.
-	//
-	// Use this parameter only when you include a policy in the request and you
-	// intend to prevent the principal that is making the request from making a
-	// subsequent PutKeyPolicy request on the KMS key.
-	//
-	// The default value is false.
-	BypassPolicyLockoutSafetyCheck *bool `type:"boolean"`
-
-	// Creates the KMS key in the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-	// and the key material in its associated CloudHSM cluster. To create a KMS
-	// key in a custom key store, you must also specify the Origin parameter with
-	// a value of AWS_CLOUDHSM. The CloudHSM cluster that is associated with the
-	// custom key store must have at least two active HSMs, each in a different
-	// Availability Zone in the Region.
-	//
-	// This parameter is valid only for symmetric encryption KMS keys in a single
-	// Region. You cannot create any other type of KMS key in a custom key store.
-	//
-	// To find the ID of a custom key store, use the DescribeCustomKeyStores operation.
-	// The response includes the custom key store ID and the ID of the CloudHSM
-	// cluster.
-	//
-	// This operation is part of the Custom Key Store feature (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-	// in KMS, which combines the convenience and extensive integration of KMS
-	// with the isolation and control of a single-tenant key store.
-	CustomKeyStoreId *string `min:"1" type:"string"`
-
-	// This parameter has been deprecated. Instead, use the KeySpec parameter.
-	//
-	// The KeySpec and CustomerMasterKeySpec parameters work the same way. Only
-	// the names differ. We recommend that you use the KeySpec parameter in your
-	// code. However, to avoid breaking changes, KMS will support both parameters.
-	//
-	// Deprecated: This parameter has been deprecated. Instead, use the KeySpec parameter.
-	CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
-
-	// A description of the KMS key.
-	//
-	// Use a description that helps you decide whether the KMS key is appropriate
-	// for a task. The default value is an empty string (no description).
-	//
-	// To set or change the description after the key is created, use UpdateKeyDescription.
-	Description *string `type:"string"`
-
-	// Specifies the type of KMS key to create. The default value, SYMMETRIC_DEFAULT,
-	// creates a KMS key with a 256-bit symmetric key for encryption and decryption.
-	// For help choosing a key spec for your KMS key, see Choosing a KMS key type
-	// (https://docs.aws.amazon.com/kms/latest/developerguide/key-types.html#symm-asymm-choose)
-	// in the Key Management Service Developer Guide.
-	//
-	// The KeySpec determines whether the KMS key contains a symmetric key or an
-	// asymmetric key pair. It also determines the algorithms that the KMS key supports.
-	// You can't change the KeySpec after the KMS key is created. To further restrict
-	// the algorithms that can be used with the KMS key, use a condition key in
-	// its key policy or IAM policy. For more information, see kms:EncryptionAlgorithm
-	// (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-algorithm),
-	// kms:MacAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-mac-algorithm)
-	// or kms:SigningAlgorithm (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-signing-algorithm)
-	// in the Key Management Service Developer Guide.
-	//
-	// Amazon Web Services services that are integrated with KMS (http://aws.amazon.com/kms/features/#AWS_Service_Integration)
-	// use symmetric encryption KMS keys to protect your data. These services do
-	// not support asymmetric KMS keys or HMAC KMS keys.
-	//
-	// KMS supports the following key specs for KMS keys:
-	//
-	//    * Symmetric encryption key (default) SYMMETRIC_DEFAULT (AES-256-GCM)
-	//
-	//    * HMAC keys (symmetric) HMAC_224 HMAC_256 HMAC_384 HMAC_512
-	//
-	//    * Asymmetric RSA key pairs RSA_2048 RSA_3072 RSA_4096
-	//
-	//    * Asymmetric NIST-recommended elliptic curve key pairs ECC_NIST_P256 (secp256r1)
-	//    ECC_NIST_P384 (secp384r1) ECC_NIST_P521 (secp521r1)
-	//
-	//    * Other asymmetric elliptic curve key pairs ECC_SECG_P256K1 (secp256k1),
-	//    commonly used for cryptocurrencies.
-	KeySpec *string `type:"string" enum:"KeySpec"`
-
-	// Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
-	// for which you can use the KMS key. The default value is ENCRYPT_DECRYPT.
-	// This parameter is optional when you are creating a symmetric encryption KMS
-	// key; otherwise, it is required. You can't change the KeyUsage value after
-	// the KMS key is created.
-	//
-	// Select only one valid value.
-	//
-	//    * For symmetric encryption KMS keys, omit the parameter or specify ENCRYPT_DECRYPT.
-	//
-	//    * For HMAC KMS keys (symmetric), specify GENERATE_VERIFY_MAC.
-	//
-	//    * For asymmetric KMS keys with RSA key material, specify ENCRYPT_DECRYPT
-	//    or SIGN_VERIFY.
-	//
-	//    * For asymmetric KMS keys with ECC key material, specify SIGN_VERIFY.
-	KeyUsage *string `type:"string" enum:"KeyUsageType"`
-
-	// Creates a multi-Region primary key that you can replicate into other Amazon
-	// Web Services Regions. You cannot change this value after you create the KMS
-	// key.
-	//
-	// For a multi-Region key, set this parameter to True. For a single-Region KMS
-	// key, omit this parameter or set it to False. The default value is False.
-	//
-	// This operation supports multi-Region keys, a KMS feature that lets you create
-	// multiple interoperable KMS keys in different Amazon Web Services Regions.
-	// Because these KMS keys have the same key ID, key material, and other metadata,
-	// you can use them interchangeably to encrypt data in one Amazon Web Services
-	// Region and decrypt it in a different Amazon Web Services Region without re-encrypting
-	// the data or making a cross-Region call. For more information about multi-Region
-	// keys, see Multi-Region keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html)
-	// in the Key Management Service Developer Guide.
-	//
-	// This value creates a primary key, not a replica. To create a replica key,
-	// use the ReplicateKey operation.
-	//
-	// You can create a symmetric or asymmetric multi-Region key, and you can create
-	// a multi-Region key with imported key material. However, you cannot create
-	// a multi-Region key in a custom key store.
-	MultiRegion *bool `type:"boolean"`
-
-	// The source of the key material for the KMS key. You cannot change the origin
-	// after you create the KMS key. The default is AWS_KMS, which means that KMS
-	// creates the key material.
-	//
-	// To create a KMS key with no key material (for imported key material), set
-	// the value to EXTERNAL. For more information about importing key material
-	// into KMS, see Importing Key Material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html)
-	// in the Key Management Service Developer Guide. This value is valid only for
-	// symmetric encryption KMS keys.
-	//
-	// To create a KMS key in a KMS custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html)
-	// and create its key material in the associated CloudHSM cluster, set this
-	// value to AWS_CLOUDHSM. You must also use the CustomKeyStoreId parameter to
-	// identify the custom key store. This value is valid only for symmetric encryption
-	// KMS keys.
-	Origin *string `type:"string" enum:"OriginType"`
-
-	// The key policy to attach to the KMS key.
-	//
-	// If you provide a key policy, it must meet the following criteria:
-	//
-	//    * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy
-	//    must allow the principal that is making the CreateKey request to make
-	//    a subsequent PutKeyPolicy request on the KMS key. This reduces the risk
-	//    that the KMS key becomes unmanageable. For more information, refer to
-	//    the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam)
-	//    section of the Key Management Service Developer Guide.
-	//
-	//    * Each statement in the key policy must contain one or more principals.
-	//    The principals in the key policy must exist and be visible to KMS. When
-	//    you create a new Amazon Web Services principal (for example, an IAM user
-	//    or role), you might need to enforce a delay before including the new principal
-	//    in a key policy because the new principal might not be immediately visible
-	//    to KMS. For more information, see Changes that I make are not always immediately
-	//    visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency)
-	//    in the Amazon Web Services Identity and Access Management User Guide.
-	//
-	// If you do not provide a key policy, KMS attaches a default key policy to
-	// the KMS key. For more information, see Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default)
-	// in the Key Management Service Developer Guide.
-	//
-	// The key policy size quota is 32 kilobytes (32768 bytes).
-	//
-	// For help writing and formatting a JSON policy document, see the IAM JSON
-	// Policy Reference (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html)
-	// in the Identity and Access Management User Guide.
-	Policy *string `min:"1" type:"string"`
-
-	// Assigns one or more tags to the KMS key. Use this parameter to tag the KMS
-	// key when it is created. To tag an existing KMS key, use the TagResource operation.
-	//
-	// Tagging or untagging a KMS key can allow or deny permission to the KMS key.
-	// For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html)
-	// in the Key Management Service Developer Guide.
-	//
-	// To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html)
-	// permission in an IAM policy.
-	//
-	// Each tag consists of a tag key and a tag value. Both the tag key and the
-	// tag value are required, but the tag value can be an empty (null) string.
-	// You cannot have more than one tag on a KMS key with the same tag key. If
-	// you specify an existing tag key with a different tag value, KMS replaces
-	// the current tag value with the specified one.
-	//
-	// When you add tags to an Amazon Web Services resource, Amazon Web Services
-	// generates a cost allocation report with usage and costs aggregated by tags.
-	// Tags can also be used to control access to a KMS key. For details, see Tagging
-	// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html).
-	Tags []*Tag `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateKeyInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateKeyInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateKeyInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreateKeyInput"}
-	if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1))
-	}
-	if s.Policy != nil && len(*s.Policy) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
-	}
-	if s.Tags != nil {
-		for i, v := range s.Tags {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
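-
-// Example (editor's note): a minimal sketch of building a CreateKeyInput with
-// the fluent setters below and validating it client-side before sending. The
-// field values and the svc client (a *KMS service client) are illustrative
-// assumptions, not part of this generated file:
-//
-//	input := &CreateKeyInput{}
-//	input.SetDescription("example data key").
-//		SetKeySpec("SYMMETRIC_DEFAULT").
-//		SetKeyUsage("ENCRYPT_DECRYPT")
-//	if err := input.Validate(); err != nil {
-//		return err // client-side validation failed; nothing was sent
-//	}
-//	out, err := svc.CreateKey(input)
-
-// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value.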
-func (s *CreateKeyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *CreateKeyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *CreateKeyInput) SetCustomKeyStoreId(v string) *CreateKeyInput { - s.CustomKeyStoreId = &v - return s -} - -// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value. -func (s *CreateKeyInput) SetCustomerMasterKeySpec(v string) *CreateKeyInput { - s.CustomerMasterKeySpec = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateKeyInput) SetDescription(v string) *CreateKeyInput { - s.Description = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *CreateKeyInput) SetKeySpec(v string) *CreateKeyInput { - s.KeySpec = &v - return s -} - -// SetKeyUsage sets the KeyUsage field's value. -func (s *CreateKeyInput) SetKeyUsage(v string) *CreateKeyInput { - s.KeyUsage = &v - return s -} - -// SetMultiRegion sets the MultiRegion field's value. -func (s *CreateKeyInput) SetMultiRegion(v bool) *CreateKeyInput { - s.MultiRegion = &v - return s -} - -// SetOrigin sets the Origin field's value. -func (s *CreateKeyInput) SetOrigin(v string) *CreateKeyInput { - s.Origin = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *CreateKeyInput) SetPolicy(v string) *CreateKeyInput { - s.Policy = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateKeyInput) SetTags(v []*Tag) *CreateKeyInput { - s.Tags = v - return s -} - -type CreateKeyOutput struct { - _ struct{} `type:"structure"` - - // Metadata associated with the KMS key. - KeyMetadata *KeyMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateKeyOutput) GoString() string { - return s.String() -} - -// SetKeyMetadata sets the KeyMetadata field's value. -func (s *CreateKeyOutput) SetKeyMetadata(v *KeyMetadata) *CreateKeyOutput { - s.KeyMetadata = v - return s -} - -// The request was rejected because the custom key store contains KMS keys. -// After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion -// operation to delete the KMS keys. After they are deleted, you can delete -// the custom key store. -type CustomKeyStoreHasCMKsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreHasCMKsException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreHasCMKsException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreHasCMKsException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreHasCMKsException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreHasCMKsException) Code() string { - return "CustomKeyStoreHasCMKsException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreHasCMKsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreHasCMKsException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreHasCMKsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreHasCMKsException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreHasCMKsException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because of the ConnectionState of the custom key -// store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores -// operation. -// -// This exception is thrown under the following conditions: -// -// * You requested the CreateKey or GenerateRandom operation in a custom -// key store that is not connected. These operations are valid only when -// the custom key store ConnectionState is CONNECTED. -// -// * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation -// on a custom key store that is not disconnected. This operation is valid -// only when the custom key store ConnectionState is DISCONNECTED. -// -// * You requested the ConnectCustomKeyStore operation on a custom key store -// with a ConnectionState of DISCONNECTING or FAILED. This operation is valid -// for all other ConnectionState values. -type CustomKeyStoreInvalidStateException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreInvalidStateException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreInvalidStateException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreInvalidStateException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreInvalidStateException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreInvalidStateException) Code() string { - return "CustomKeyStoreInvalidStateException" -} - -// Message returns the exception's message. 
-func (s *CustomKeyStoreInvalidStateException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreInvalidStateException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreInvalidStateException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreInvalidStateException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreInvalidStateException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified custom key store name is already -// assigned to another custom key store in the account. Try again with a custom -// key store name that is unique in the account. -type CustomKeyStoreNameInUseException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNameInUseException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CustomKeyStoreNameInUseException) GoString() string { - return s.String() -} - -func newErrorCustomKeyStoreNameInUseException(v protocol.ResponseMetadata) error { - return &CustomKeyStoreNameInUseException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *CustomKeyStoreNameInUseException) Code() string { - return "CustomKeyStoreNameInUseException" -} - -// Message returns the exception's message. -func (s *CustomKeyStoreNameInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *CustomKeyStoreNameInUseException) OrigErr() error { - return nil -} - -func (s *CustomKeyStoreNameInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *CustomKeyStoreNameInUseException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *CustomKeyStoreNameInUseException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because KMS cannot find a custom key store with -// the specified key store name or ID. -type CustomKeyStoreNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
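-
-// Example (editor's note): a hedged sketch of telling these custom key store
-// exceptions apart at a call site. In aws-sdk-go v1 the returned error can be
-// unwrapped to awserr.Error and dispatched on Code(); the err value and the
-// "github.com/aws/aws-sdk-go/aws/awserr" import are assumptions for the sketch:
-//
-//	if aerr, ok := err.(awserr.Error); ok {
-//		switch aerr.Code() {
-//		case "CustomKeyStoreNameInUseException":
-//			// pick a different, account-unique key store name and retry
-//		case "CustomKeyStoreNotFoundException":
-//			// re-check the key store ID or name with DescribeCustomKeyStores
-//		default:
-//			// some other service error
-//		}
-//	}
-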
-func (s CustomKeyStoreNotFoundException) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CustomKeyStoreNotFoundException) GoString() string {
-	return s.String()
-}
-
-func newErrorCustomKeyStoreNotFoundException(v protocol.ResponseMetadata) error {
-	return &CustomKeyStoreNotFoundException{
-		RespMetadata: v,
-	}
-}
-
-// Code returns the exception type name.
-func (s *CustomKeyStoreNotFoundException) Code() string {
-	return "CustomKeyStoreNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *CustomKeyStoreNotFoundException) Message() string {
-	if s.Message_ != nil {
-		return *s.Message_
-	}
-	return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *CustomKeyStoreNotFoundException) OrigErr() error {
-	return nil
-}
-
-func (s *CustomKeyStoreNotFoundException) Error() string {
-	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *CustomKeyStoreNotFoundException) StatusCode() int {
-	return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *CustomKeyStoreNotFoundException) RequestID() string {
-	return s.RespMetadata.RequestID
-}
-
-// Contains information about each custom key store in the custom key store
-// list.
-type CustomKeyStoresListEntry struct {
-	_ struct{} `type:"structure"`
-
-	// A unique identifier for the CloudHSM cluster that is associated with the
-	// custom key store.
-	CloudHsmClusterId *string `min:"19" type:"string"`
-
-	// Describes the connection error. This field appears in the response only when
-	// the ConnectionState is FAILED. For help resolving these errors, see How to
-	// Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
-	// in the Key Management Service Developer Guide.
-	//
-	// Valid values are:
-	//
-	//    * CLUSTER_NOT_FOUND - KMS cannot find the CloudHSM cluster with the specified
-	//    cluster ID.
-	//
-	//    * INSUFFICIENT_CLOUDHSM_HSMS - The associated CloudHSM cluster does not
-	//    contain any active HSMs. To connect a custom key store to its CloudHSM
-	//    cluster, the cluster must contain at least one active HSM.
-	//
-	//    * INTERNAL_ERROR - KMS could not complete the request due to an internal
-	//    error. Retry the request. For ConnectCustomKeyStore requests, disconnect
-	//    the custom key store before trying to connect again.
-	//
-	//    * INVALID_CREDENTIALS - KMS does not have the correct password for the
-	//    kmsuser crypto user in the CloudHSM cluster. Before you can connect your
-	//    custom key store to its CloudHSM cluster, you must change the kmsuser
-	//    account password and update the key store password value for the custom
-	//    key store.
-	//
-	//    * NETWORK_ERRORS - Network errors are preventing KMS from connecting to
-	//    the custom key store.
-	//
-	//    * SUBNET_NOT_FOUND - A subnet in the CloudHSM cluster configuration was
-	//    deleted. If KMS cannot find all of the subnets in the cluster configuration,
-	//    attempts to connect the custom key store to the CloudHSM cluster fail.
-	//    To fix this error, create a cluster from a recent backup and associate
-	//    it with your custom key store. (This process creates a new cluster configuration
-	//    with a VPC and private subnets.) For details, see How to Fix a Connection
-	//    Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed)
-	//    in the Key Management Service Developer Guide.
-	//
-	//    * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated
-	//    CloudHSM cluster due to too many failed password attempts. Before you
-	//    can connect your custom key store to its CloudHSM cluster, you must change
-	//    the kmsuser account password and update the key store password value for
-	//    the custom key store.
-	//
-	//    * USER_LOGGED_IN - The kmsuser CU account is logged into the associated
-	//    CloudHSM cluster. This prevents KMS from rotating the kmsuser account
-	//    password and logging into the cluster. Before you can connect your custom
-	//    key store to its CloudHSM cluster, you must log the kmsuser CU out of
-	//    the cluster. If you changed the kmsuser password to log into the cluster,
-	//    you must also update the key store password value for the custom key
-	//    store. For help, see How to Log Out and Reconnect (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#login-kmsuser-2)
-	//    in the Key Management Service Developer Guide.
-	//
-	//    * USER_NOT_FOUND - KMS cannot find a kmsuser CU account in the associated
-	//    CloudHSM cluster. Before you can connect your custom key store to its
-	//    CloudHSM cluster, you must create a kmsuser CU account in the cluster,
-	//    and then update the key store password value for the custom key store.
-	ConnectionErrorCode *string `type:"string" enum:"ConnectionErrorCodeType"`
-
-	// Indicates whether the custom key store is connected to its CloudHSM cluster.
-	//
-	// You can create and use KMS keys in your custom key store only when its connection
-	// state is CONNECTED.
-	//
-	// The value is DISCONNECTED if the key store has never been connected or you
-	// use the DisconnectCustomKeyStore operation to disconnect it. If the value
-	// is CONNECTED but you are having trouble using the custom key store, make
-	// sure that its associated CloudHSM cluster is active and contains at least
-	// one active HSM.
-	//
-	// A value of FAILED indicates that an attempt to connect was unsuccessful.
-	// The ConnectionErrorCode field in the response indicates the cause of the
-	// failure. For help resolving a connection failure, see Troubleshooting a Custom
-	// Key Store (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html)
-	// in the Key Management Service Developer Guide.
-	ConnectionState *string `type:"string" enum:"ConnectionStateType"`
-
-	// The date and time when the custom key store was created.
-	CreationDate *time.Time `type:"timestamp"`
-
-	// A unique identifier for the custom key store.
-	CustomKeyStoreId *string `min:"1" type:"string"`
-
-	// The user-specified friendly name for the custom key store.
-	CustomKeyStoreName *string `min:"1" type:"string"`
-
-	// The trust anchor certificate of the associated CloudHSM cluster. When you
-	// initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr),
-	// you create this certificate and save it in the customerCA.crt file.
-	TrustAnchorCertificate *string `min:"1" type:"string"`
-}
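-
-// Example (editor's note): a short, assumption-laden sketch of triaging a
-// custom key store entry by its connection state; the entry value and the
-// aws.StringValue helper (from "github.com/aws/aws-sdk-go/aws") are not part
-// of this file:
-//
-//	switch aws.StringValue(entry.ConnectionState) {
-//	case "CONNECTED":
-//		// ready: KMS keys in this store can be created and used
-//	case "FAILED":
-//		// consult ConnectionErrorCode for the cause, e.g. INVALID_CREDENTIALS
-//		fmt.Println(aws.StringValue(entry.ConnectionErrorCode))
-//	case "DISCONNECTED":
-//		// connect it with ConnectCustomKeyStore before use
-//	}
-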
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CustomKeyStoresListEntry) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CustomKeyStoresListEntry) GoString() string {
-	return s.String()
-}
-
-// SetCloudHsmClusterId sets the CloudHsmClusterId field's value.
-func (s *CustomKeyStoresListEntry) SetCloudHsmClusterId(v string) *CustomKeyStoresListEntry {
-	s.CloudHsmClusterId = &v
-	return s
-}
-
-// SetConnectionErrorCode sets the ConnectionErrorCode field's value.
-func (s *CustomKeyStoresListEntry) SetConnectionErrorCode(v string) *CustomKeyStoresListEntry {
-	s.ConnectionErrorCode = &v
-	return s
-}
-
-// SetConnectionState sets the ConnectionState field's value.
-func (s *CustomKeyStoresListEntry) SetConnectionState(v string) *CustomKeyStoresListEntry {
-	s.ConnectionState = &v
-	return s
-}
-
-// SetCreationDate sets the CreationDate field's value.
-func (s *CustomKeyStoresListEntry) SetCreationDate(v time.Time) *CustomKeyStoresListEntry {
-	s.CreationDate = &v
-	return s
-}
-
-// SetCustomKeyStoreId sets the CustomKeyStoreId field's value.
-func (s *CustomKeyStoresListEntry) SetCustomKeyStoreId(v string) *CustomKeyStoresListEntry {
-	s.CustomKeyStoreId = &v
-	return s
-}
-
-// SetCustomKeyStoreName sets the CustomKeyStoreName field's value.
-func (s *CustomKeyStoresListEntry) SetCustomKeyStoreName(v string) *CustomKeyStoresListEntry {
-	s.CustomKeyStoreName = &v
-	return s
-}
-
-// SetTrustAnchorCertificate sets the TrustAnchorCertificate field's value.
-func (s *CustomKeyStoresListEntry) SetTrustAnchorCertificate(v string) *CustomKeyStoresListEntry {
-	s.TrustAnchorCertificate = &v
-	return s
-}
-
-type DecryptInput struct {
-	_ struct{} `type:"structure"`
-
-	// Ciphertext to be decrypted. The blob includes metadata.
-	//
-	// CiphertextBlob is automatically base64 encoded/decoded by the SDK.
-	//
-	// CiphertextBlob is a required field
-	CiphertextBlob []byte `min:"1" type:"blob" required:"true"`
-
-	// Specifies the encryption algorithm that will be used to decrypt the ciphertext.
-	// Specify the same algorithm that was used to encrypt the data. If you specify
-	// a different algorithm, the Decrypt operation fails.
-	//
-	// This parameter is required only when the ciphertext was encrypted under an
-	// asymmetric KMS key. The default value, SYMMETRIC_DEFAULT, represents the
-	// only supported algorithm that is valid for symmetric encryption KMS keys.
-	EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
-
-	// Specifies the encryption context to use when decrypting the data. An encryption
-	// context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations)
-	// with a symmetric encryption KMS key. The standard asymmetric encryption algorithms
-	// and HMAC algorithms that KMS uses do not support an encryption context.
-	//
-	// An encryption context is a collection of non-secret key-value pairs that
-	// represent additional authenticated data. When you use an encryption context
-	// to encrypt data, you must specify the same (an exact case-sensitive match)
-	// encryption context to decrypt the data.
-	// An encryption context is supported only on operations with symmetric encryption
-	// KMS keys, where it is optional but strongly recommended.
-	//
-	// For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context)
-	// in the Key Management Service Developer Guide.
-	EncryptionContext map[string]*string `type:"map"`
-
-	// A list of grant tokens.
-	//
-	// Use a grant token when your permission to call this operation comes from
-	// a new grant that has not yet achieved eventual consistency. For more information,
-	// see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
-	// and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
-	// in the Key Management Service Developer Guide.
-	GrantTokens []*string `type:"list"`
-
-	// Specifies the KMS key that KMS uses to decrypt the ciphertext.
-	//
-	// Enter a key ID of the KMS key that was used to encrypt the ciphertext. If
-	// you identify a different KMS key, the Decrypt operation throws an IncorrectKeyException.
-	//
-	// This parameter is required only when the ciphertext was encrypted under an
-	// asymmetric KMS key. If you used a symmetric encryption KMS key, KMS can get
-	// the KMS key from metadata that it adds to the symmetric ciphertext blob.
-	// However, specifying the KMS key is always recommended as a best practice;
-	// it ensures that you use the KMS key that you intend.
-	//
-	// To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN.
-	// When using an alias name, prefix it with "alias/". To specify a KMS key in
-	// a different Amazon Web Services account, you must use the key ARN or alias
-	// ARN.
-	//
-	// For example:
-	//
-	//    * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
-	//
-	//    * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
-	//
-	//    * Alias name: alias/ExampleAlias
-	//
-	//    * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
-	//
-	// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
-	// To get the alias name and alias ARN, use ListAliases.
-	KeyId *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecryptInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecryptInput) GoString() string {
-	return s.String()
-}
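-
-// Example (editor's note): a hedged sketch of a Decrypt call that pins the KMS
-// key and replays the original encryption context, per the guidance above. The
-// svc client, the ciphertext value, and the aws.String helper are assumptions
-// for illustration:
-//
-//	out, err := svc.Decrypt(&DecryptInput{
-//		CiphertextBlob:    ciphertext, // []byte from a previous Encrypt call
-//		KeyId:             aws.String("alias/ExampleAlias"),
-//		EncryptionContext: map[string]*string{"purpose": aws.String("test")},
-//	})
-
-// Validate inspects the fields of the type to determine if they are valid.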
-func (s *DecryptInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DecryptInput"}
-	if s.CiphertextBlob == nil {
-		invalidParams.Add(request.NewErrParamRequired("CiphertextBlob"))
-	}
-	if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1))
-	}
-	if s.KeyId != nil && len(*s.KeyId) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetCiphertextBlob sets the CiphertextBlob field's value.
-func (s *DecryptInput) SetCiphertextBlob(v []byte) *DecryptInput {
-	s.CiphertextBlob = v
-	return s
-}
-
-// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value.
-func (s *DecryptInput) SetEncryptionAlgorithm(v string) *DecryptInput {
-	s.EncryptionAlgorithm = &v
-	return s
-}
-
-// SetEncryptionContext sets the EncryptionContext field's value.
-func (s *DecryptInput) SetEncryptionContext(v map[string]*string) *DecryptInput {
-	s.EncryptionContext = v
-	return s
-}
-
-// SetGrantTokens sets the GrantTokens field's value.
-func (s *DecryptInput) SetGrantTokens(v []*string) *DecryptInput {
-	s.GrantTokens = v
-	return s
-}
-
-// SetKeyId sets the KeyId field's value.
-func (s *DecryptInput) SetKeyId(v string) *DecryptInput {
-	s.KeyId = &v
-	return s
-}
-
-type DecryptOutput struct {
-	_ struct{} `type:"structure"`
-
-	// The encryption algorithm that was used to decrypt the ciphertext.
-	EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"`
-
-	// The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
-	// of the KMS key that was used to decrypt the ciphertext.
-	KeyId *string `min:"1" type:"string"`
-
-	// Decrypted plaintext data. When you use the HTTP API or the Amazon Web Services
-	// CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded.
-	//
-	// Plaintext is a sensitive parameter and its value will be
-	// replaced with "sensitive" in the string returned by DecryptOutput's
-	// String and GoString methods.
-	//
-	// Plaintext is automatically base64 encoded/decoded by the SDK.
-	Plaintext []byte `min:"1" type:"blob" sensitive:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecryptOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecryptOutput) GoString() string {
-	return s.String()
-}
-
-// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value.
-func (s *DecryptOutput) SetEncryptionAlgorithm(v string) *DecryptOutput {
-	s.EncryptionAlgorithm = &v
-	return s
-}
-
-// SetKeyId sets the KeyId field's value.
-func (s *DecryptOutput) SetKeyId(v string) *DecryptOutput {
-	s.KeyId = &v
-	return s
-}
-
-// SetPlaintext sets the Plaintext field's value.
-func (s *DecryptOutput) SetPlaintext(v []byte) *DecryptOutput {
-	s.Plaintext = v
-	return s
-}
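-
-// Example (editor's note): a small sketch of the "sensitive" redaction noted
-// above. Printing a DecryptOutput does not leak the recovered plaintext; the
-// out value is an assumption carried over from the previous sketch:
-//
-//	fmt.Println(out.String()) // Plaintext is rendered as "sensitive"
-//	secret := out.Plaintext   // the raw bytes are only available directly
-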
-type DeleteAliasInput struct {
-	_ struct{} `type:"structure"`
-
-	// The alias to be deleted. The alias name must begin with alias/, such as
-	// alias/ExampleAlias.
-	//
-	// AliasName is a required field
-	AliasName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteAliasInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteAliasInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteAliasInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DeleteAliasInput"}
-	if s.AliasName == nil {
-		invalidParams.Add(request.NewErrParamRequired("AliasName"))
-	}
-	if s.AliasName != nil && len(*s.AliasName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("AliasName", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetAliasName sets the AliasName field's value.
-func (s *DeleteAliasInput) SetAliasName(v string) *DeleteAliasInput {
-	s.AliasName = &v
-	return s
-}
-
-type DeleteAliasOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteAliasOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteAliasOutput) GoString() string {
-	return s.String()
-}
-
-type DeleteCustomKeyStoreInput struct {
-	_ struct{} `type:"structure"`
-
-	// Enter the ID of the custom key store you want to delete. To find the ID of
-	// a custom key store, use the DescribeCustomKeyStores operation.
-	//
-	// CustomKeyStoreId is a required field
-	CustomKeyStoreId *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteCustomKeyStoreInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteCustomKeyStoreInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *DeleteCustomKeyStoreInput) SetCustomKeyStoreId(v string) *DeleteCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type DeleteCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type DeleteImportedKeyMaterialInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key from which you are deleting imported key material. - // The Origin of the KMS key must be EXTERNAL. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteImportedKeyMaterialInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteImportedKeyMaterialInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *DeleteImportedKeyMaterialInput) SetKeyId(v string) *DeleteImportedKeyMaterialInput { - s.KeyId = &v - return s -} - -type DeleteImportedKeyMaterialOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteImportedKeyMaterialOutput) GoString() string { - return s.String() -} - -// The system timed out while trying to fulfill the request. The request can -// be retried. -type DependencyTimeoutException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DependencyTimeoutException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DependencyTimeoutException) GoString() string { - return s.String() -} - -func newErrorDependencyTimeoutException(v protocol.ResponseMetadata) error { - return &DependencyTimeoutException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *DependencyTimeoutException) Code() string { - return "DependencyTimeoutException" -} - -// Message returns the exception's message. -func (s *DependencyTimeoutException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *DependencyTimeoutException) OrigErr() error { - return nil -} - -func (s *DependencyTimeoutException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *DependencyTimeoutException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *DependencyTimeoutException) RequestID() string { - return s.RespMetadata.RequestID -} - -type DescribeCustomKeyStoresInput struct { - _ struct{} `type:"structure"` - - // Gets only information about the specified custom key store. Enter the key - // store ID. - // - // By default, this operation gets information about all custom key stores in - // the account and Region. To limit the output to a particular custom key store, - // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, - // but not both. - CustomKeyStoreId *string `min:"1" type:"string"` - - // Gets only information about the specified custom key store. Enter the friendly - // name of the custom key store. - // - // By default, this operation gets information about all custom key stores in - // the account and Region. To limit the output to a particular custom key store, - // you can use either the CustomKeyStoreId or CustomKeyStoreName parameter, - // but not both. 
-	CustomKeyStoreName *string `min:"1" type:"string"`
-
-	// Use this parameter to specify the maximum number of items to return. When
-	// this value is present, KMS does not return more than the specified number
-	// of items, but it might return fewer.
-	Limit *int64 `min:"1" type:"integer"`
-
-	// Use this parameter in a subsequent request after you receive a response with
-	// truncated results. Set it to the value of NextMarker from the truncated response
-	// you just received.
-	Marker *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresInput) GoString() string {
-	return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeCustomKeyStoresInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DescribeCustomKeyStoresInput"}
-	if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1))
-	}
-	if s.CustomKeyStoreName != nil && len(*s.CustomKeyStoreName) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreName", 1))
-	}
-	if s.Limit != nil && *s.Limit < 1 {
-		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
-	}
-	if s.Marker != nil && len(*s.Marker) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Marker", 1))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
-
-// SetCustomKeyStoreId sets the CustomKeyStoreId field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreId(v string) *DescribeCustomKeyStoresInput {
-	s.CustomKeyStoreId = &v
-	return s
-}
-
-// SetCustomKeyStoreName sets the CustomKeyStoreName field's value.
-func (s *DescribeCustomKeyStoresInput) SetCustomKeyStoreName(v string) *DescribeCustomKeyStoresInput {
-	s.CustomKeyStoreName = &v
-	return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *DescribeCustomKeyStoresInput) SetLimit(v int64) *DescribeCustomKeyStoresInput {
-	s.Limit = &v
-	return s
-}
-
-// SetMarker sets the Marker field's value.
-func (s *DescribeCustomKeyStoresInput) SetMarker(v string) *DescribeCustomKeyStoresInput {
-	s.Marker = &v
-	return s
-}
-
-type DescribeCustomKeyStoresOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Contains metadata about each custom key store.
-	CustomKeyStores []*CustomKeyStoresListEntry `type:"list"`
-
-	// When Truncated is true, this element is present and contains the value to
-	// use for the Marker parameter in a subsequent request.
-	NextMarker *string `min:"1" type:"string"`
-
-	// A flag that indicates whether there are more items in the list. When this
-	// value is true, the list in this response is truncated. To get more items,
-	// pass the value of the NextMarker element in this response to the Marker parameter
-	// in a subsequent request.
-	Truncated *bool `type:"boolean"`
-}
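-
-// Example (editor's note): a hedged sketch of paging through custom key stores
-// with the Marker/Truncated protocol described above; the svc client and the
-// aws helper functions are assumptions, not part of this file:
-//
-//	in := &DescribeCustomKeyStoresInput{Limit: aws.Int64(10)}
-//	for {
-//		out, err := svc.DescribeCustomKeyStores(in)
-//		if err != nil {
-//			return err
-//		}
-//		for _, entry := range out.CustomKeyStores {
-//			fmt.Println(aws.StringValue(entry.CustomKeyStoreName))
-//		}
-//		if !aws.BoolValue(out.Truncated) {
-//			break
-//		}
-//		in.Marker = out.NextMarker
-//	}
-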
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresOutput) String() string {
-	return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeCustomKeyStoresOutput) GoString() string {
-	return s.String()
-}
-
-// SetCustomKeyStores sets the CustomKeyStores field's value.
-func (s *DescribeCustomKeyStoresOutput) SetCustomKeyStores(v []*CustomKeyStoresListEntry) *DescribeCustomKeyStoresOutput {
-	s.CustomKeyStores = v
-	return s
-}
-
-// SetNextMarker sets the NextMarker field's value.
-func (s *DescribeCustomKeyStoresOutput) SetNextMarker(v string) *DescribeCustomKeyStoresOutput {
-	s.NextMarker = &v
-	return s
-}
-
-// SetTruncated sets the Truncated field's value.
-func (s *DescribeCustomKeyStoresOutput) SetTruncated(v bool) *DescribeCustomKeyStoresOutput {
-	s.Truncated = &v
-	return s
-}
-
-type DescribeKeyInput struct {
-	_ struct{} `type:"structure"`
-
-	// A list of grant tokens.
-	//
-	// Use a grant token when your permission to call this operation comes from
-	// a new grant that has not yet achieved eventual consistency. For more information,
-	// see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
-	// and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
-	// in the Key Management Service Developer Guide.
-	GrantTokens []*string `type:"list"`
-
-	// Describes the specified KMS key.
-	//
-	// If you specify a predefined Amazon Web Services alias (an Amazon Web Services
-	// alias with no key ID), KMS associates the alias with an Amazon Web Services
-	// managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk)
-	// and returns its KeyId and Arn in the response.
-	//
-	// To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN.
-	// When using an alias name, prefix it with "alias/". To specify a KMS key in
-	// a different Amazon Web Services account, you must use the key ARN or alias
-	// ARN.
-	//
-	// For example:
-	//
-	//    * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
-	//
-	//    * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
-	//
-	//    * Alias name: alias/ExampleAlias
-	//
-	//    * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
-	//
-	// To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
-	// To get the alias name and alias ARN, use ListAliases.
-	//
-	// KeyId is a required field
-	KeyId *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeKeyInput) String() string {
-	return awsutil.Prettify(s)
-}
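-
-// Example (editor's note): a minimal sketch of looking up a key by alias with
-// DescribeKey, resolving the alias to its key ARN; the svc client and the aws
-// helper functions are assumptions:
-//
-//	out, err := svc.DescribeKey(&DescribeKeyInput{
-//		KeyId: aws.String("alias/aws/s3"), // a predefined AWS managed alias
-//	})
-//	if err == nil {
-//		fmt.Println(aws.StringValue(out.KeyMetadata.Arn))
-//	}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".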
-func (s DescribeKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *DescribeKeyInput) SetGrantTokens(v []*string) *DescribeKeyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *DescribeKeyInput) SetKeyId(v string) *DescribeKeyInput { - s.KeyId = &v - return s -} - -type DescribeKeyOutput struct { - _ struct{} `type:"structure"` - - // Metadata associated with the key. - KeyMetadata *KeyMetadata `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DescribeKeyOutput) GoString() string { - return s.String() -} - -// SetKeyMetadata sets the KeyMetadata field's value. -func (s *DescribeKeyOutput) SetKeyMetadata(v *KeyMetadata) *DescribeKeyOutput { - s.KeyMetadata = v - return s -} - -type DisableKeyInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key to disable. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisableKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. 
-func (s *DisableKeyInput) SetKeyId(v string) *DisableKeyInput { - s.KeyId = &v - return s -} - -type DisableKeyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyOutput) GoString() string { - return s.String() -} - -type DisableKeyRotationInput struct { - _ struct{} `type:"structure"` - - // Identifies a symmetric encryption KMS key. You cannot enable or disable automatic - // rotation of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html#asymmetric-cmks), - // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), - // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), - // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisableKeyRotationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableKeyRotationInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *DisableKeyRotationInput) SetKeyId(v string) *DisableKeyRotationInput { - s.KeyId = &v - return s -} - -type DisableKeyRotationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
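-
-// Example (editor's note): a hedged sketch of turning off automatic rotation
-// for a symmetric encryption KMS key, the only kind of key this operation
-// accepts per the field docs above; the svc client and the key ID value are
-// assumptions:
-//
-//	_, err := svc.DisableKeyRotation(&DisableKeyRotationInput{
-//		KeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
-//	})
-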
-func (s DisableKeyRotationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisableKeyRotationOutput) GoString() string { - return s.String() -} - -// The request was rejected because the specified KMS key is not enabled. -type DisabledException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisabledException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisabledException) GoString() string { - return s.String() -} - -func newErrorDisabledException(v protocol.ResponseMetadata) error { - return &DisabledException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *DisabledException) Code() string { - return "DisabledException" -} - -// Message returns the exception's message. -func (s *DisabledException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *DisabledException) OrigErr() error { - return nil -} - -func (s *DisabledException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *DisabledException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *DisabledException) RequestID() string { - return s.RespMetadata.RequestID -} - -type DisconnectCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Enter the ID of the custom key store you want to disconnect. To find the - // ID of a custom key store, use the DescribeCustomKeyStores operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DisconnectCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisconnectCustomKeyStoreInput"} - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *DisconnectCustomKeyStoreInput) SetCustomKeyStoreId(v string) *DisconnectCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -type DisconnectCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DisconnectCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type EnableKeyInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key to enable. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnableKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *EnableKeyInput) SetKeyId(v string) *EnableKeyInput { - s.KeyId = &v - return s -} - -type EnableKeyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
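DisableKeyInput and EnableKeyInput above carry nothing but a KeyId, and their outputs are empty structs, so callers only check the error. A hedged sketch, reusing the client and imports from the first example (the helper name is hypothetical):

// toggleKey suspends a KMS key and turns it back on. DisableKeyOutput and
// EnableKeyOutput have no fields, so only the error is meaningful.
func toggleKey(svc *kms.KMS, keyARN string) error {
	if _, err := svc.DisableKey(&kms.DisableKeyInput{KeyId: aws.String(keyARN)}); err != nil {
		return err
	}
	_, err := svc.EnableKey(&kms.EnableKeyInput{KeyId: aws.String(keyARN)})
	return err
}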
-func (s EnableKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyOutput) GoString() string { - return s.String() -} - -type EnableKeyRotationInput struct { - _ struct{} `type:"structure"` - - // Identifies a symmetric encryption KMS key. You cannot enable automatic rotation - // of asymmetric KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html), - // HMAC KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html), - // KMS keys with imported key material (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html), - // or KMS keys in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // To enable or disable automatic rotation of a set of related multi-Region - // keys (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-manage.html#multi-region-rotate), - // set the property on the primary key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EnableKeyRotationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableKeyRotationInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *EnableKeyRotationInput) SetKeyId(v string) *EnableKeyRotationInput { - s.KeyId = &v - return s -} - -type EnableKeyRotationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s EnableKeyRotationOutput) GoString() string { - return s.String() -} - -type EncryptInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption algorithm that KMS will use to encrypt the plaintext - // message. The algorithm must be compatible with the KMS key that you specify. - // - // This parameter is required only for asymmetric KMS keys. The default value, - // SYMMETRIC_DEFAULT, is the algorithm used for symmetric encryption KMS keys. - // If you are using an asymmetric KMS key, we recommend RSAES_OAEP_SHA_256. - EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context that will be used to encrypt the data. An - // encryption context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // with a symmetric encryption KMS key. The standard asymmetric encryption algorithms - // and HMAC algorithms that KMS uses do not support an encryption context. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the KMS key to use in the encryption operation. The KMS key must - // have a KeyUsage of ENCRYPT_DECRYPT. To find the KeyUsage of a KMS key, use - // the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Data to be encrypted. 
- // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by EncryptInput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - // - // Plaintext is a required field - Plaintext []byte `min:"1" type:"blob" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Plaintext == nil { - invalidParams.Add(request.NewErrParamRequired("Plaintext")) - } - if s.Plaintext != nil && len(s.Plaintext) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Plaintext", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *EncryptInput) SetEncryptionAlgorithm(v string) *EncryptInput { - s.EncryptionAlgorithm = &v - return s -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *EncryptInput) SetEncryptionContext(v map[string]*string) *EncryptInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *EncryptInput) SetGrantTokens(v []*string) *EncryptInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *EncryptInput) SetKeyId(v string) *EncryptInput { - s.KeyId = &v - return s -} - -// SetPlaintext sets the Plaintext field's value. -func (s *EncryptInput) SetPlaintext(v []byte) *EncryptInput { - s.Plaintext = v - return s -} - -type EncryptOutput struct { - _ struct{} `type:"structure"` - - // The encrypted plaintext. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The encryption algorithm that was used to encrypt the plaintext. - EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to encrypt the plaintext. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
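EncryptInput above requires only KeyId and Plaintext; the encryption context is optional for symmetric keys but must be repeated exactly on decryption. A sketch under the same assumptions as the earlier examples (the helper name and context pair are illustrative; the alias is the documentation's placeholder):

// encryptSecret encrypts up to 4,096 bytes of plaintext under the named key.
func encryptSecret(svc *kms.KMS, plaintext []byte) ([]byte, error) {
	out, err := svc.Encrypt(&kms.EncryptInput{
		KeyId:     aws.String("alias/ExampleAlias"), // key ID, key ARN, alias, or alias ARN
		Plaintext: plaintext,
		EncryptionContext: map[string]*string{ // must match byte for byte on Decrypt
			"purpose": aws.String("example"),
		},
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil // base64 handling is done by the SDK
}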
-func (s EncryptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s EncryptOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *EncryptOutput) SetCiphertextBlob(v []byte) *EncryptOutput { - s.CiphertextBlob = v - return s -} - -// SetEncryptionAlgorithm sets the EncryptionAlgorithm field's value. -func (s *EncryptOutput) SetEncryptionAlgorithm(v string) *EncryptOutput { - s.EncryptionAlgorithm = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *EncryptOutput) SetKeyId(v string) *EncryptOutput { - s.KeyId = &v - return s -} - -// The request was rejected because the specified import token is expired. Use -// GetParametersForImport to get a new import token and public key, use the -// new public key to encrypt the key material, and then try the request again. -type ExpiredImportTokenException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExpiredImportTokenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ExpiredImportTokenException) GoString() string { - return s.String() -} - -func newErrorExpiredImportTokenException(v protocol.ResponseMetadata) error { - return &ExpiredImportTokenException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *ExpiredImportTokenException) Code() string { - return "ExpiredImportTokenException" -} - -// Message returns the exception's message. -func (s *ExpiredImportTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *ExpiredImportTokenException) OrigErr() error { - return nil -} - -func (s *ExpiredImportTokenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *ExpiredImportTokenException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *ExpiredImportTokenException) RequestID() string { - return s.RespMetadata.RequestID -} - -type GenerateDataKeyInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the data - // key. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. 
An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the symmetric encryption KMS key that encrypts the data key. You - // cannot specify an asymmetric KMS key or a KMS key in a custom key store. - // To get the type and origin of your KMS key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the length of the data key. Use AES_128 to generate a 128-bit symmetric - // key, or AES_256 to generate a 256-bit symmetric key. - // - // You must specify either the KeySpec or the NumberOfBytes parameter (but not - // both) in every GenerateDataKey request. - KeySpec *string `type:"string" enum:"DataKeySpec"` - - // Specifies the length of the data key in bytes. For example, use the value - // 64 to generate a 512-bit data key (64 bytes is 512 bits). For 128-bit (16-byte) - // and 256-bit (32-byte) data keys, use the KeySpec parameter. - // - // You must specify either the KeySpec or the NumberOfBytes parameter (but not - // both) in every GenerateDataKey request. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GenerateDataKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyInput) SetGrantTokens(v []*string) *GenerateDataKeyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyInput) SetKeyId(v string) *GenerateDataKeyInput { - s.KeyId = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *GenerateDataKeyInput) SetKeySpec(v string) *GenerateDataKeyInput { - s.KeySpec = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateDataKeyInput) SetNumberOfBytes(v int64) *GenerateDataKeyInput { - s.NumberOfBytes = &v - return s -} - -type GenerateDataKeyOutput struct { - _ struct{} `type:"structure"` - - // The encrypted copy of the data key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. - KeyId *string `min:"1" type:"string"` - - // The plaintext data key. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. Use - // this data key to encrypt your data outside of KMS. Then, remove it from memory - // as soon as possible. - // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateDataKeyOutput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - Plaintext []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *GenerateDataKeyOutput) SetCiphertextBlob(v []byte) *GenerateDataKeyOutput { - s.CiphertextBlob = v - return s -} - -// SetKeyId sets the KeyId field's value. 
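GenerateDataKeyInput above encodes the envelope-encryption workflow: exactly one of KeySpec or NumberOfBytes is set, the plaintext key is used locally and discarded, and only the CiphertextBlob is persisted. A sketch (helper name hypothetical; the key ARN is the documentation's placeholder):

func newDataKey(svc *kms.KMS) (plaintextKey, wrappedKey []byte, err error) {
	out, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		KeySpec: aws.String(kms.DataKeySpecAes256), // or NumberOfBytes, never both
	})
	if err != nil {
		return nil, nil, err
	}
	// Encrypt locally with out.Plaintext, then drop it from memory; store
	// out.CiphertextBlob alongside the data it protects.
	return out.Plaintext, out.CiphertextBlob, nil
}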
-func (s *GenerateDataKeyOutput) SetKeyId(v string) *GenerateDataKeyOutput { - s.KeyId = &v - return s -} - -// SetPlaintext sets the Plaintext field's value. -func (s *GenerateDataKeyOutput) SetPlaintext(v []byte) *GenerateDataKeyOutput { - s.Plaintext = v - return s -} - -type GenerateDataKeyPairInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the symmetric encryption KMS key that encrypts the private key - // in the data key pair. You cannot specify an asymmetric KMS key or a KMS key - // in a custom key store. To get the type and origin of your KMS key, use the - // DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Determines the type of data key pair that is generated. - // - // The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt - // and decrypt or to sign and verify (but not both), and the rule that permits - // you to use ECC KMS keys only to sign and verify, are not effective on data - // key pairs, which are used outside of KMS. - // - // KeyPairSpec is a required field - KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GenerateDataKeyPairInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyPairInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.KeyPairSpec == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairSpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyPairInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyPairInput) SetGrantTokens(v []*string) *GenerateDataKeyPairInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairInput) SetKeyId(v string) *GenerateDataKeyPairInput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairInput) SetKeyPairSpec(v string) *GenerateDataKeyPairInput { - s.KeyPairSpec = &v - return s -} - -type GenerateDataKeyPairOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. - KeyId *string `min:"1" type:"string"` - - // The type of data key pair that was generated. - KeyPairSpec *string `type:"string" enum:"DataKeyPairSpec"` - - // The encrypted copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PrivateKeyCiphertextBlob is automatically base64 encoded/decoded by the SDK. - PrivateKeyCiphertextBlob []byte `min:"1" type:"blob"` - - // The plaintext copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // - // PrivateKeyPlaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateDataKeyPairOutput's - // String and GoString methods. - // - // PrivateKeyPlaintext is automatically base64 encoded/decoded by the SDK. - PrivateKeyPlaintext []byte `min:"1" type:"blob" sensitive:"true"` - - // The public key (in plaintext). When you use the HTTP API or the Amazon Web - // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
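GenerateDataKeyPair above returns an asymmetric key pair whose private half is wrapped by a symmetric KMS key; KeyPairSpec picks the algorithm, and the usual RSA/ECC usage restrictions do not apply because the pair is used outside KMS. A sketch (helper name hypothetical; the enum constant follows the SDK's generated naming):

func newDataKeyPair(svc *kms.KMS, keyARN string) (*kms.GenerateDataKeyPairOutput, error) {
	// The output carries PublicKey, PrivateKeyCiphertextBlob, and the
	// sensitive PrivateKeyPlaintext described above.
	return svc.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String(keyARN),                         // symmetric wrapping key
		KeyPairSpec: aws.String(kms.DataKeyPairSpecEccNistP256), // pair algorithm
	})
}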
-func (s GenerateDataKeyPairOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairOutput) SetKeyId(v string) *GenerateDataKeyPairOutput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairOutput) SetKeyPairSpec(v string) *GenerateDataKeyPairOutput { - s.KeyPairSpec = &v - return s -} - -// SetPrivateKeyCiphertextBlob sets the PrivateKeyCiphertextBlob field's value. -func (s *GenerateDataKeyPairOutput) SetPrivateKeyCiphertextBlob(v []byte) *GenerateDataKeyPairOutput { - s.PrivateKeyCiphertextBlob = v - return s -} - -// SetPrivateKeyPlaintext sets the PrivateKeyPlaintext field's value. -func (s *GenerateDataKeyPairOutput) SetPrivateKeyPlaintext(v []byte) *GenerateDataKeyPairOutput { - s.PrivateKeyPlaintext = v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GenerateDataKeyPairOutput) SetPublicKey(v []byte) *GenerateDataKeyPairOutput { - s.PublicKey = v - return s -} - -type GenerateDataKeyPairWithoutPlaintextInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the private - // key in the data key pair. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the symmetric encryption KMS key that encrypts the private key - // in the data key pair. You cannot specify an asymmetric KMS key or a KMS key - // in a custom key store. To get the type and origin of your KMS key, use the - // DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Determines the type of data key pair that is generated. - // - // The KMS rule that restricts the use of asymmetric RSA KMS keys to encrypt - // and decrypt or to sign and verify (but not both), and the rule that permits - // you to use ECC KMS keys only to sign and verify, are not effective on data - // key pairs, which are used outside of KMS. - // - // KeyPairSpec is a required field - KeyPairSpec *string `type:"string" required:"true" enum:"DataKeyPairSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyPairWithoutPlaintextInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyPairWithoutPlaintextInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.KeyPairSpec == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairSpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyPairWithoutPlaintextInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetGrantTokens(v []*string) *GenerateDataKeyPairWithoutPlaintextInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyId(v string) *GenerateDataKeyPairWithoutPlaintextInput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyPairSpec(v string) *GenerateDataKeyPairWithoutPlaintextInput { - s.KeyPairSpec = &v - return s -} - -type GenerateDataKeyPairWithoutPlaintextOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the private key. - KeyId *string `min:"1" type:"string"` - - // The type of data key pair that was generated. 
- KeyPairSpec *string `type:"string" enum:"DataKeyPairSpec"` - - // The encrypted copy of the private key. When you use the HTTP API or the Amazon - // Web Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PrivateKeyCiphertextBlob is automatically base64 encoded/decoded by the SDK. - PrivateKeyCiphertextBlob []byte `min:"1" type:"blob"` - - // The public key (in plaintext). When you use the HTTP API or the Amazon Web - // Services CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyPairWithoutPlaintextOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetKeyId(v string) *GenerateDataKeyPairWithoutPlaintextOutput { - s.KeyId = &v - return s -} - -// SetKeyPairSpec sets the KeyPairSpec field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetKeyPairSpec(v string) *GenerateDataKeyPairWithoutPlaintextOutput { - s.KeyPairSpec = &v - return s -} - -// SetPrivateKeyCiphertextBlob sets the PrivateKeyCiphertextBlob field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetPrivateKeyCiphertextBlob(v []byte) *GenerateDataKeyPairWithoutPlaintextOutput { - s.PrivateKeyCiphertextBlob = v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GenerateDataKeyPairWithoutPlaintextOutput) SetPublicKey(v []byte) *GenerateDataKeyPairWithoutPlaintextOutput { - s.PublicKey = v - return s -} - -type GenerateDataKeyWithoutPlaintextInput struct { - _ struct{} `type:"structure"` - - // Specifies the encryption context that will be used when encrypting the data - // key. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - EncryptionContext map[string]*string `type:"map"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the symmetric encryption KMS key that encrypts the data key. You - // cannot specify an asymmetric KMS key or a KMS key in a custom key store. - // To get the type and origin of your KMS key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The length of the data key. Use AES_128 to generate a 128-bit symmetric key, - // or AES_256 to generate a 256-bit symmetric key. - KeySpec *string `type:"string" enum:"DataKeySpec"` - - // The length of the data key in bytes. For example, use the value 64 to generate - // a 512-bit data key (64 bytes is 512 bits). For common key lengths (128-bit - // and 256-bit symmetric keys), we recommend that you use the KeySpec field - // instead of this one. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateDataKeyWithoutPlaintextInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateDataKeyWithoutPlaintextInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEncryptionContext sets the EncryptionContext field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetEncryptionContext(v map[string]*string) *GenerateDataKeyWithoutPlaintextInput { - s.EncryptionContext = v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. 
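GenerateDataKeyWithoutPlaintext above is the variant for components that create keys they never use: only the wrapped key is returned, and a consumer calls Decrypt (part of the same removed API surface) when the plaintext is actually needed. A sketch (helper name hypothetical):

func newWrappedKey(svc *kms.KMS, keyARN string) ([]byte, error) {
	out, err := svc.GenerateDataKeyWithoutPlaintext(&kms.GenerateDataKeyWithoutPlaintextInput{
		KeyId:   aws.String(keyARN),
		KeySpec: aws.String(kms.DataKeySpecAes256),
	})
	if err != nil {
		return nil, err
	}
	return out.CiphertextBlob, nil // no plaintext copy ever exists here
}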
-func (s *GenerateDataKeyWithoutPlaintextInput) SetGrantTokens(v []*string) *GenerateDataKeyWithoutPlaintextInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetKeyId(v string) *GenerateDataKeyWithoutPlaintextInput { - s.KeyId = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetKeySpec(v string) *GenerateDataKeyWithoutPlaintextInput { - s.KeySpec = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateDataKeyWithoutPlaintextInput) SetNumberOfBytes(v int64) *GenerateDataKeyWithoutPlaintextInput { - s.NumberOfBytes = &v - return s -} - -type GenerateDataKeyWithoutPlaintextOutput struct { - _ struct{} `type:"structure"` - - // The encrypted data key. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that encrypted the data key. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateDataKeyWithoutPlaintextOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *GenerateDataKeyWithoutPlaintextOutput) SetCiphertextBlob(v []byte) *GenerateDataKeyWithoutPlaintextOutput { - s.CiphertextBlob = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateDataKeyWithoutPlaintextOutput) SetKeyId(v string) *GenerateDataKeyWithoutPlaintextOutput { - s.KeyId = &v - return s -} - -type GenerateMacInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // The HMAC KMS key to use in the operation. The MAC algorithm computes the - // HMAC for the message and the key as described in RFC 2104 (https://datatracker.ietf.org/doc/html/rfc2104). - // - // To identify an HMAC KMS key, use the DescribeKey operation and see the KeySpec - // field in the response. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The MAC algorithm used in the operation. - // - // The algorithm must be compatible with the HMAC KMS key that you specify. 
- // To find the MAC algorithms that your HMAC KMS key supports, use the DescribeKey - // operation and see the MacAlgorithms field in the DescribeKey response. - // - // MacAlgorithm is a required field - MacAlgorithm *string `type:"string" required:"true" enum:"MacAlgorithmSpec"` - - // The message to be hashed. Specify a message of up to 4,096 bytes. - // - // GenerateMac and VerifyMac do not provide special handling for message digests. - // If you generate an HMAC for a hash digest of a message, you must verify the - // HMAC of the same hash digest. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateMacInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateMacInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateMacInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateMacInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateMacInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.MacAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("MacAlgorithm")) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *GenerateMacInput) SetGrantTokens(v []*string) *GenerateMacInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateMacInput) SetKeyId(v string) *GenerateMacInput { - s.KeyId = &v - return s -} - -// SetMacAlgorithm sets the MacAlgorithm field's value. -func (s *GenerateMacInput) SetMacAlgorithm(v string) *GenerateMacInput { - s.MacAlgorithm = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *GenerateMacInput) SetMessage(v []byte) *GenerateMacInput { - s.Message = v - return s -} - -type GenerateMacOutput struct { - _ struct{} `type:"structure"` - - // The HMAC KMS key used in the operation. - KeyId *string `min:"1" type:"string"` - - // The hash-based message authentication code (HMAC) for the given message, - // key, and MAC algorithm. - // Mac is automatically base64 encoded/decoded by the SDK. - Mac []byte `min:"1" type:"blob"` - - // The MAC algorithm that was used to generate the HMAC. - MacAlgorithm *string `type:"string" enum:"MacAlgorithmSpec"` -} - -// String returns the string representation. 
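GenerateMacInput above computes an RFC 2104 HMAC inside KMS. The message is capped at 4,096 bytes, so larger payloads are hashed first and the digest is MACed, with the same digest verified later. A sketch (helper name hypothetical; the enum constant follows the SDK's generated naming):

func hmacTag(svc *kms.KMS, keyARN string, msg []byte) ([]byte, error) {
	out, err := svc.GenerateMac(&kms.GenerateMacInput{
		KeyId:        aws.String(keyARN),                         // an HMAC KMS key
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256), // must suit the key
		Message:      msg,                                        // <= 4,096 bytes
	})
	if err != nil {
		return nil, err
	}
	return out.Mac, nil
}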
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateMacOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateMacOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *GenerateMacOutput) SetKeyId(v string) *GenerateMacOutput { - s.KeyId = &v - return s -} - -// SetMac sets the Mac field's value. -func (s *GenerateMacOutput) SetMac(v []byte) *GenerateMacOutput { - s.Mac = v - return s -} - -// SetMacAlgorithm sets the MacAlgorithm field's value. -func (s *GenerateMacOutput) SetMacAlgorithm(v string) *GenerateMacOutput { - s.MacAlgorithm = &v - return s -} - -type GenerateRandomInput struct { - _ struct{} `type:"structure"` - - // Generates the random byte string in the CloudHSM cluster that is associated - // with the specified custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html). - // To find the ID of a custom key store, use the DescribeCustomKeyStores operation. - CustomKeyStoreId *string `min:"1" type:"string"` - - // The length of the byte string. - NumberOfBytes *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GenerateRandomInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GenerateRandomInput"} - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - if s.NumberOfBytes != nil && *s.NumberOfBytes < 1 { - invalidParams.Add(request.NewErrParamMinValue("NumberOfBytes", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *GenerateRandomInput) SetCustomKeyStoreId(v string) *GenerateRandomInput { - s.CustomKeyStoreId = &v - return s -} - -// SetNumberOfBytes sets the NumberOfBytes field's value. -func (s *GenerateRandomInput) SetNumberOfBytes(v int64) *GenerateRandomInput { - s.NumberOfBytes = &v - return s -} - -type GenerateRandomOutput struct { - _ struct{} `type:"structure"` - - // The random byte string. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. 
- // - // Plaintext is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GenerateRandomOutput's - // String and GoString methods. - // - // Plaintext is automatically base64 encoded/decoded by the SDK. - Plaintext []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GenerateRandomOutput) GoString() string { - return s.String() -} - -// SetPlaintext sets the Plaintext field's value. -func (s *GenerateRandomOutput) SetPlaintext(v []byte) *GenerateRandomOutput { - s.Plaintext = v - return s -} - -type GetKeyPolicyInput struct { - _ struct{} `type:"structure"` - - // Gets the key policy for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the name of the key policy. The only valid name is default. To - // get the names of key policies, use ListKeyPolicies. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetKeyPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetKeyPolicyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *GetKeyPolicyInput) SetKeyId(v string) *GetKeyPolicyInput { - s.KeyId = &v - return s -} - -// SetPolicyName sets the PolicyName field's value. 
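Reviewer note: for context on the vendored surface being dropped in this hunk, here is a minimal, untested sketch of how the GenerateMac and GenerateRandom shapes above were typically driven through the v1 SDK. It assumes ambient AWS credentials; the HMAC key alias is hypothetical.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/kms"
    )

    func main() {
        svc := kms.New(session.Must(session.NewSession()))

        // HMAC a short message with an HMAC KMS key (alias is hypothetical).
        mac, err := svc.GenerateMac(&kms.GenerateMacInput{
            KeyId:        aws.String("alias/example-hmac-key"),
            MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
            Message:      []byte("hello"),
        })
        if err != nil {
            panic(err)
        }
        fmt.Printf("mac is %d bytes\n", len(mac.Mac))

        // Ask KMS for 32 random bytes; the SDK base64-decodes Plaintext.
        rnd, err := svc.GenerateRandom(&kms.GenerateRandomInput{
            NumberOfBytes: aws.Int64(32),
        })
        if err != nil {
            panic(err)
        }
        fmt.Printf("got %d random bytes\n", len(rnd.Plaintext))
    }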
-func (s *GetKeyPolicyInput) SetPolicyName(v string) *GetKeyPolicyInput { - s.PolicyName = &v - return s -} - -type GetKeyPolicyOutput struct { - _ struct{} `type:"structure"` - - // A key policy document in JSON format. - Policy *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyPolicyOutput) GoString() string { - return s.String() -} - -// SetPolicy sets the Policy field's value. -func (s *GetKeyPolicyOutput) SetPolicy(v string) *GetKeyPolicyOutput { - s.Policy = &v - return s -} - -type GetKeyRotationStatusInput struct { - _ struct{} `type:"structure"` - - // Gets the rotation status for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetKeyRotationStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetKeyRotationStatusInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *GetKeyRotationStatusInput) SetKeyId(v string) *GetKeyRotationStatusInput { - s.KeyId = &v - return s -} - -type GetKeyRotationStatusOutput struct { - _ struct{} `type:"structure"` - - // A Boolean value that specifies whether key rotation is enabled. - KeyRotationEnabled *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
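The policy and rotation getters above follow the same request shape. A sketch with the same imports as the previous example; the key ID is the example from the doc comments, and per those comments "default" is the only valid policy name.

    // Same imports as the previous sketch.
    func showPolicyAndRotation(svc *kms.KMS) error {
        keyID := "1234abcd-12ab-34cd-56ef-1234567890ab" // example ID from the docs above

        pol, err := svc.GetKeyPolicy(&kms.GetKeyPolicyInput{
            KeyId:      aws.String(keyID),
            PolicyName: aws.String("default"), // the only valid policy name
        })
        if err != nil {
            return err
        }
        fmt.Println(aws.StringValue(pol.Policy)) // JSON policy document

        rot, err := svc.GetKeyRotationStatus(&kms.GetKeyRotationStatusInput{
            KeyId: aws.String(keyID),
        })
        if err != nil {
            return err
        }
        fmt.Println("rotation enabled:", aws.BoolValue(rot.KeyRotationEnabled))
        return nil
    }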
-func (s GetKeyRotationStatusOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetKeyRotationStatusOutput) GoString() string { - return s.String() -} - -// SetKeyRotationEnabled sets the KeyRotationEnabled field's value. -func (s *GetKeyRotationStatusOutput) SetKeyRotationEnabled(v bool) *GetKeyRotationStatusOutput { - s.KeyRotationEnabled = &v - return s -} - -type GetParametersForImportInput struct { - _ struct{} `type:"structure"` - - // The identifier of the symmetric encryption KMS key into which you will import - // key material. The Origin of the KMS key must be EXTERNAL. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The algorithm you will use to encrypt the key material before importing it - // with ImportKeyMaterial. For more information, see Encrypt the Key Material - // (https://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) - // in the Key Management Service Developer Guide. - // - // WrappingAlgorithm is a required field - WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"` - - // The type of wrapping key (public key) to return in the response. Only 2048-bit - // RSA public keys are supported. - // - // WrappingKeySpec is a required field - WrappingKeySpec *string `type:"string" required:"true" enum:"WrappingKeySpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetParametersForImportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetParametersForImportInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.WrappingAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("WrappingAlgorithm")) - } - if s.WrappingKeySpec == nil { - invalidParams.Add(request.NewErrParamRequired("WrappingKeySpec")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. 
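A sketch of a GetParametersForImport request built from the three fields above (same imports; the key ID is again the documentation example). The response's ImportToken and PublicKey are only usable until ParametersValidTo; see the ImportKeyMaterial sketch further down for the second half of the flow.

    // Same imports as the sketches above.
    func fetchImportParams(svc *kms.KMS) (*kms.GetParametersForImportOutput, error) {
        return svc.GetParametersForImport(&kms.GetParametersForImportInput{
            KeyId:             aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
            WrappingAlgorithm: aws.String(kms.AlgorithmSpecRsaesOaepSha256),
            // Only 2048-bit RSA wrapping keys are supported, per the field doc.
            WrappingKeySpec: aws.String(kms.WrappingKeySpecRsa2048),
        })
    }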
-func (s *GetParametersForImportInput) SetKeyId(v string) *GetParametersForImportInput { - s.KeyId = &v - return s -} - -// SetWrappingAlgorithm sets the WrappingAlgorithm field's value. -func (s *GetParametersForImportInput) SetWrappingAlgorithm(v string) *GetParametersForImportInput { - s.WrappingAlgorithm = &v - return s -} - -// SetWrappingKeySpec sets the WrappingKeySpec field's value. -func (s *GetParametersForImportInput) SetWrappingKeySpec(v string) *GetParametersForImportInput { - s.WrappingKeySpec = &v - return s -} - -type GetParametersForImportOutput struct { - _ struct{} `type:"structure"` - - // The import token to send in a subsequent ImportKeyMaterial request. - // ImportToken is automatically base64 encoded/decoded by the SDK. - ImportToken []byte `min:"1" type:"blob"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key to use in a subsequent ImportKeyMaterial request. This is - // the same KMS key specified in the GetParametersForImport request. - KeyId *string `min:"1" type:"string"` - - // The time at which the import token and public key are no longer valid. After - // this time, you cannot use them to make an ImportKeyMaterial request and you - // must send another GetParametersForImport request to get new ones. - ParametersValidTo *time.Time `type:"timestamp"` - - // The public key to use to encrypt the key material before importing it with - // ImportKeyMaterial. - // - // PublicKey is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by GetParametersForImportOutput's - // String and GoString methods. - // - // PublicKey is automatically base64 encoded/decoded by the SDK. - PublicKey []byte `min:"1" type:"blob" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetParametersForImportOutput) GoString() string { - return s.String() -} - -// SetImportToken sets the ImportToken field's value. -func (s *GetParametersForImportOutput) SetImportToken(v []byte) *GetParametersForImportOutput { - s.ImportToken = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GetParametersForImportOutput) SetKeyId(v string) *GetParametersForImportOutput { - s.KeyId = &v - return s -} - -// SetParametersValidTo sets the ParametersValidTo field's value. -func (s *GetParametersForImportOutput) SetParametersValidTo(v time.Time) *GetParametersForImportOutput { - s.ParametersValidTo = &v - return s -} - -// SetPublicKey sets the PublicKey field's value. -func (s *GetParametersForImportOutput) SetPublicKey(v []byte) *GetParametersForImportOutput { - s.PublicKey = v - return s -} - -type GetPublicKeyInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
- // For more information, see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token)
- // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token)
- // in the Key Management Service Developer Guide.
- GrantTokens []*string `type:"list"`
-
- // Identifies the asymmetric KMS key that includes the public key.
- //
- // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN.
- // When using an alias name, prefix it with "alias/". To specify a KMS key in
- // a different Amazon Web Services account, you must use the key ARN or alias
- // ARN.
- //
- // For example:
- //
- // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
- //
- // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
- //
- // * Alias name: alias/ExampleAlias
- //
- // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias
- //
- // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey.
- // To get the alias name and alias ARN, use ListAliases.
- //
- // KeyId is a required field
- KeyId *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetPublicKeyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetPublicKeyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetPublicKeyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyInput"}
- if s.KeyId == nil {
- invalidParams.Add(request.NewErrParamRequired("KeyId"))
- }
- if s.KeyId != nil && len(*s.KeyId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGrantTokens sets the GrantTokens field's value.
-func (s *GetPublicKeyInput) SetGrantTokens(v []*string) *GetPublicKeyInput {
- s.GrantTokens = v
- return s
-}
-
-// SetKeyId sets the KeyId field's value.
-func (s *GetPublicKeyInput) SetKeyId(v string) *GetPublicKeyInput {
- s.KeyId = &v
- return s
-}
-
-type GetPublicKeyOutput struct {
- _ struct{} `type:"structure"`
-
- // Instead, use the KeySpec field in the GetPublicKey response.
- //
- // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend
- // that you use the KeySpec field in your code. However, to avoid breaking changes,
- // KMS will support both fields.
- //
- // Deprecated: This field has been deprecated. Instead, use the KeySpec field.
- CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"`
-
- // The encryption algorithms that KMS supports for this key.
- //
- // This information is critical. If a public key encrypts data outside of KMS
- // by using an unsupported encryption algorithm, the ciphertext cannot be decrypted.
- //
- // This field appears in the response only when the KeyUsage of the public key
- // is ENCRYPT_DECRYPT.
- EncryptionAlgorithms []*string `type:"list" enum:"EncryptionAlgorithmSpec"`
-
- // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN))
- // of the asymmetric KMS key from which the public key was downloaded.
- KeyId *string `min:"1" type:"string"`
-
- // The type of the public key that was downloaded.
- KeySpec *string `type:"string" enum:"KeySpec"`
-
- // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or
- // SIGN_VERIFY.
- //
- // This information is critical. If a public key with SIGN_VERIFY key usage
- // encrypts data outside of KMS, the ciphertext cannot be decrypted.
- KeyUsage *string `type:"string" enum:"KeyUsageType"`
-
- // The exported public key.
- //
- // The value is a DER-encoded X.509 public key, also known as SubjectPublicKeyInfo
- // (SPKI), as defined in RFC 5280 (https://tools.ietf.org/html/rfc5280). When
- // you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded.
- // Otherwise, it is not Base64-encoded.
- // PublicKey is automatically base64 encoded/decoded by the SDK.
- PublicKey []byte `min:"1" type:"blob"`
-
- // The signing algorithms that KMS supports for this key.
- //
- // This field appears in the response only when the KeyUsage of the public key
- // is SIGN_VERIFY.
- SigningAlgorithms []*string `type:"list" enum:"SigningAlgorithmSpec"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetPublicKeyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetPublicKeyOutput) GoString() string {
- return s.String()
-}
-
-// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value.
-func (s *GetPublicKeyOutput) SetCustomerMasterKeySpec(v string) *GetPublicKeyOutput {
- s.CustomerMasterKeySpec = &v
- return s
-}
-
-// SetEncryptionAlgorithms sets the EncryptionAlgorithms field's value.
-func (s *GetPublicKeyOutput) SetEncryptionAlgorithms(v []*string) *GetPublicKeyOutput {
- s.EncryptionAlgorithms = v
- return s
-}
-
-// SetKeyId sets the KeyId field's value.
-func (s *GetPublicKeyOutput) SetKeyId(v string) *GetPublicKeyOutput {
- s.KeyId = &v
- return s
-}
-
-// SetKeySpec sets the KeySpec field's value.
-func (s *GetPublicKeyOutput) SetKeySpec(v string) *GetPublicKeyOutput {
- s.KeySpec = &v
- return s
-}
-
-// SetKeyUsage sets the KeyUsage field's value.
-func (s *GetPublicKeyOutput) SetKeyUsage(v string) *GetPublicKeyOutput {
- s.KeyUsage = &v
- return s
-}
-
-// SetPublicKey sets the PublicKey field's value.
-func (s *GetPublicKeyOutput) SetPublicKey(v []byte) *GetPublicKeyOutput {
- s.PublicKey = v
- return s
-}
-
-// SetSigningAlgorithms sets the SigningAlgorithms field's value.
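Because PublicKey above is DER-encoded SPKI once the SDK strips the base64, it can be handed straight to crypto/x509. A sketch, reusing the documentation alias:

    // Adds crypto/x509 to the imports of the sketches above.
    func fetchPublicKey(svc *kms.KMS) (interface{}, error) {
        out, err := svc.GetPublicKey(&kms.GetPublicKeyInput{
            KeyId: aws.String("alias/ExampleAlias"),
        })
        if err != nil {
            return nil, err
        }
        // PublicKey is already base64-decoded by the SDK; what remains is
        // raw DER (SubjectPublicKeyInfo), which x509 parses directly.
        return x509.ParsePKIXPublicKey(out.PublicKey)
    }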
-func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutput { - s.SigningAlgorithms = v - return s -} - -// Use this structure to allow cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) -// in the grant only when the operation request includes the specified encryption -// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). -// -// KMS applies the grant constraints only to cryptographic operations that support -// an encryption context, that is, all cryptographic operations with a symmetric -// encryption KMS key (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). -// Grant constraints are not applied to operations that do not support an encryption -// context, such as cryptographic operations with HMAC KMS keys or asymmetric -// KMS keys, and management operations, such as DescribeKey or RetireGrant. -// -// In a cryptographic operation, the encryption context in the decryption operation -// must be an exact, case-sensitive match for the keys and values in the encryption -// context of the encryption operation. Only the order of the pairs can vary. -// -// However, in a grant constraint, the key in each key-value pair is not case -// sensitive, but the value is case sensitive. -// -// To avoid confusion, do not use multiple encryption context pairs that differ -// only by case. To require a fully case-sensitive encryption context, use the -// kms:EncryptionContext: and kms:EncryptionContextKeys conditions in an IAM -// or key policy. For details, see kms:EncryptionContext: (https://docs.aws.amazon.com/kms/latest/developerguide/policy-conditions.html#conditions-kms-encryption-context) -// in the Key Management Service Developer Guide . -type GrantConstraints struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs that must match the encryption context in the cryptographic - // operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // request. The grant allows the operation only when the encryption context - // in the request is the same as the encryption context specified in this constraint. - EncryptionContextEquals map[string]*string `type:"map"` - - // A list of key-value pairs that must be included in the encryption context - // of the cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // request. The grant allows the cryptographic operation only when the encryption - // context in the request includes the key-value pairs specified in this constraint, - // although it can include additional key-value pairs. - EncryptionContextSubset map[string]*string `type:"map"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantConstraints) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s GrantConstraints) GoString() string { - return s.String() -} - -// SetEncryptionContextEquals sets the EncryptionContextEquals field's value. -func (s *GrantConstraints) SetEncryptionContextEquals(v map[string]*string) *GrantConstraints { - s.EncryptionContextEquals = v - return s -} - -// SetEncryptionContextSubset sets the EncryptionContextSubset field's value. -func (s *GrantConstraints) SetEncryptionContextSubset(v map[string]*string) *GrantConstraints { - s.EncryptionContextSubset = v - return s -} - -// Contains information about a grant. -type GrantListEntry struct { - _ struct{} `type:"structure"` - - // A list of key-value pairs that must be present in the encryption context - // of certain subsequent operations that the grant allows. - Constraints *GrantConstraints `type:"structure"` - - // The date and time when the grant was created. - CreationDate *time.Time `type:"timestamp"` - - // The unique identifier for the grant. - GrantId *string `min:"1" type:"string"` - - // The identity that gets the permissions in the grant. - // - // The GranteePrincipal field in the ListGrants response usually contains the - // user or role designated as the grantee principal in the grant. However, when - // the grantee principal in the grant is an Amazon Web Services service, the - // GranteePrincipal field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), - // which might represent several different grantee principals. - GranteePrincipal *string `min:"1" type:"string"` - - // The Amazon Web Services account under which the grant was issued. - IssuingAccount *string `min:"1" type:"string"` - - // The unique identifier for the KMS key to which the grant applies. - KeyId *string `min:"1" type:"string"` - - // The friendly name that identifies the grant. If a name was provided in the - // CreateGrant request, that name is returned. Otherwise this value is null. - Name *string `min:"1" type:"string"` - - // The list of operations permitted by the grant. - Operations []*string `type:"list" enum:"GrantOperation"` - - // The principal that can retire the grant. - RetiringPrincipal *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GrantListEntry) GoString() string { - return s.String() -} - -// SetConstraints sets the Constraints field's value. -func (s *GrantListEntry) SetConstraints(v *GrantConstraints) *GrantListEntry { - s.Constraints = v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *GrantListEntry) SetCreationDate(v time.Time) *GrantListEntry { - s.CreationDate = &v - return s -} - -// SetGrantId sets the GrantId field's value. -func (s *GrantListEntry) SetGrantId(v string) *GrantListEntry { - s.GrantId = &v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. 
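To make the subset-versus-equals distinction above concrete, a hedged sketch of attaching a constraint to a CreateGrant request (CreateGrant is the operation these structures feed, per the GrantListEntry docs; the grantee ARN and context pair are hypothetical):

    // Same imports as the sketches above; values are hypothetical.
    func grantWithContextSubset(svc *kms.KMS, keyID, granteeARN string) error {
        _, err := svc.CreateGrant(&kms.CreateGrantInput{
            KeyId:            aws.String(keyID),
            GranteePrincipal: aws.String(granteeARN),
            Operations:       []*string{aws.String(kms.GrantOperationDecrypt)},
            // Decrypt is allowed only when the request's encryption context
            // includes this pair; extra pairs in the request are fine.
            Constraints: &kms.GrantConstraints{
                EncryptionContextSubset: map[string]*string{
                    "Department": aws.String("IT"),
                },
            },
        })
        return err
    }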
-func (s *GrantListEntry) SetGranteePrincipal(v string) *GrantListEntry { - s.GranteePrincipal = &v - return s -} - -// SetIssuingAccount sets the IssuingAccount field's value. -func (s *GrantListEntry) SetIssuingAccount(v string) *GrantListEntry { - s.IssuingAccount = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *GrantListEntry) SetKeyId(v string) *GrantListEntry { - s.KeyId = &v - return s -} - -// SetName sets the Name field's value. -func (s *GrantListEntry) SetName(v string) *GrantListEntry { - s.Name = &v - return s -} - -// SetOperations sets the Operations field's value. -func (s *GrantListEntry) SetOperations(v []*string) *GrantListEntry { - s.Operations = v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *GrantListEntry) SetRetiringPrincipal(v string) *GrantListEntry { - s.RetiringPrincipal = &v - return s -} - -type ImportKeyMaterialInput struct { - _ struct{} `type:"structure"` - - // The encrypted key material to import. The key material must be encrypted - // with the public wrapping key that GetParametersForImport returned, using - // the wrapping algorithm that you specified in the same GetParametersForImport - // request. - // EncryptedKeyMaterial is automatically base64 encoded/decoded by the SDK. - // - // EncryptedKeyMaterial is a required field - EncryptedKeyMaterial []byte `min:"1" type:"blob" required:"true"` - - // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES, - // in which case you must include the ValidTo parameter. When this parameter - // is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter. - ExpirationModel *string `type:"string" enum:"ExpirationModelType"` - - // The import token that you received in the response to a previous GetParametersForImport - // request. It must be from the same response that contained the public key - // that you used to encrypt the key material. - // ImportToken is automatically base64 encoded/decoded by the SDK. - // - // ImportToken is a required field - ImportToken []byte `min:"1" type:"blob" required:"true"` - - // The identifier of the symmetric encryption KMS key that receives the imported - // key material. This must be the same KMS key specified in the KeyID parameter - // of the corresponding GetParametersForImport request. The Origin of the KMS - // key must be EXTERNAL. You cannot perform this operation on an asymmetric - // KMS key, an HMAC KMS key, a KMS key in a custom key store, or on a KMS key - // in a different Amazon Web Services account - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The time at which the imported key material expires. When the key material - // expires, KMS deletes the key material and the KMS key becomes unusable. You - // must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE. - // Otherwise it is required. - ValidTo *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the value will be replaced with "sensitive".
-func (s ImportKeyMaterialInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportKeyMaterialInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ImportKeyMaterialInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ImportKeyMaterialInput"}
- if s.EncryptedKeyMaterial == nil {
- invalidParams.Add(request.NewErrParamRequired("EncryptedKeyMaterial"))
- }
- if s.EncryptedKeyMaterial != nil && len(s.EncryptedKeyMaterial) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("EncryptedKeyMaterial", 1))
- }
- if s.ImportToken == nil {
- invalidParams.Add(request.NewErrParamRequired("ImportToken"))
- }
- if s.ImportToken != nil && len(s.ImportToken) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ImportToken", 1))
- }
- if s.KeyId == nil {
- invalidParams.Add(request.NewErrParamRequired("KeyId"))
- }
- if s.KeyId != nil && len(*s.KeyId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeyId", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEncryptedKeyMaterial sets the EncryptedKeyMaterial field's value.
-func (s *ImportKeyMaterialInput) SetEncryptedKeyMaterial(v []byte) *ImportKeyMaterialInput {
- s.EncryptedKeyMaterial = v
- return s
-}
-
-// SetExpirationModel sets the ExpirationModel field's value.
-func (s *ImportKeyMaterialInput) SetExpirationModel(v string) *ImportKeyMaterialInput {
- s.ExpirationModel = &v
- return s
-}
-
-// SetImportToken sets the ImportToken field's value.
-func (s *ImportKeyMaterialInput) SetImportToken(v []byte) *ImportKeyMaterialInput {
- s.ImportToken = v
- return s
-}
-
-// SetKeyId sets the KeyId field's value.
-func (s *ImportKeyMaterialInput) SetKeyId(v string) *ImportKeyMaterialInput {
- s.KeyId = &v
- return s
-}
-
-// SetValidTo sets the ValidTo field's value.
-func (s *ImportKeyMaterialInput) SetValidTo(v time.Time) *ImportKeyMaterialInput {
- s.ValidTo = &v
- return s
-}
-
-type ImportKeyMaterialOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportKeyMaterialOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportKeyMaterialOutput) GoString() string {
- return s.String()
-}
-
-// The request was rejected because the specified KMS key cannot decrypt the
-// data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request
-// must identify the same KMS key that was used to encrypt the ciphertext.
-type IncorrectKeyException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncorrectKeyException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncorrectKeyException) GoString() string {
- return s.String()
-}
-
-func newErrorIncorrectKeyException(v protocol.ResponseMetadata) error {
- return &IncorrectKeyException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *IncorrectKeyException) Code() string {
- return "IncorrectKeyException"
-}
-
-// Message returns the exception's message.
-func (s *IncorrectKeyException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *IncorrectKeyException) OrigErr() error {
- return nil
-}
-
-func (s *IncorrectKeyException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *IncorrectKeyException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *IncorrectKeyException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The request was rejected because the key material in the request is expired,
-// invalid, or is not the same key material that was previously imported into
-// this KMS key.
-type IncorrectKeyMaterialException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncorrectKeyMaterialException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncorrectKeyMaterialException) GoString() string {
- return s.String()
-}
-
-func newErrorIncorrectKeyMaterialException(v protocol.ResponseMetadata) error {
- return &IncorrectKeyMaterialException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *IncorrectKeyMaterialException) Code() string {
- return "IncorrectKeyMaterialException"
-}
-
-// Message returns the exception's message.
-func (s *IncorrectKeyMaterialException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
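The two import halves compose into one flow: fetch the wrapping parameters, wrap the key material locally with RSA-OAEP over SHA-256 to match the requested algorithm, then send the result back with the import token. IncorrectKeyMaterialException above is what a re-import of different material returns. An untested sketch (adds crypto/rand, crypto/rsa, crypto/sha256, crypto/x509, and errors to the imports):

    func importMaterial(svc *kms.KMS, keyID string, material []byte) error {
        p, err := svc.GetParametersForImport(&kms.GetParametersForImportInput{
            KeyId:             aws.String(keyID),
            WrappingAlgorithm: aws.String(kms.AlgorithmSpecRsaesOaepSha256),
            WrappingKeySpec:   aws.String(kms.WrappingKeySpecRsa2048),
        })
        if err != nil {
            return err
        }
        pub, err := x509.ParsePKIXPublicKey(p.PublicKey)
        if err != nil {
            return err
        }
        rsaPub, ok := pub.(*rsa.PublicKey)
        if !ok {
            return errors.New("wrapping key is not RSA")
        }
        // Wrap the key material with the algorithm requested above.
        wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, rsaPub, material, nil)
        if err != nil {
            return err
        }
        // DOES_NOT_EXPIRE means ValidTo must be omitted, per the field docs.
        _, err = svc.ImportKeyMaterial(&kms.ImportKeyMaterialInput{
            KeyId:                aws.String(keyID),
            ImportToken:          p.ImportToken,
            EncryptedKeyMaterial: wrapped,
            ExpirationModel:      aws.String(kms.ExpirationModelTypeKeyMaterialDoesNotExpire),
        })
        return err
    }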
-func (s *IncorrectKeyMaterialException) OrigErr() error { - return nil -} - -func (s *IncorrectKeyMaterialException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IncorrectKeyMaterialException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IncorrectKeyMaterialException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the trust anchor certificate in the request -// is not the trust anchor certificate for the specified CloudHSM cluster. -// -// When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), -// you create the trust anchor certificate and save it in the customerCA.crt -// file. -type IncorrectTrustAnchorException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectTrustAnchorException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s IncorrectTrustAnchorException) GoString() string { - return s.String() -} - -func newErrorIncorrectTrustAnchorException(v protocol.ResponseMetadata) error { - return &IncorrectTrustAnchorException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *IncorrectTrustAnchorException) Code() string { - return "IncorrectTrustAnchorException" -} - -// Message returns the exception's message. -func (s *IncorrectTrustAnchorException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *IncorrectTrustAnchorException) OrigErr() error { - return nil -} - -func (s *IncorrectTrustAnchorException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *IncorrectTrustAnchorException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *IncorrectTrustAnchorException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because an internal exception occurred. The request -// can be retried. -type InternalException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InternalException) GoString() string { - return s.String() -} - -func newErrorInternalException(v protocol.ResponseMetadata) error { - return &InternalException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InternalException) Code() string { - return "KMSInternalException" -} - -// Message returns the exception's message. -func (s *InternalException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalException) OrigErr() error { - return nil -} - -func (s *InternalException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InternalException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InternalException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified alias name is not valid. -type InvalidAliasNameException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidAliasNameException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidAliasNameException) GoString() string { - return s.String() -} - -func newErrorInvalidAliasNameException(v protocol.ResponseMetadata) error { - return &InvalidAliasNameException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidAliasNameException) Code() string { - return "InvalidAliasNameException" -} - -// Message returns the exception's message. -func (s *InvalidAliasNameException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidAliasNameException) OrigErr() error { - return nil -} - -func (s *InvalidAliasNameException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidAliasNameException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidAliasNameException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because a specified ARN, or an ARN in a key policy, -// is not valid. 
-type InvalidArnException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidArnException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidArnException) GoString() string { - return s.String() -} - -func newErrorInvalidArnException(v protocol.ResponseMetadata) error { - return &InvalidArnException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidArnException) Code() string { - return "InvalidArnException" -} - -// Message returns the exception's message. -func (s *InvalidArnException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidArnException) OrigErr() error { - return nil -} - -func (s *InvalidArnException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidArnException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidArnException) RequestID() string { - return s.RespMetadata.RequestID -} - -// From the Decrypt or ReEncrypt operation, the request was rejected because -// the specified ciphertext, or additional authenticated data incorporated into -// the ciphertext, such as the encryption context, is corrupted, missing, or -// otherwise invalid. -// -// From the ImportKeyMaterial operation, the request was rejected because KMS -// could not decrypt the encrypted (wrapped) key material. -type InvalidCiphertextException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidCiphertextException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidCiphertextException) GoString() string { - return s.String() -} - -func newErrorInvalidCiphertextException(v protocol.ResponseMetadata) error { - return &InvalidCiphertextException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidCiphertextException) Code() string { - return "InvalidCiphertextException" -} - -// Message returns the exception's message. 
-func (s *InvalidCiphertextException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidCiphertextException) OrigErr() error { - return nil -} - -func (s *InvalidCiphertextException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidCiphertextException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidCiphertextException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified GrantId is not valid. -type InvalidGrantIdException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantIdException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantIdException) GoString() string { - return s.String() -} - -func newErrorInvalidGrantIdException(v protocol.ResponseMetadata) error { - return &InvalidGrantIdException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidGrantIdException) Code() string { - return "InvalidGrantIdException" -} - -// Message returns the exception's message. -func (s *InvalidGrantIdException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidGrantIdException) OrigErr() error { - return nil -} - -func (s *InvalidGrantIdException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidGrantIdException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidGrantIdException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the specified grant token is not valid. -type InvalidGrantTokenException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidGrantTokenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
-// The member name will be present, but the value will be replaced with "sensitive".
-func (s InvalidGrantTokenException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidGrantTokenException(v protocol.ResponseMetadata) error {
- return &InvalidGrantTokenException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidGrantTokenException) Code() string {
- return "InvalidGrantTokenException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidGrantTokenException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidGrantTokenException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidGrantTokenException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidGrantTokenException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidGrantTokenException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The request was rejected because the provided import token is invalid or
-// is associated with a different KMS key.
-type InvalidImportTokenException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidImportTokenException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidImportTokenException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidImportTokenException(v protocol.ResponseMetadata) error {
- return &InvalidImportTokenException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidImportTokenException) Code() string {
- return "InvalidImportTokenException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidImportTokenException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidImportTokenException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidImportTokenException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidImportTokenException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidImportTokenException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The request was rejected for one of the following reasons:
-//
-// * The KeyUsage value of the KMS key is incompatible with the API operation.
-// -// * The encryption algorithm or signing algorithm specified for the operation -// is incompatible with the type of key material in the KMS key (KeySpec). -// -// For encrypting, decrypting, re-encrypting, and generating data keys, the -// KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the -// KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication -// codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage -// of a KMS key, use the DescribeKey operation. -// -// To find the encryption or signing algorithms supported for a particular KMS -// key, use the DescribeKey operation. -type InvalidKeyUsageException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidKeyUsageException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidKeyUsageException) GoString() string { - return s.String() -} - -func newErrorInvalidKeyUsageException(v protocol.ResponseMetadata) error { - return &InvalidKeyUsageException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidKeyUsageException) Code() string { - return "InvalidKeyUsageException" -} - -// Message returns the exception's message. -func (s *InvalidKeyUsageException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidKeyUsageException) OrigErr() error { - return nil -} - -func (s *InvalidKeyUsageException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidKeyUsageException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidKeyUsageException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the marker that specifies where pagination -// should next begin is not valid. -type InvalidMarkerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidMarkerException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
-func (s InvalidMarkerException) GoString() string { - return s.String() -} - -func newErrorInvalidMarkerException(v protocol.ResponseMetadata) error { - return &InvalidMarkerException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidMarkerException) Code() string { - return "InvalidMarkerException" -} - -// Message returns the exception's message. -func (s *InvalidMarkerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidMarkerException) OrigErr() error { - return nil -} - -func (s *InvalidMarkerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidMarkerException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidMarkerException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the state of the specified resource is not -// valid for this request. -// -// For more information about how key state affects the use of a KMS key, see -// Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) -// in the Key Management Service Developer Guide . -type InvalidStateException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidStateException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s InvalidStateException) GoString() string { - return s.String() -} - -func newErrorInvalidStateException(v protocol.ResponseMetadata) error { - return &InvalidStateException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *InvalidStateException) Code() string { - return "KMSInvalidStateException" -} - -// Message returns the exception's message. -func (s *InvalidStateException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidStateException) OrigErr() error { - return nil -} - -func (s *InvalidStateException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidStateException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *InvalidStateException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the HMAC verification failed. HMAC verification -// fails when the HMAC computed by using the specified message, HMAC KMS key, -// and MAC algorithm does not match the HMAC specified in the request. 
-type KMSInvalidMacException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KMSInvalidMacException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KMSInvalidMacException) GoString() string { - return s.String() -} - -func newErrorKMSInvalidMacException(v protocol.ResponseMetadata) error { - return &KMSInvalidMacException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *KMSInvalidMacException) Code() string { - return "KMSInvalidMacException" -} - -// Message returns the exception's message. -func (s *KMSInvalidMacException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *KMSInvalidMacException) OrigErr() error { - return nil -} - -func (s *KMSInvalidMacException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *KMSInvalidMacException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *KMSInvalidMacException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because the signature verification failed. Signature -// verification fails when it cannot confirm that signature was produced by -// signing the specified message with the specified KMS key and signing algorithm. -type KMSInvalidSignatureException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KMSInvalidSignatureException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KMSInvalidSignatureException) GoString() string { - return s.String() -} - -func newErrorKMSInvalidSignatureException(v protocol.ResponseMetadata) error { - return &KMSInvalidSignatureException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *KMSInvalidSignatureException) Code() string { - return "KMSInvalidSignatureException" -} - -// Message returns the exception's message. -func (s *KMSInvalidSignatureException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. 
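Editor's note: as the comments above describe, KMSInvalidMacException and KMSInvalidSignatureException report a failed verification rather than an operational fault, so callers usually branch on the error code. A hedged sketch for Verify (the VerifyMac path is symmetric, with ErrCodeKMSInvalidMacException); key alias and algorithm are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

// verifySignature returns (valid, err): per the exception docs above, an
// invalid signature is a normal outcome, not an error.
func verifySignature(svc *kms.KMS, message, signature []byte) (bool, error) {
	out, err := svc.Verify(&kms.VerifyInput{
		KeyId:            aws.String("alias/example-signing-key"), // placeholder
		Message:          message,
		Signature:        signature,
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecRsassaPssSha256), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == kms.ErrCodeKMSInvalidSignatureException {
		return false, nil // verification failed: wrong key, algorithm, or message
	}
	if err != nil {
		return false, err // genuine operational error
	}
	return aws.BoolValue(out.SignatureValid), nil
}

func main() {
	svc := kms.New(session.Must(session.NewSession()))
	ok, err := verifySignature(svc, []byte("payload"), []byte("signature-bytes"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature valid:", ok)
}
```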
-func (s *KMSInvalidSignatureException) OrigErr() error { - return nil -} - -func (s *KMSInvalidSignatureException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *KMSInvalidSignatureException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *KMSInvalidSignatureException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Contains information about each entry in the key list. -type KeyListEntry struct { - _ struct{} `type:"structure"` - - // ARN of the key. - KeyArn *string `min:"20" type:"string"` - - // Unique identifier of the key. - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyListEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyListEntry) GoString() string { - return s.String() -} - -// SetKeyArn sets the KeyArn field's value. -func (s *KeyListEntry) SetKeyArn(v string) *KeyListEntry { - s.KeyArn = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *KeyListEntry) SetKeyId(v string) *KeyListEntry { - s.KeyId = &v - return s -} - -// Contains metadata about a KMS key. -// -// This data type is used as a response element for the CreateKey and DescribeKey -// operations. -type KeyMetadata struct { - _ struct{} `type:"structure"` - - // The twelve-digit account ID of the Amazon Web Services account that owns - // the KMS key. - AWSAccountId *string `type:"string"` - - // The Amazon Resource Name (ARN) of the KMS key. For examples, see Key Management - // Service (KMS) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) - // in the Example ARNs section of the Amazon Web Services General Reference. - Arn *string `min:"20" type:"string"` - - // The cluster ID of the CloudHSM cluster that contains the key material for - // the KMS key. When you create a KMS key in a custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html), - // KMS creates the key material for the KMS key in the associated CloudHSM cluster. - // This value is present only when the KMS key is created in a custom key store. - CloudHsmClusterId *string `min:"19" type:"string"` - - // The date and time when the KMS key was created. - CreationDate *time.Time `type:"timestamp"` - - // A unique identifier for the custom key store (https://docs.aws.amazon.com/kms/latest/developerguide/custom-key-store-overview.html) - // that contains the KMS key. This value is present only when the KMS key is - // created in a custom key store. - CustomKeyStoreId *string `min:"1" type:"string"` - - // Instead, use the KeySpec field. - // - // The KeySpec and CustomerMasterKeySpec fields have the same value. We recommend - // that you use the KeySpec field in your code. However, to avoid breaking changes, - // KMS will support both fields. - // - // Deprecated: This field has been deprecated. 
Instead, use the KeySpec field. - CustomerMasterKeySpec *string `deprecated:"true" type:"string" enum:"CustomerMasterKeySpec"` - - // The date and time after which KMS deletes this KMS key. This value is present - // only when the KMS key is scheduled for deletion, that is, when its KeyState - // is PendingDeletion. - // - // When the primary key in a multi-Region key is scheduled for deletion but - // still has replica keys, its key state is PendingReplicaDeletion and the length - // of its waiting period is displayed in the PendingDeletionWindowInDays field. - DeletionDate *time.Time `type:"timestamp"` - - // The description of the KMS key. - Description *string `type:"string"` - - // Specifies whether the KMS key is enabled. When KeyState is Enabled this value - // is true, otherwise it is false. - Enabled *bool `type:"boolean"` - - // The encryption algorithms that the KMS key supports. You cannot use the KMS - // key with other encryption algorithms within KMS. - // - // This value is present only when the KeyUsage of the KMS key is ENCRYPT_DECRYPT. - EncryptionAlgorithms []*string `type:"list" enum:"EncryptionAlgorithmSpec"` - - // Specifies whether the KMS key's key material expires. This value is present - // only when Origin is EXTERNAL, otherwise this value is omitted. - ExpirationModel *string `type:"string" enum:"ExpirationModelType"` - - // The globally unique identifier for the KMS key. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The manager of the KMS key. KMS keys in your Amazon Web Services account - // are either customer managed or Amazon Web Services managed. For more information - // about the difference, see KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#kms_keys) - // in the Key Management Service Developer Guide. - KeyManager *string `type:"string" enum:"KeyManagerType"` - - // Describes the type of key material in the KMS key. - KeySpec *string `type:"string" enum:"KeySpec"` - - // The current status of the KMS key. - // - // For more information about how key state affects the use of a KMS key, see - // Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. - KeyState *string `type:"string" enum:"KeyState"` - - // The cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) - // for which you can use the KMS key. - KeyUsage *string `type:"string" enum:"KeyUsageType"` - - // The message authentication code (MAC) algorithm that the HMAC KMS key supports. - // - // This value is present only when the KeyUsage of the KMS key is GENERATE_VERIFY_MAC. - MacAlgorithms []*string `type:"list" enum:"MacAlgorithmSpec"` - - // Indicates whether the KMS key is a multi-Region (True) or regional (False) - // key. This value is True for multi-Region primary and replica keys and False - // for regional KMS keys. - // - // For more information about multi-Region keys, see Multi-Region keys in KMS - // (https://docs.aws.amazon.com/kms/latest/developerguide/multi-region-keys-overview.html) - // in the Key Management Service Developer Guide. - MultiRegion *bool `type:"boolean"` - - // Lists the primary and replica keys in same multi-Region key. This field is - // present only when the value of the MultiRegion field is True. - // - // For more information about any listed KMS key, use the DescribeKey operation. 
- // - // * MultiRegionKeyType indicates whether the KMS key is a PRIMARY or REPLICA - // key. - // - // * PrimaryKey displays the key ARN and Region of the primary key. This - // field displays the current KMS key if it is the primary key. - // - // * ReplicaKeys displays the key ARNs and Regions of all replica keys. This - // field includes the current KMS key if it is a replica key. - MultiRegionConfiguration *MultiRegionConfiguration `type:"structure"` - - // The source of the key material for the KMS key. When this value is AWS_KMS, - // KMS created the key material. When this value is EXTERNAL, the key material - // was imported or the KMS key doesn't have any key material. When this value - // is AWS_CLOUDHSM, the key material was created in the CloudHSM cluster associated - // with a custom key store. - Origin *string `type:"string" enum:"OriginType"` - - // The waiting period before the primary key in a multi-Region key is deleted. - // This waiting period begins when the last of its replica keys is deleted. - // This value is present only when the KeyState of the KMS key is PendingReplicaDeletion. - // That indicates that the KMS key is the primary key in a multi-Region key, - // it is scheduled for deletion, and it still has existing replica keys. - // - // When a single-Region KMS key or a multi-Region replica key is scheduled for - // deletion, its deletion date is displayed in the DeletionDate field. However, - // when the primary key in a multi-Region key is scheduled for deletion, its - // waiting period doesn't begin until all of its replica keys are deleted. This - // value displays that waiting period. When the last replica key in the multi-Region - // key is deleted, the KeyState of the scheduled primary key changes from PendingReplicaDeletion - // to PendingDeletion and the deletion date appears in the DeletionDate field. - PendingDeletionWindowInDays *int64 `min:"1" type:"integer"` - - // The signing algorithms that the KMS key supports. You cannot use the KMS - // key with other signing algorithms within KMS. - // - // This field appears only when the KeyUsage of the KMS key is SIGN_VERIFY. - SigningAlgorithms []*string `type:"list" enum:"SigningAlgorithmSpec"` - - // The time at which the imported key material expires. When the key material - // expires, KMS deletes the key material and the KMS key becomes unusable. This - // value is present only for KMS keys whose Origin is EXTERNAL and whose ExpirationModel - // is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. - ValidTo *time.Time `type:"timestamp"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyMetadata) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyMetadata) GoString() string { - return s.String() -} - -// SetAWSAccountId sets the AWSAccountId field's value. -func (s *KeyMetadata) SetAWSAccountId(v string) *KeyMetadata { - s.AWSAccountId = &v - return s -} - -// SetArn sets the Arn field's value. 
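Editor's note: KeyMetadata, removed above, carries several conditional lifecycle fields (DeletionDate, PendingDeletionWindowInDays, ValidTo) whose presence depends on KeyState, Origin, and ExpirationModel. A short sketch of reading them defensively, using only fields and enum constants from the struct above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// printLifecycle interprets the conditional KeyMetadata fields documented above.
func printLifecycle(md *kms.KeyMetadata) {
	switch aws.StringValue(md.KeyState) {
	case kms.KeyStateEnabled:
		fmt.Println("key is usable")
	case kms.KeyStatePendingDeletion:
		// DeletionDate is present only while deletion is scheduled.
		fmt.Println("key deletes at", aws.TimeValue(md.DeletionDate))
	case kms.KeyStatePendingReplicaDeletion:
		// A multi-Region primary waiting for its replicas to be deleted first.
		fmt.Println("waiting period (days):", aws.Int64Value(md.PendingDeletionWindowInDays))
	}
	// ValidTo appears only when Origin is EXTERNAL and ExpirationModel is
	// KEY_MATERIAL_EXPIRES.
	if md.ValidTo != nil {
		fmt.Println("imported key material expires at", aws.TimeValue(md.ValidTo))
	}
}

func main() {
	printLifecycle(&kms.KeyMetadata{KeyState: aws.String(kms.KeyStateEnabled)})
}
```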
-func (s *KeyMetadata) SetArn(v string) *KeyMetadata { - s.Arn = &v - return s -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *KeyMetadata) SetCloudHsmClusterId(v string) *KeyMetadata { - s.CloudHsmClusterId = &v - return s -} - -// SetCreationDate sets the CreationDate field's value. -func (s *KeyMetadata) SetCreationDate(v time.Time) *KeyMetadata { - s.CreationDate = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *KeyMetadata) SetCustomKeyStoreId(v string) *KeyMetadata { - s.CustomKeyStoreId = &v - return s -} - -// SetCustomerMasterKeySpec sets the CustomerMasterKeySpec field's value. -func (s *KeyMetadata) SetCustomerMasterKeySpec(v string) *KeyMetadata { - s.CustomerMasterKeySpec = &v - return s -} - -// SetDeletionDate sets the DeletionDate field's value. -func (s *KeyMetadata) SetDeletionDate(v time.Time) *KeyMetadata { - s.DeletionDate = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *KeyMetadata) SetDescription(v string) *KeyMetadata { - s.Description = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *KeyMetadata) SetEnabled(v bool) *KeyMetadata { - s.Enabled = &v - return s -} - -// SetEncryptionAlgorithms sets the EncryptionAlgorithms field's value. -func (s *KeyMetadata) SetEncryptionAlgorithms(v []*string) *KeyMetadata { - s.EncryptionAlgorithms = v - return s -} - -// SetExpirationModel sets the ExpirationModel field's value. -func (s *KeyMetadata) SetExpirationModel(v string) *KeyMetadata { - s.ExpirationModel = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *KeyMetadata) SetKeyId(v string) *KeyMetadata { - s.KeyId = &v - return s -} - -// SetKeyManager sets the KeyManager field's value. -func (s *KeyMetadata) SetKeyManager(v string) *KeyMetadata { - s.KeyManager = &v - return s -} - -// SetKeySpec sets the KeySpec field's value. -func (s *KeyMetadata) SetKeySpec(v string) *KeyMetadata { - s.KeySpec = &v - return s -} - -// SetKeyState sets the KeyState field's value. -func (s *KeyMetadata) SetKeyState(v string) *KeyMetadata { - s.KeyState = &v - return s -} - -// SetKeyUsage sets the KeyUsage field's value. -func (s *KeyMetadata) SetKeyUsage(v string) *KeyMetadata { - s.KeyUsage = &v - return s -} - -// SetMacAlgorithms sets the MacAlgorithms field's value. -func (s *KeyMetadata) SetMacAlgorithms(v []*string) *KeyMetadata { - s.MacAlgorithms = v - return s -} - -// SetMultiRegion sets the MultiRegion field's value. -func (s *KeyMetadata) SetMultiRegion(v bool) *KeyMetadata { - s.MultiRegion = &v - return s -} - -// SetMultiRegionConfiguration sets the MultiRegionConfiguration field's value. -func (s *KeyMetadata) SetMultiRegionConfiguration(v *MultiRegionConfiguration) *KeyMetadata { - s.MultiRegionConfiguration = v - return s -} - -// SetOrigin sets the Origin field's value. -func (s *KeyMetadata) SetOrigin(v string) *KeyMetadata { - s.Origin = &v - return s -} - -// SetPendingDeletionWindowInDays sets the PendingDeletionWindowInDays field's value. -func (s *KeyMetadata) SetPendingDeletionWindowInDays(v int64) *KeyMetadata { - s.PendingDeletionWindowInDays = &v - return s -} - -// SetSigningAlgorithms sets the SigningAlgorithms field's value. -func (s *KeyMetadata) SetSigningAlgorithms(v []*string) *KeyMetadata { - s.SigningAlgorithms = v - return s -} - -// SetValidTo sets the ValidTo field's value. 
-func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata { - s.ValidTo = &v - return s -} - -// The request was rejected because the specified KMS key was not available. -// You can retry the request. -type KeyUnavailableException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyUnavailableException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s KeyUnavailableException) GoString() string { - return s.String() -} - -func newErrorKeyUnavailableException(v protocol.ResponseMetadata) error { - return &KeyUnavailableException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *KeyUnavailableException) Code() string { - return "KeyUnavailableException" -} - -// Message returns the exception's message. -func (s *KeyUnavailableException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *KeyUnavailableException) OrigErr() error { - return nil -} - -func (s *KeyUnavailableException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *KeyUnavailableException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *KeyUnavailableException) RequestID() string { - return s.RespMetadata.RequestID -} - -// The request was rejected because a quota was exceeded. For more information, -// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) -// in the Key Management Service Developer Guide. -type LimitExceededException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LimitExceededException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s LimitExceededException) GoString() string { - return s.String() -} - -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *LimitExceededException) Code() string { - return "LimitExceededException" -} - -// Message returns the exception's message. 
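Editor's note: KeyUnavailableException is the one failure above that its docs explicitly call retryable ("You can retry the request"). A small hedged helper along those lines; the attempt count and linear backoff are arbitrary illustrative choices, not SDK policy:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kms"
)

// retryOnKeyUnavailable retries only when KMS reports the key as temporarily
// unavailable, per the exception docs above.
func retryOnKeyUnavailable(call func() error) error {
	var err error
	for attempt := 1; attempt <= 3; attempt++ {
		if err = call(); err == nil {
			return nil
		}
		aerr, ok := err.(awserr.Error)
		if !ok || aerr.Code() != kms.ErrCodeKeyUnavailableException {
			return err // not the retryable case
		}
		time.Sleep(time.Duration(attempt) * time.Second)
	}
	return err
}

func main() {
	if err := retryOnKeyUnavailable(func() error { return nil /* placeholder KMS call */ }); err != nil {
		log.Fatal(err)
	}
}
```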
-func (s *LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *LimitExceededException) OrigErr() error { - return nil -} - -func (s *LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *LimitExceededException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *LimitExceededException) RequestID() string { - return s.RespMetadata.RequestID -} - -type ListAliasesInput struct { - _ struct{} `type:"structure"` - - // Lists only aliases that are associated with the specified KMS key. Enter - // a KMS key in your Amazon Web Services account. - // - // This parameter is optional. If you omit it, ListAliases returns all aliases - // in the account and Region. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - KeyId *string `min:"1" type:"string"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAliasesInput"} - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListAliasesInput) SetKeyId(v string) *ListAliasesInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. 
-func (s *ListAliasesInput) SetLimit(v int64) *ListAliasesInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListAliasesInput) SetMarker(v string) *ListAliasesInput { - s.Marker = &v - return s -} - -type ListAliasesOutput struct { - _ struct{} `type:"structure"` - - // A list of aliases. - Aliases []*AliasListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListAliasesOutput) GoString() string { - return s.String() -} - -// SetAliases sets the Aliases field's value. -func (s *ListAliasesOutput) SetAliases(v []*AliasListEntry) *ListAliasesOutput { - s.Aliases = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListAliasesOutput) SetNextMarker(v string) *ListAliasesOutput { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListAliasesOutput) SetTruncated(v bool) *ListAliasesOutput { - s.Truncated = &v - return s -} - -type ListGrantsInput struct { - _ struct{} `type:"structure"` - - // Returns only the grant with the specified grant ID. The grant ID uniquely - // identifies the grant. - GrantId *string `min:"1" type:"string"` - - // Returns only grants where the specified principal is the grantee principal - // for the grant. - GranteePrincipal *string `min:"1" type:"string"` - - // Returns only grants for the specified KMS key. This parameter is required. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. 
Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListGrantsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListGrantsInput"} - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.GranteePrincipal != nil && len(*s.GranteePrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GranteePrincipal", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *ListGrantsInput) SetGrantId(v string) *ListGrantsInput { - s.GrantId = &v - return s -} - -// SetGranteePrincipal sets the GranteePrincipal field's value. -func (s *ListGrantsInput) SetGranteePrincipal(v string) *ListGrantsInput { - s.GranteePrincipal = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ListGrantsInput) SetKeyId(v string) *ListGrantsInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListGrantsInput) SetLimit(v int64) *ListGrantsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListGrantsInput) SetMarker(v string) *ListGrantsInput { - s.Marker = &v - return s -} - -type ListGrantsResponse struct { - _ struct{} `type:"structure"` - - // A list of grants. - Grants []*GrantListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsResponse) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListGrantsResponse) GoString() string { - return s.String() -} - -// SetGrants sets the Grants field's value. -func (s *ListGrantsResponse) SetGrants(v []*GrantListEntry) *ListGrantsResponse { - s.Grants = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListGrantsResponse) SetNextMarker(v string) *ListGrantsResponse { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListGrantsResponse) SetTruncated(v bool) *ListGrantsResponse { - s.Truncated = &v - return s -} - -type ListKeyPoliciesInput struct { - _ struct{} `type:"structure"` - - // Gets the names of key policies for the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 1000, inclusive. If you do not include a value, it defaults to 100. - // - // Only one policy can be attached to a key. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListKeyPoliciesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListKeyPoliciesInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListKeyPoliciesInput) SetKeyId(v string) *ListKeyPoliciesInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. 
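Editor's note: ListGrantsInput above requires KeyId (a key ARN for cross-account calls) and can optionally narrow the result by GrantId or GranteePrincipal. A sketch; both ARNs are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Both ARNs are placeholders; a key ARN is required for cross-account keys.
	out, err := svc.ListGrants(&kms.ListGrantsInput{
		KeyId:            aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
		GranteePrincipal: aws.String("arn:aws:iam::111122223333:role/ExampleGrantee"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range out.Grants {
		fmt.Println(aws.StringValue(g.GrantId), aws.StringValueSlice(g.Operations))
	}
}
```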
-func (s *ListKeyPoliciesInput) SetLimit(v int64) *ListKeyPoliciesInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListKeyPoliciesInput) SetMarker(v string) *ListKeyPoliciesInput { - s.Marker = &v - return s -} - -type ListKeyPoliciesOutput struct { - _ struct{} `type:"structure"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A list of key policy names. The only valid value is default. - PolicyNames []*string `type:"list"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeyPoliciesOutput) GoString() string { - return s.String() -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListKeyPoliciesOutput) SetNextMarker(v string) *ListKeyPoliciesOutput { - s.NextMarker = &v - return s -} - -// SetPolicyNames sets the PolicyNames field's value. -func (s *ListKeyPoliciesOutput) SetPolicyNames(v []*string) *ListKeyPoliciesOutput { - s.PolicyNames = v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListKeyPoliciesOutput) SetTruncated(v bool) *ListKeyPoliciesOutput { - s.Truncated = &v - return s -} - -type ListKeysInput struct { - _ struct{} `type:"structure"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 1000, inclusive. If you do not include a value, it defaults to 100. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListKeysInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListKeysInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListKeysInput) SetLimit(v int64) *ListKeysInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListKeysInput) SetMarker(v string) *ListKeysInput { - s.Marker = &v - return s -} - -type ListKeysOutput struct { - _ struct{} `type:"structure"` - - // A list of KMS keys. - Keys []*KeyListEntry `type:"list"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - NextMarker *string `min:"1" type:"string"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListKeysOutput) GoString() string { - return s.String() -} - -// SetKeys sets the Keys field's value. -func (s *ListKeysOutput) SetKeys(v []*KeyListEntry) *ListKeysOutput { - s.Keys = v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListKeysOutput) SetNextMarker(v string) *ListKeysOutput { - s.NextMarker = &v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListKeysOutput) SetTruncated(v bool) *ListKeysOutput { - s.Truncated = &v - return s -} - -type ListResourceTagsInput struct { - _ struct{} `type:"structure"` - - // Gets tags on the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 50, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. 
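Editor's note: every List* shape above shares one pagination contract: send no Marker on the first call, and while Truncated is true feed NextMarker back in as Marker. A sketch over ListKeys; the SDK's generated ListKeysPages helper wraps the same loop:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	var marker *string // nil on the first request
	for {
		out, err := svc.ListKeys(&kms.ListKeysInput{
			Limit:  aws.Int64(100), // optional; 1-1000 for ListKeys
			Marker: marker,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, k := range out.Keys {
			fmt.Println(aws.StringValue(k.KeyId), aws.StringValue(k.KeyArn))
		}
		if !aws.BoolValue(out.Truncated) {
			break
		}
		marker = out.NextMarker // opaque token; never construct it by hand
	}
}
```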
- // - // Do not attempt to construct this value. Use only the value of NextMarker - // from the truncated response you just received. - Marker *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListResourceTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResourceTagsInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ListResourceTagsInput) SetKeyId(v string) *ListResourceTagsInput { - s.KeyId = &v - return s -} - -// SetLimit sets the Limit field's value. -func (s *ListResourceTagsInput) SetLimit(v int64) *ListResourceTagsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListResourceTagsInput) SetMarker(v string) *ListResourceTagsInput { - s.Marker = &v - return s -} - -type ListResourceTagsOutput struct { - _ struct{} `type:"structure"` - - // When Truncated is true, this element is present and contains the value to - // use for the Marker parameter in a subsequent request. - // - // Do not assume or infer any information from this value. - NextMarker *string `min:"1" type:"string"` - - // A list of tags. Each tag consists of a tag key and a tag value. - // - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. - // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) - // in the Key Management Service Developer Guide. - Tags []*Tag `type:"list"` - - // A flag that indicates whether there are more items in the list. When this - // value is true, the list in this response is truncated. To get more items, - // pass the value of the NextMarker element in thisresponse to the Marker parameter - // in a subsequent request. - Truncated *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListResourceTagsOutput) GoString() string { - return s.String() -} - -// SetNextMarker sets the NextMarker field's value. -func (s *ListResourceTagsOutput) SetNextMarker(v string) *ListResourceTagsOutput { - s.NextMarker = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ListResourceTagsOutput) SetTags(v []*Tag) *ListResourceTagsOutput { - s.Tags = v - return s -} - -// SetTruncated sets the Truncated field's value. -func (s *ListResourceTagsOutput) SetTruncated(v bool) *ListResourceTagsOutput { - s.Truncated = &v - return s -} - -type ListRetirableGrantsInput struct { - _ struct{} `type:"structure"` - - // Use this parameter to specify the maximum number of items to return. When - // this value is present, KMS does not return more than the specified number - // of items, but it might return fewer. - // - // This value is optional. If you include a value, it must be between 1 and - // 100, inclusive. If you do not include a value, it defaults to 50. - Limit *int64 `min:"1" type:"integer"` - - // Use this parameter in a subsequent request after you receive a response with - // truncated results. Set it to the value of NextMarker from the truncated response - // you just received. - Marker *string `min:"1" type:"string"` - - // The retiring principal for which to list grants. Enter a principal in your - // Amazon Web Services account. - // - // To specify the retiring principal, use the Amazon Resource Name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // of an Amazon Web Services principal. Valid Amazon Web Services principals - // include Amazon Web Services accounts (root), IAM users, federated users, - // and assumed role users. For examples of the ARN syntax for specifying a principal, - // see Amazon Web Services Identity and Access Management (IAM) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) - // in the Example ARNs section of the Amazon Web Services General Reference. - // - // RetiringPrincipal is a required field - RetiringPrincipal *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListRetirableGrantsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListRetirableGrantsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListRetirableGrantsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRetirableGrantsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.Marker != nil && len(*s.Marker) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) - } - if s.RetiringPrincipal == nil { - invalidParams.Add(request.NewErrParamRequired("RetiringPrincipal")) - } - if s.RetiringPrincipal != nil && len(*s.RetiringPrincipal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RetiringPrincipal", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLimit sets the Limit field's value. -func (s *ListRetirableGrantsInput) SetLimit(v int64) *ListRetirableGrantsInput { - s.Limit = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListRetirableGrantsInput) SetMarker(v string) *ListRetirableGrantsInput { - s.Marker = &v - return s -} - -// SetRetiringPrincipal sets the RetiringPrincipal field's value. -func (s *ListRetirableGrantsInput) SetRetiringPrincipal(v string) *ListRetirableGrantsInput { - s.RetiringPrincipal = &v - return s -} - -// The request was rejected because the specified policy is not syntactically -// or semantically correct. -type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MalformedPolicyDocumentException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MalformedPolicyDocumentException) GoString() string { - return s.String() -} - -func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { - return &MalformedPolicyDocumentException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *MalformedPolicyDocumentException) Code() string { - return "MalformedPolicyDocumentException" -} - -// Message returns the exception's message. -func (s *MalformedPolicyDocumentException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *MalformedPolicyDocumentException) OrigErr() error { - return nil -} - -func (s *MalformedPolicyDocumentException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *MalformedPolicyDocumentException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *MalformedPolicyDocumentException) RequestID() string { - return s.RespMetadata.RequestID -} - -// Describes the configuration of this multi-Region key. This field appears -// only when the KMS key is a primary or replica of a multi-Region key. -// -// For more information about any listed KMS key, use the DescribeKey operation. 
-type MultiRegionConfiguration struct { - _ struct{} `type:"structure"` - - // Indicates whether the KMS key is a PRIMARY or REPLICA key. - MultiRegionKeyType *string `type:"string" enum:"MultiRegionKeyType"` - - // Displays the key ARN and Region of the primary key. This field includes the - // current KMS key if it is the primary key. - PrimaryKey *MultiRegionKey `type:"structure"` - - // displays the key ARNs and Regions of all replica keys. This field includes - // the current KMS key if it is a replica key. - ReplicaKeys []*MultiRegionKey `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionConfiguration) GoString() string { - return s.String() -} - -// SetMultiRegionKeyType sets the MultiRegionKeyType field's value. -func (s *MultiRegionConfiguration) SetMultiRegionKeyType(v string) *MultiRegionConfiguration { - s.MultiRegionKeyType = &v - return s -} - -// SetPrimaryKey sets the PrimaryKey field's value. -func (s *MultiRegionConfiguration) SetPrimaryKey(v *MultiRegionKey) *MultiRegionConfiguration { - s.PrimaryKey = v - return s -} - -// SetReplicaKeys sets the ReplicaKeys field's value. -func (s *MultiRegionConfiguration) SetReplicaKeys(v []*MultiRegionKey) *MultiRegionConfiguration { - s.ReplicaKeys = v - return s -} - -// Describes the primary or replica key in a multi-Region key. -type MultiRegionKey struct { - _ struct{} `type:"structure"` - - // Displays the key ARN of a primary or replica key of a multi-Region key. - Arn *string `min:"20" type:"string"` - - // Displays the Amazon Web Services Region of a primary or replica key in a - // multi-Region key. - Region *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionKey) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s MultiRegionKey) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *MultiRegionKey) SetArn(v string) *MultiRegionKey { - s.Arn = &v - return s -} - -// SetRegion sets the Region field's value. -func (s *MultiRegionKey) SetRegion(v string) *MultiRegionKey { - s.Region = &v - return s -} - -// The request was rejected because the specified entity or resource could not -// be found. -type NotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. 
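Editor's note: the MultiRegionConfiguration / MultiRegionKey pair above describes the full topology of a multi-Region key as returned by DescribeKey. A sketch that walks it, using only the fields defined above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// printTopology walks the multi-Region fields documented above.
func printTopology(md *kms.KeyMetadata) {
	if !aws.BoolValue(md.MultiRegion) || md.MultiRegionConfiguration == nil {
		fmt.Println("single-Region key")
		return
	}
	cfg := md.MultiRegionConfiguration
	fmt.Println("this key is a", aws.StringValue(cfg.MultiRegionKeyType)) // PRIMARY or REPLICA
	if p := cfg.PrimaryKey; p != nil {
		fmt.Println("primary:", aws.StringValue(p.Arn), "in", aws.StringValue(p.Region))
	}
	for _, r := range cfg.ReplicaKeys {
		fmt.Println("replica:", aws.StringValue(r.Arn), "in", aws.StringValue(r.Region))
	}
}

func main() {
	printTopology(&kms.KeyMetadata{MultiRegion: aws.Bool(false)})
}
```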
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotFoundException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s NotFoundException) GoString() string { - return s.String() -} - -func newErrorNotFoundException(v protocol.ResponseMetadata) error { - return &NotFoundException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *NotFoundException) Code() string { - return "NotFoundException" -} - -// Message returns the exception's message. -func (s *NotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *NotFoundException) OrigErr() error { - return nil -} - -func (s *NotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *NotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *NotFoundException) RequestID() string { - return s.RespMetadata.RequestID -} - -type PutKeyPolicyInput struct { - _ struct{} `type:"structure"` - - // A flag to indicate whether to bypass the key policy lockout safety check. - // - // Setting this value to true increases the risk that the KMS key becomes unmanageable. - // Do not set this value to true indiscriminately. - // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. - // - // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. - // - // The default value is false. - BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` - - // Sets the key policy on the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The key policy to attach to the KMS key. - // - // The key policy must meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must allow the principal that is making the PutKeyPolicy request to make - // a subsequent PutKeyPolicy request on the KMS key. This reduces the risk - // that the KMS key becomes unmanageable. For more information, refer to - // the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide. 
- // - // * Each statement in the key policy must contain one or more principals. - // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Amazon Web Services Identity and Access Management User Guide. - // - // The key policy cannot exceed 32 kilobytes (32768 bytes). For more information, - // see Resource Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/resource-limits.html) - // in the Key Management Service Developer Guide. - // - // Policy is a required field - Policy *string `min:"1" type:"string" required:"true"` - - // The name of the key policy. The only valid value is default. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutKeyPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutKeyPolicyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value. -func (s *PutKeyPolicyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *PutKeyPolicyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *PutKeyPolicyInput) SetKeyId(v string) *PutKeyPolicyInput { - s.KeyId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *PutKeyPolicyInput) SetPolicy(v string) *PutKeyPolicyInput { - s.Policy = &v - return s -} - -// SetPolicyName sets the PolicyName field's value. -func (s *PutKeyPolicyInput) SetPolicyName(v string) *PutKeyPolicyInput { - s.PolicyName = &v - return s -} - -type PutKeyPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. 
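The generated Set* methods let the input be built fluently, and Validate runs the client-side checks shown above before any request is signed. A sketch with placeholder values:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	input := (&kms.PutKeyPolicyInput{}).
		SetKeyId("1234abcd-12ab-34cd-56ef-1234567890ab"). // placeholder
		SetPolicyName("default").
		SetPolicy(`{"Version":"2012-10-17","Statement":[]}`) // placeholder

	// Validate aggregates every violated constraint (required fields,
	// minimum lengths) into a single request.ErrInvalidParams value.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
}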
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s PutKeyPolicyOutput) GoString() string { - return s.String() -} - -type ReEncryptInput struct { - _ struct{} `type:"structure"` - - // Ciphertext of the data to reencrypt. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - // - // CiphertextBlob is a required field - CiphertextBlob []byte `min:"1" type:"blob" required:"true"` - - // Specifies the encryption algorithm that KMS will use to reencrypt the data - // after it has decrypted it. The default value, SYMMETRIC_DEFAULT, represents - // the encryption algorithm used for symmetric encryption KMS keys. - // - // This parameter is required only when the destination KMS key is an asymmetric - // KMS key. - DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context to use when reencrypting the data. - // - // A destination encryption context is valid only when the destination KMS key - // is a symmetric encryption KMS key. The standard ciphertext format for asymmetric - // KMS keys does not include fields for metadata. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - DestinationEncryptionContext map[string]*string `type:"map"` - - // A unique identifier for the KMS key that is used to reencrypt the data. Specify - // a symmetric encryption KMS key or an asymmetric KMS key with a KeyUsage value - // of ENCRYPT_DECRYPT. To find the KeyUsage value of a KMS key, use the DescribeKey - // operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // DestinationKeyId is a required field - DestinationKeyId *string `min:"1" type:"string" required:"true"` - - // A list of grant tokens.
- // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Specifies the encryption algorithm that KMS will use to decrypt the ciphertext - // before it is reencrypted. The default value, SYMMETRIC_DEFAULT, represents - // the algorithm used for symmetric encryption KMS keys. - // - // Specify the same algorithm that was used to encrypt the ciphertext. If you - // specify a different algorithm, the decrypt attempt fails. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. - SourceEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Specifies the encryption context to use to decrypt the ciphertext. Enter - // the same encryption context that was used to encrypt the ciphertext. - // - // An encryption context is a collection of non-secret key-value pairs that - // represent additional authenticated data. When you use an encryption context - // to encrypt data, you must specify the same (an exact case-sensitive match) - // encryption context to decrypt the data. An encryption context is supported - // only on operations with symmetric encryption KMS keys. On operations with - // symmetric encryption KMS keys, an encryption context is optional, but it - // is strongly recommended. - // - // For more information, see Encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) - // in the Key Management Service Developer Guide. - SourceEncryptionContext map[string]*string `type:"map"` - - // Specifies the KMS key that KMS will use to decrypt the ciphertext before - // it is re-encrypted. - // - // Enter a key ID of the KMS key that was used to encrypt the ciphertext. If - // you identify a different KMS key, the ReEncrypt operation throws an IncorrectKeyException. - // - // This parameter is required only when the ciphertext was encrypted under an - // asymmetric KMS key. If you used a symmetric encryption KMS key, KMS can get - // the KMS key from metadata that it adds to the symmetric ciphertext blob. - // However, specifying the source KMS key is always recommended as a best practice. - // This practice ensures that you use the KMS key that you intend. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - SourceKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output.
The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReEncryptInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReEncryptInput"} - if s.CiphertextBlob == nil { - invalidParams.Add(request.NewErrParamRequired("CiphertextBlob")) - } - if s.CiphertextBlob != nil && len(s.CiphertextBlob) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CiphertextBlob", 1)) - } - if s.DestinationKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationKeyId")) - } - if s.DestinationKeyId != nil && len(*s.DestinationKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationKeyId", 1)) - } - if s.SourceKeyId != nil && len(*s.SourceKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SourceKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *ReEncryptInput) SetCiphertextBlob(v []byte) *ReEncryptInput { - s.CiphertextBlob = v - return s -} - -// SetDestinationEncryptionAlgorithm sets the DestinationEncryptionAlgorithm field's value. -func (s *ReEncryptInput) SetDestinationEncryptionAlgorithm(v string) *ReEncryptInput { - s.DestinationEncryptionAlgorithm = &v - return s -} - -// SetDestinationEncryptionContext sets the DestinationEncryptionContext field's value. -func (s *ReEncryptInput) SetDestinationEncryptionContext(v map[string]*string) *ReEncryptInput { - s.DestinationEncryptionContext = v - return s -} - -// SetDestinationKeyId sets the DestinationKeyId field's value. -func (s *ReEncryptInput) SetDestinationKeyId(v string) *ReEncryptInput { - s.DestinationKeyId = &v - return s -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *ReEncryptInput) SetGrantTokens(v []*string) *ReEncryptInput { - s.GrantTokens = v - return s -} - -// SetSourceEncryptionAlgorithm sets the SourceEncryptionAlgorithm field's value. -func (s *ReEncryptInput) SetSourceEncryptionAlgorithm(v string) *ReEncryptInput { - s.SourceEncryptionAlgorithm = &v - return s -} - -// SetSourceEncryptionContext sets the SourceEncryptionContext field's value. -func (s *ReEncryptInput) SetSourceEncryptionContext(v map[string]*string) *ReEncryptInput { - s.SourceEncryptionContext = v - return s -} - -// SetSourceKeyId sets the SourceKeyId field's value. -func (s *ReEncryptInput) SetSourceKeyId(v string) *ReEncryptInput { - s.SourceKeyId = &v - return s -} - -type ReEncryptOutput struct { - _ struct{} `type:"structure"` - - // The reencrypted data. When you use the HTTP API or the Amazon Web Services - // CLI, the value is Base64-encoded. Otherwise, it is not Base64-encoded. - // CiphertextBlob is automatically base64 encoded/decoded by the SDK. - CiphertextBlob []byte `min:"1" type:"blob"` - - // The encryption algorithm that was used to reencrypt the data. 
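A sketch of the ReEncrypt flow: the ciphertext is decrypted and rewrapped entirely inside KMS, so the plaintext is never returned to the caller. The Region, destination alias, and ciphertext source are assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-2"))))

	var ciphertext []byte // output of an earlier Encrypt or GenerateDataKey call (placeholder)

	out, err := svc.ReEncrypt(&kms.ReEncryptInput{
		CiphertextBlob:   ciphertext,
		DestinationKeyId: aws.String("alias/NewKey"), // hypothetical alias
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("now encrypted under", aws.StringValue(out.KeyId))
}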
- DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key that was used to reencrypt the data. - KeyId *string `min:"1" type:"string"` - - // The encryption algorithm that was used to decrypt the ciphertext before it - // was reencrypted. - SourceEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - - // Unique identifier of the KMS key used to originally encrypt the data. - SourceKeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReEncryptOutput) GoString() string { - return s.String() -} - -// SetCiphertextBlob sets the CiphertextBlob field's value. -func (s *ReEncryptOutput) SetCiphertextBlob(v []byte) *ReEncryptOutput { - s.CiphertextBlob = v - return s -} - -// SetDestinationEncryptionAlgorithm sets the DestinationEncryptionAlgorithm field's value. -func (s *ReEncryptOutput) SetDestinationEncryptionAlgorithm(v string) *ReEncryptOutput { - s.DestinationEncryptionAlgorithm = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ReEncryptOutput) SetKeyId(v string) *ReEncryptOutput { - s.KeyId = &v - return s -} - -// SetSourceEncryptionAlgorithm sets the SourceEncryptionAlgorithm field's value. -func (s *ReEncryptOutput) SetSourceEncryptionAlgorithm(v string) *ReEncryptOutput { - s.SourceEncryptionAlgorithm = &v - return s -} - -// SetSourceKeyId sets the SourceKeyId field's value. -func (s *ReEncryptOutput) SetSourceKeyId(v string) *ReEncryptOutput { - s.SourceKeyId = &v - return s -} - -type ReplicateKeyInput struct { - _ struct{} `type:"structure"` - - // A flag to indicate whether to bypass the key policy lockout safety check. - // - // Setting this value to true increases the risk that the KMS key becomes unmanageable. - // Do not set this value to true indiscriminately. - // - // For more information, refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section in the Key Management Service Developer Guide. - // - // Use this parameter only when you intend to prevent the principal that is - // making the request from making a subsequent PutKeyPolicy request on the KMS - // key. - // - // The default value is false. - BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` - - // A description of the KMS key. The default value is an empty string (no description). - // - // The description is not a shared property of multi-Region keys. You can specify - // the same description or a different description for each key in a set of - // related multi-Region keys. KMS does not synchronize this property. - Description *string `type:"string"` - - // Identifies the multi-Region primary key that is being replicated. 
To determine - // whether a KMS key is a multi-Region primary key, use the DescribeKey operation - // to check the value of the MultiRegionKeyType property. - // - // Specify the key ID or key ARN of a multi-Region primary key. - // - // For example: - // - // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The key policy to attach to the KMS key. This parameter is optional. If you - // do not provide a key policy, KMS attaches the default key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) - // to the KMS key. - // - // The key policy is not a shared property of multi-Region keys. You can specify - // the same key policy or a different key policy for each key in a set of related - // multi-Region keys. KMS does not synchronize this property. - // - // If you provide a key policy, it must meet the following criteria: - // - // * If you don't set BypassPolicyLockoutSafetyCheck to true, the key policy - // must give the caller kms:PutKeyPolicy permission on the replica key. This - // reduces the risk that the KMS key becomes unmanageable. For more information, - // refer to the scenario in the Default Key Policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) - // section of the Key Management Service Developer Guide . - // - // * Each statement in the key policy must contain one or more principals. - // The principals in the key policy must exist and be visible to KMS. When - // you create a new Amazon Web Services principal (for example, an IAM user - // or role), you might need to enforce a delay before including the new principal - // in a key policy because the new principal might not be immediately visible - // to KMS. For more information, see Changes that I make are not always immediately - // visible (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) - // in the Identity and Access Management User Guide . - // - // * The key policy size quota is 32 kilobytes (32768 bytes). - Policy *string `min:"1" type:"string"` - - // The Region ID of the Amazon Web Services Region for this replica key. - // - // Enter the Region ID, such as us-east-1 or ap-southeast-2. For a list of Amazon - // Web Services Regions in which KMS is supported, see KMS service endpoints - // (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) in the - // Amazon Web Services General Reference. - // - // HMAC KMS keys are not supported in all Amazon Web Services Regions. If you - // try to replicate an HMAC KMS key in an Amazon Web Services Region in which - // HMAC keys are not supported, the ReplicateKey operation returns an UnsupportedOperationException. - // For a list of Regions in which HMAC KMS keys are supported, see HMAC keys - // in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/hmac.html) - // in the Key Management Service Developer Guide. - // - // The replica must be in a different Amazon Web Services Region than its primary - // key and other replicas of that primary key, but in the same Amazon Web Services - // partition. KMS must be available in the replica Region. 
If the Region is - // not enabled by default, the Amazon Web Services account must be enabled in - // the Region. For information about Amazon Web Services partitions, see Amazon - // Resource Names (ARNs) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // in the Amazon Web Services General Reference. For information about enabling - // and disabling Regions, see Enabling a Region (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-enable) - // and Disabling a Region (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html#rande-manage-disable) - // in the Amazon Web Services General Reference. - // - // ReplicaRegion is a required field - ReplicaRegion *string `min:"1" type:"string" required:"true"` - - // Assigns one or more tags to the replica key. Use this parameter to tag the - // KMS key when it is created. To tag an existing KMS key, use the TagResource - // operation. - // - // Tagging or untagging a KMS key can allow or deny permission to the KMS key. - // For details, see ABAC in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/abac.html) - // in the Key Management Service Developer Guide. - // - // To use this parameter, you must have kms:TagResource (https://docs.aws.amazon.com/kms/latest/developerguide/kms-api-permissions-reference.html) - // permission in an IAM policy. - // - // Tags are not a shared property of multi-Region keys. You can specify the - // same tags or different tags for each key in a set of related multi-Region - // keys. KMS does not synchronize this property. - // - // Each tag consists of a tag key and a tag value. Both the tag key and the - // tag value are required, but the tag value can be an empty (null) string. - // You cannot have more than one tag on a KMS key with the same tag key. If - // you specify an existing tag key with a different tag value, KMS replaces - // the current tag value with the specified one. - // - // When you add tags to an Amazon Web Services resource, Amazon Web Services - // generates a cost allocation report with usage and costs aggregated by tags. - // Tags can also be used to control access to a KMS key. For details, see Tagging - // Keys (https://docs.aws.amazon.com/kms/latest/developerguide/tagging-keys.html). - Tags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
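A sketch of replicating a multi-Region primary key into a second Region in the same partition; the package name, client parameter, ARN, and tag values are assumptions:

package kmsexamples

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// replicateKey creates a replica of an existing multi-Region primary key.
func replicateKey(svc *kms.KMS) {
	out, err := svc.ReplicateKey(&kms.ReplicateKeyInput{
		KeyId:         aws.String("arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab"),
		ReplicaRegion: aws.String("eu-west-1"), // must differ from the primary's Region
		Description:   aws.String("replica for EU workloads"),
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("Example")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("replica ARN:", aws.StringValue(out.ReplicaKeyMetadata.Arn))
}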
-func (s *ReplicateKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicateKeyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ReplicaRegion == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicaRegion")) - } - if s.ReplicaRegion != nil && len(*s.ReplicaRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReplicaRegion", 1)) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBypassPolicyLockoutSafetyCheck sets the BypassPolicyLockoutSafetyCheck field's value. -func (s *ReplicateKeyInput) SetBypassPolicyLockoutSafetyCheck(v bool) *ReplicateKeyInput { - s.BypassPolicyLockoutSafetyCheck = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ReplicateKeyInput) SetDescription(v string) *ReplicateKeyInput { - s.Description = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ReplicateKeyInput) SetKeyId(v string) *ReplicateKeyInput { - s.KeyId = &v - return s -} - -// SetPolicy sets the Policy field's value. -func (s *ReplicateKeyInput) SetPolicy(v string) *ReplicateKeyInput { - s.Policy = &v - return s -} - -// SetReplicaRegion sets the ReplicaRegion field's value. -func (s *ReplicateKeyInput) SetReplicaRegion(v string) *ReplicateKeyInput { - s.ReplicaRegion = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ReplicateKeyInput) SetTags(v []*Tag) *ReplicateKeyInput { - s.Tags = v - return s -} - -type ReplicateKeyOutput struct { - _ struct{} `type:"structure"` - - // Displays details about the new replica key, including its Amazon Resource - // Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // and Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). - // It also includes the ARN and Amazon Web Services Region of its primary key - // and other replica keys. - ReplicaKeyMetadata *KeyMetadata `type:"structure"` - - // The key policy of the new replica key. The value is a key policy document - // in JSON format. - ReplicaPolicy *string `min:"1" type:"string"` - - // The tags on the new replica key. The value is a list of tag key and tag value - // pairs. - ReplicaTags []*Tag `type:"list"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ReplicateKeyOutput) GoString() string { - return s.String() -} - -// SetReplicaKeyMetadata sets the ReplicaKeyMetadata field's value. 
-func (s *ReplicateKeyOutput) SetReplicaKeyMetadata(v *KeyMetadata) *ReplicateKeyOutput { - s.ReplicaKeyMetadata = v - return s -} - -// SetReplicaPolicy sets the ReplicaPolicy field's value. -func (s *ReplicateKeyOutput) SetReplicaPolicy(v string) *ReplicateKeyOutput { - s.ReplicaPolicy = &v - return s -} - -// SetReplicaTags sets the ReplicaTags field's value. -func (s *ReplicateKeyOutput) SetReplicaTags(v []*Tag) *ReplicateKeyOutput { - s.ReplicaTags = v - return s -} - -type RetireGrantInput struct { - _ struct{} `type:"structure"` - - // Identifies the grant to retire. To get the grant ID, use CreateGrant, ListGrants, - // or ListRetirableGrants. - // - // * Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 - GrantId *string `min:"1" type:"string"` - - // Identifies the grant to be retired. You can use a grant token to identify - // a new grant even before it has achieved eventual consistency. - // - // Only the CreateGrant operation returns a grant token. For details, see Grant - // token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Eventual consistency (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#terms-eventual-consistency) - // in the Key Management Service Developer Guide. - GrantToken *string `min:"1" type:"string"` - - // The key ARN of the KMS key associated with the grant. To find the key ARN, - // use the ListKeys operation. - // - // For example: arn:aws:kms:us-east-2:444455556666:key/1234abcd-12ab-34cd-56ef-1234567890ab - KeyId *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RetireGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RetireGrantInput"} - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.GrantToken != nil && len(*s.GrantToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantToken", 1)) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *RetireGrantInput) SetGrantId(v string) *RetireGrantInput { - s.GrantId = &v - return s -} - -// SetGrantToken sets the GrantToken field's value. -func (s *RetireGrantInput) SetGrantToken(v string) *RetireGrantInput { - s.GrantToken = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *RetireGrantInput) SetKeyId(v string) *RetireGrantInput { - s.KeyId = &v - return s -} - -type RetireGrantOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation.
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RetireGrantOutput) GoString() string { - return s.String() -} - -type RevokeGrantInput struct { - _ struct{} `type:"structure"` - - // Identifies the grant to revoke. To get the grant ID, use CreateGrant, ListGrants, - // or ListRetirableGrants. - // - // GrantId is a required field - GrantId *string `min:"1" type:"string" required:"true"` - - // A unique identifier for the KMS key associated with the grant. To get the - // key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // Specify the key ID or key ARN of the KMS key. To specify a KMS key in a different - // Amazon Web Services account, you must use the key ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RevokeGrantInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RevokeGrantInput"} - if s.GrantId == nil { - invalidParams.Add(request.NewErrParamRequired("GrantId")) - } - if s.GrantId != nil && len(*s.GrantId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantId", 1)) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantId sets the GrantId field's value. -func (s *RevokeGrantInput) SetGrantId(v string) *RevokeGrantInput { - s.GrantId = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *RevokeGrantInput) SetKeyId(v string) *RevokeGrantInput { - s.KeyId = &v - return s -} - -type RevokeGrantOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". 
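RetireGrant (above) and RevokeGrant differ mainly in who calls them and what identifies the grant: retiring can rely on just a grant token, while revoking requires the key ID and grant ID. A sketch in which every identifier is assumed:

package kmsexamples

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// cleanUpGrant retires a grant when a token is available and revokes it otherwise.
func cleanUpGrant(svc *kms.KMS, keyARN, grantID, grantToken string) {
	if grantToken != "" {
		if _, err := svc.RetireGrant(&kms.RetireGrantInput{
			GrantToken: aws.String(grantToken),
		}); err != nil {
			log.Fatal(err)
		}
		return
	}
	if _, err := svc.RevokeGrant(&kms.RevokeGrantInput{
		KeyId:   aws.String(keyARN),
		GrantId: aws.String(grantID),
	}); err != nil {
		log.Fatal(err)
	}
}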
-func (s RevokeGrantOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s RevokeGrantOutput) GoString() string { - return s.String() -} - -type ScheduleKeyDeletionInput struct { - _ struct{} `type:"structure"` - - // The unique identifier of the KMS key to delete. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The waiting period, specified in number of days. After the waiting period - // ends, KMS deletes the KMS key. - // - // If the KMS key is a multi-Region primary key with replicas, the waiting period - // begins when the last of its replica keys is deleted. Otherwise, the waiting - // period begins immediately. - // - // This value is optional. If you include a value, it must be between 7 and - // 30, inclusive. If you do not include a value, it defaults to 30. - PendingWindowInDays *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *ScheduleKeyDeletionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ScheduleKeyDeletionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PendingWindowInDays != nil && *s.PendingWindowInDays < 1 { - invalidParams.Add(request.NewErrParamMinValue("PendingWindowInDays", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *ScheduleKeyDeletionInput) SetKeyId(v string) *ScheduleKeyDeletionInput { - s.KeyId = &v - return s -} - -// SetPendingWindowInDays sets the PendingWindowInDays field's value. -func (s *ScheduleKeyDeletionInput) SetPendingWindowInDays(v int64) *ScheduleKeyDeletionInput { - s.PendingWindowInDays = &v - return s -} - -type ScheduleKeyDeletionOutput struct { - _ struct{} `type:"structure"` - - // The date and time after which KMS deletes the KMS key. - // - // If the KMS key is a multi-Region primary key with replica keys, this field - // does not appear. The deletion date for the primary key isn't known until - // its last replica key is deleted. 
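A sketch of scheduling deletion with the shortest allowed waiting period (7 days instead of the 30-day default); the package name, client, and key ID are assumptions:

package kmsexamples

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// scheduleDeletion puts a KMS key into the PendingDeletion state.
func scheduleDeletion(svc *kms.KMS, keyID string) {
	out, err := svc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
		KeyId:               aws.String(keyID),
		PendingWindowInDays: aws.Int64(7), // valid range is 7-30
	})
	if err != nil {
		log.Fatal(err)
	}
	// DeletionDate stays unset for a multi-Region primary that still has replicas.
	if out.DeletionDate != nil {
		fmt.Println("deletion scheduled for", out.DeletionDate)
	}
}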
- DeletionDate *time.Time `type:"timestamp"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the KMS key whose deletion is scheduled. - KeyId *string `min:"1" type:"string"` - - // The current status of the KMS key. - // - // For more information about how key state affects the use of a KMS key, see - // Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. - KeyState *string `type:"string" enum:"KeyState"` - - // The waiting period before the KMS key is deleted. - // - // If the KMS key is a multi-Region primary key with replicas, the waiting period - // begins when the last of its replica keys is deleted. Otherwise, the waiting - // period begins immediately. - PendingWindowInDays *int64 `min:"1" type:"integer"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ScheduleKeyDeletionOutput) GoString() string { - return s.String() -} - -// SetDeletionDate sets the DeletionDate field's value. -func (s *ScheduleKeyDeletionOutput) SetDeletionDate(v time.Time) *ScheduleKeyDeletionOutput { - s.DeletionDate = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *ScheduleKeyDeletionOutput) SetKeyId(v string) *ScheduleKeyDeletionOutput { - s.KeyId = &v - return s -} - -// SetKeyState sets the KeyState field's value. -func (s *ScheduleKeyDeletionOutput) SetKeyState(v string) *ScheduleKeyDeletionOutput { - s.KeyState = &v - return s -} - -// SetPendingWindowInDays sets the PendingWindowInDays field's value. -func (s *ScheduleKeyDeletionOutput) SetPendingWindowInDays(v int64) *ScheduleKeyDeletionOutput { - s.PendingWindowInDays = &v - return s -} - -type SignInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies an asymmetric KMS key. KMS uses the private key in the asymmetric - // KMS key to sign the message. The KeyUsage type of the KMS key must be SIGN_VERIFY. - // To find the KeyUsage of a KMS key, use the DescribeKey operation. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the message or message digest to sign. Messages can be 0-4096 bytes. - // To sign a larger message, provide the message digest. - // - // If you provide a message, KMS generates a hash digest of the message and - // then signs it. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by SignInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` - - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. - MessageType *string `type:"string" enum:"MessageType"` - - // Specifies the signing algorithm to use when signing the message. - // - // Choose an algorithm that is compatible with the type and size of the specified - // asymmetric KMS key. - // - // SigningAlgorithm is a required field - SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *SignInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SignInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - if s.SigningAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SigningAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *SignInput) SetGrantTokens(v []*string) *SignInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *SignInput) SetKeyId(v string) *SignInput { - s.KeyId = &v - return s -} - -// SetMessage sets the Message field's value. 
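For messages larger than 4096 bytes the caller hashes locally and submits only the digest, as the Message documentation above describes. A sketch using ECDSA_SHA_256 and an assumed signing-key alias:

package kmsexamples

import (
	"crypto/sha256"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// signDigest signs the SHA-256 digest of message with an asymmetric KMS key.
func signDigest(svc *kms.KMS, message []byte) []byte {
	digest := sha256.Sum256(message)
	out, err := svc.Sign(&kms.SignInput{
		KeyId:            aws.String("alias/SigningKey"), // hypothetical alias
		Message:          digest[:],
		MessageType:      aws.String(kms.MessageTypeDigest),
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	return out.Signature // DER-encoded for ECDSA algorithms
}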
-func (s *SignInput) SetMessage(v []byte) *SignInput { - s.Message = v - return s -} - -// SetMessageType sets the MessageType field's value. -func (s *SignInput) SetMessageType(v string) *SignInput { - s.MessageType = &v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *SignInput) SetSigningAlgorithm(v string) *SignInput { - s.SigningAlgorithm = &v - return s -} - -type SignOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to sign the message. - KeyId *string `min:"1" type:"string"` - - // The cryptographic signature that was generated for the message. - // - // * When used with the supported RSA signing algorithms, the encoding of - // this value is defined by PKCS #1 in RFC 8017 (https://tools.ietf.org/html/rfc8017). - // - // * When used with the ECDSA_SHA_256, ECDSA_SHA_384, or ECDSA_SHA_512 signing - // algorithms, this value is a DER-encoded object as defined by ANS X9.62–2005 - // and RFC 3279 Section 2.2.3 (https://tools.ietf.org/html/rfc3279#section-2.2.3). - // This is the most commonly used signature format and is appropriate for - // most uses. - // - // When you use the HTTP API or the Amazon Web Services CLI, the value is Base64-encoded. - // Otherwise, it is not Base64-encoded. - // Signature is automatically base64 encoded/decoded by the SDK. - Signature []byte `min:"1" type:"blob"` - - // The signing algorithm that was used to sign the message. - SigningAlgorithm *string `type:"string" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s SignOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *SignOutput) SetKeyId(v string) *SignOutput { - s.KeyId = &v - return s -} - -// SetSignature sets the Signature field's value. -func (s *SignOutput) SetSignature(v []byte) *SignOutput { - s.Signature = v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *SignOutput) SetSigningAlgorithm(v string) *SignOutput { - s.SigningAlgorithm = &v - return s -} - -// A key-value pair. A tag consists of a tag key and a tag value. Tag keys and -// tag values are both required, but tag values can be empty (null) strings. -// -// For information about the rules that apply to tag keys and tag values, see -// User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// in the Amazon Web Services Billing and Cost Management User Guide. -type Tag struct { - _ struct{} `type:"structure"` - - // The key of the tag. - // - // TagKey is a required field - TagKey *string `min:"1" type:"string" required:"true"` - - // The value of the tag. - // - // TagValue is a required field - TagValue *string `type:"string" required:"true"` -} - -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s Tag) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.TagKey == nil { - invalidParams.Add(request.NewErrParamRequired("TagKey")) - } - if s.TagKey != nil && len(*s.TagKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TagKey", 1)) - } - if s.TagValue == nil { - invalidParams.Add(request.NewErrParamRequired("TagValue")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetTagKey sets the TagKey field's value. -func (s *Tag) SetTagKey(v string) *Tag { - s.TagKey = &v - return s -} - -// SetTagValue sets the TagValue field's value. -func (s *Tag) SetTagValue(v string) *Tag { - s.TagValue = &v - return s -} - -// The request was rejected because one or more tags are not valid. -type TagException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagException) GoString() string { - return s.String() -} - -func newErrorTagException(v protocol.ResponseMetadata) error { - return &TagException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *TagException) Code() string { - return "TagException" -} - -// Message returns the exception's message. -func (s *TagException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TagException) OrigErr() error { - return nil -} - -func (s *TagException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *TagException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *TagException) RequestID() string { - return s.RespMetadata.RequestID -} - -type TagResourceInput struct { - _ struct{} `type:"structure"` - - // Identifies a customer managed key in the account and Region. - // - // Specify the key ID or key ARN of the KMS key. 
- // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // One or more tags. - // - // Each tag consists of a tag key and a tag value. The tag value can be an empty - // (null) string. - // - // You cannot have more than one tag on a KMS key with the same tag key. If - // you specify an existing tag key with a different tag value, KMS replaces - // the current tag value with the specified one. - // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *TagResourceInput) SetKeyId(v string) *TagResourceInput { - s.KeyId = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v - return s -} - -type TagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { - return s.String() -} - -// The request was rejected because a specified parameter is not supported or -// a specified resource is not valid for this operation. 
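A sketch of attaching a tag with TagResource; repeating an existing tag key with a new value replaces the old value, as documented above. The package name, client, and tag values are assumptions:

package kmsexamples

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kms"
)

// tagKey adds (or overwrites) a single tag on a customer managed key.
func tagKey(svc *kms.KMS, keyID string) {
	_, err := svc.TagResource(&kms.TagResourceInput{
		KeyId: aws.String(keyID),
		Tags: []*kms.Tag{
			{TagKey: aws.String("Project"), TagValue: aws.String("Alpha")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}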
-type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsupportedOperationException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UnsupportedOperationException) GoString() string { - return s.String() -} - -func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { - return &UnsupportedOperationException{ - RespMetadata: v, - } -} - -// Code returns the exception type name. -func (s *UnsupportedOperationException) Code() string { - return "UnsupportedOperationException" -} - -// Message returns the exception's message. -func (s *UnsupportedOperationException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *UnsupportedOperationException) OrigErr() error { - return nil -} - -func (s *UnsupportedOperationException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s *UnsupportedOperationException) StatusCode() int { - return s.RespMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s *UnsupportedOperationException) RequestID() string { - return s.RespMetadata.RequestID -} - -type UntagResourceInput struct { - _ struct{} `type:"structure"` - - // Identifies the KMS key from which you are removing tags. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // One or more tag keys. Specify only the tag keys, not the tag values. - // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *UntagResourceInput) SetKeyId(v string) *UntagResourceInput { - s.KeyId = &v - return s -} - -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v - return s -} - -type UntagResourceOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) GoString() string { - return s.String() -} - -type UpdateAliasInput struct { - _ struct{} `type:"structure"` - - // Identifies the alias that is changing its KMS key. This value must begin - // with alias/ followed by the alias name, such as alias/ExampleAlias. You cannot - // use UpdateAlias to change the alias name. - // - // AliasName is a required field - AliasName *string `min:"1" type:"string" required:"true"` - - // Identifies the customer managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) - // to associate with the alias. You don't have permission to associate an alias - // with an Amazon Web Services managed key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk). - // - // The KMS key must be in the same Amazon Web Services account and Region as - // the alias. Also, the new target KMS key must be the same type as the current - // target KMS key (both symmetric or both asymmetric) and they must have the - // same key usage. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // To verify that the alias is mapped to the correct KMS key, use ListAliases. - // - // TargetKeyId is a required field - TargetKeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. 
The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.TargetKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetKeyId")) - } - if s.TargetKeyId != nil && len(*s.TargetKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetKeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAliasName sets the AliasName field's value. -func (s *UpdateAliasInput) SetAliasName(v string) *UpdateAliasInput { - s.AliasName = &v - return s -} - -// SetTargetKeyId sets the TargetKeyId field's value. -func (s *UpdateAliasInput) SetTargetKeyId(v string) *UpdateAliasInput { - s.TargetKeyId = &v - return s -} - -type UpdateAliasOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateAliasOutput) GoString() string { - return s.String() -} - -type UpdateCustomKeyStoreInput struct { - _ struct{} `type:"structure"` - - // Associates the custom key store with a related CloudHSM cluster. - // - // Enter the cluster ID of the cluster that you used to create the custom key - // store or a cluster that shares a backup history and has the same cluster - // certificate as the original cluster. You cannot use this parameter to associate - // a custom key store with an unrelated cluster. In addition, the replacement - // cluster must fulfill the requirements (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) - // for a cluster associated with a custom key store. To view the cluster certificate - // of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - CloudHsmClusterId *string `min:"19" type:"string"` - - // Identifies the custom key store that you want to update. Enter the ID of - // the custom key store. To find the ID of a custom key store, use the DescribeCustomKeyStores - // operation. - // - // CustomKeyStoreId is a required field - CustomKeyStoreId *string `min:"1" type:"string" required:"true"` - - // Enter the current password of the kmsuser crypto user (CU) in the CloudHSM - // cluster that is associated with the custom key store. - // - // This parameter tells KMS the current password of the kmsuser crypto user - // (CU). It does not set or change the password of any users in the CloudHSM - // cluster. 
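For reference, a sketch of driving the UpdateAlias wrapper removed above, assuming default-chain credentials and a Region from the environment; the alias and key ID are the doc comment's own example values:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// Assumed setup: credentials and Region come from the default chain.
	svc := kms.New(session.Must(session.NewSession()))

	// AliasName must begin with "alias/"; the new target key must live in
	// the same account and Region and have the same type and key usage.
	input := &kms.UpdateAliasInput{
		AliasName:   aws.String("alias/ExampleAlias"),
		TargetKeyId: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err) // client-side checks fail fast, before any network call
	}
	if _, err := svc.UpdateAlias(input); err != nil {
		log.Fatal(err)
	}
}
```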
- // - // KeyStorePassword is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by UpdateCustomKeyStoreInput's - // String and GoString methods. - KeyStorePassword *string `min:"7" type:"string" sensitive:"true"` - - // Changes the friendly name of the custom key store to the value that you specify. - // The custom key store name must be unique in the Amazon Web Services account. - NewCustomKeyStoreName *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateCustomKeyStoreInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateCustomKeyStoreInput"} - if s.CloudHsmClusterId != nil && len(*s.CloudHsmClusterId) < 19 { - invalidParams.Add(request.NewErrParamMinLen("CloudHsmClusterId", 19)) - } - if s.CustomKeyStoreId == nil { - invalidParams.Add(request.NewErrParamRequired("CustomKeyStoreId")) - } - if s.CustomKeyStoreId != nil && len(*s.CustomKeyStoreId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CustomKeyStoreId", 1)) - } - if s.KeyStorePassword != nil && len(*s.KeyStorePassword) < 7 { - invalidParams.Add(request.NewErrParamMinLen("KeyStorePassword", 7)) - } - if s.NewCustomKeyStoreName != nil && len(*s.NewCustomKeyStoreName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NewCustomKeyStoreName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCloudHsmClusterId sets the CloudHsmClusterId field's value. -func (s *UpdateCustomKeyStoreInput) SetCloudHsmClusterId(v string) *UpdateCustomKeyStoreInput { - s.CloudHsmClusterId = &v - return s -} - -// SetCustomKeyStoreId sets the CustomKeyStoreId field's value. -func (s *UpdateCustomKeyStoreInput) SetCustomKeyStoreId(v string) *UpdateCustomKeyStoreInput { - s.CustomKeyStoreId = &v - return s -} - -// SetKeyStorePassword sets the KeyStorePassword field's value. -func (s *UpdateCustomKeyStoreInput) SetKeyStorePassword(v string) *UpdateCustomKeyStoreInput { - s.KeyStorePassword = &v - return s -} - -// SetNewCustomKeyStoreName sets the NewCustomKeyStoreName field's value. -func (s *UpdateCustomKeyStoreInput) SetNewCustomKeyStoreName(v string) *UpdateCustomKeyStoreInput { - s.NewCustomKeyStoreName = &v - return s -} - -type UpdateCustomKeyStoreOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
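Since KeyStorePassword is tagged sensitive:"true", the generated String method redacts it, which makes request logging safe. A sketch under the same assumed session setup; the key store ID and password are invented placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// The fluent setters mirror the plain struct fields.
	input := (&kms.UpdateCustomKeyStoreInput{}).
		SetCustomKeyStoreId("cks-1234567890abcdef0"). // placeholder ID
		SetKeyStorePassword("kmsuser-pw-1234")        // >= 7 chars per Validate

	// Safe to log: String() replaces KeyStorePassword with "sensitive".
	log.Printf("request: %s", input)

	if _, err := svc.UpdateCustomKeyStore(input); err != nil {
		log.Fatal(err)
	}
}
```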
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateCustomKeyStoreOutput) GoString() string { - return s.String() -} - -type UpdateKeyDescriptionInput struct { - _ struct{} `type:"structure"` - - // New description for the KMS key. - // - // Description is a required field - Description *string `type:"string" required:"true"` - - // Updates the description of the specified KMS key. - // - // Specify the key ID or key ARN of the KMS key. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateKeyDescriptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateKeyDescriptionInput"} - if s.Description == nil { - invalidParams.Add(request.NewErrParamRequired("Description")) - } - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDescription sets the Description field's value. -func (s *UpdateKeyDescriptionInput) SetDescription(v string) *UpdateKeyDescriptionInput { - s.Description = &v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *UpdateKeyDescriptionInput) SetKeyId(v string) *UpdateKeyDescriptionInput { - s.KeyId = &v - return s -} - -type UpdateKeyDescriptionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdateKeyDescriptionOutput) GoString() string { - return s.String() -} - -type UpdatePrimaryRegionInput struct { - _ struct{} `type:"structure"` - - // Identifies the current primary key. When the operation completes, this KMS - // key will be a replica key. 
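UpdateKeyDescription is one of the simplest wrappers being removed: two required fields and an empty output payload. A hedged sketch, assuming the default credential chain; the key ID is the doc comment's example value:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Both fields are required; KeyId accepts either of the key ID or
	// key ARN forms listed in the doc comment above.
	_, err := svc.UpdateKeyDescription(&kms.UpdateKeyDescriptionInput{
		KeyId:       aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Description: aws.String("Example signing key"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```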
- // - // Specify the key ID or key ARN of a multi-Region primary key. - // - // For example: - // - // * Key ID: mrk-1234abcd12ab34cd56ef1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/mrk-1234abcd12ab34cd56ef1234567890ab - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The Amazon Web Services Region of the new primary key. Enter the Region ID, - // such as us-east-1 or ap-southeast-2. There must be an existing replica key - // in this Region. - // - // When the operation completes, the multi-Region key in this Region will be - // the primary key. - // - // PrimaryRegion is a required field - PrimaryRegion *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *UpdatePrimaryRegionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdatePrimaryRegionInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.PrimaryRegion == nil { - invalidParams.Add(request.NewErrParamRequired("PrimaryRegion")) - } - if s.PrimaryRegion != nil && len(*s.PrimaryRegion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PrimaryRegion", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetKeyId sets the KeyId field's value. -func (s *UpdatePrimaryRegionInput) SetKeyId(v string) *UpdatePrimaryRegionInput { - s.KeyId = &v - return s -} - -// SetPrimaryRegion sets the PrimaryRegion field's value. -func (s *UpdatePrimaryRegionInput) SetPrimaryRegion(v string) *UpdatePrimaryRegionInput { - s.PrimaryRegion = &v - return s -} - -type UpdatePrimaryRegionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UpdatePrimaryRegionOutput) GoString() string { - return s.String() -} - -type VerifyInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. 
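A sketch of the UpdatePrimaryRegion flow described above, assuming a session from the default chain; the mrk- key ID and Region are the example values from the doc comment:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	// Promote an existing replica: KeyId is the current multi-Region
	// primary (mrk-... form), and PrimaryRegion must already host a
	// replica of that key. When the call completes, the roles swap.
	_, err := svc.UpdatePrimaryRegion(&kms.UpdatePrimaryRegionInput{
		KeyId:         aws.String("mrk-1234abcd12ab34cd56ef1234567890ab"),
		PrimaryRegion: aws.String("ap-southeast-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```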
For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // Identifies the asymmetric KMS key that will be used to verify the signature. - // This must be the same KMS key that was used to generate the signature. If - // you specify a different KMS key, the signature verification fails. - // - // To specify a KMS key, use its key ID, key ARN, alias name, or alias ARN. - // When using an alias name, prefix it with "alias/". To specify a KMS key in - // a different Amazon Web Services account, you must use the key ARN or alias - // ARN. - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a KMS key, use ListKeys or DescribeKey. - // To get the alias name and alias ARN, use ListAliases. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // Specifies the message that was signed. You can submit a raw message of up - // to 4096 bytes, or a hash digest of the message. If you submit a digest, use - // the MessageType parameter with a value of DIGEST. - // - // If the message specified here is different from the message that was signed, - // the signature verification fails. A message and its hash digest are considered - // to be the same message. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by VerifyInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` - - // Tells KMS whether the value of the Message parameter is a message or message - // digest. The default value, RAW, indicates a message. To indicate a message - // digest, enter DIGEST. - // - // Use the DIGEST value only when the value of the Message parameter is a message - // digest. If you use the DIGEST value with a raw message, the security of the - // verification operation can be compromised. - MessageType *string `type:"string" enum:"MessageType"` - - // The signature that the Sign operation generated. - // Signature is automatically base64 encoded/decoded by the SDK. - // - // Signature is a required field - Signature []byte `min:"1" type:"blob" required:"true"` - - // The signing algorithm that was used to sign the message. If you submit a - // different algorithm, the signature verification fails. - // - // SigningAlgorithm is a required field - SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. 
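Putting the VerifyInput fields together: a sketch that verifies a locally hashed digest, assuming a default session; the signature bytes are a stand-in for a real Sign response, so this would only succeed against genuine output:

```go
package main

import (
	"crypto/sha256"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	message := []byte("payload previously signed with Sign")
	signature := []byte("placeholder for the Sign response's signature bytes")

	// Hash locally and send MessageType=DIGEST instead of the raw message;
	// the SDK base64-encodes Message and Signature on the wire.
	digest := sha256.Sum256(message)
	out, err := svc.Verify(&kms.VerifyInput{
		KeyId:            aws.String("alias/ExampleAlias"),
		Message:          digest[:],
		MessageType:      aws.String(kms.MessageTypeDigest),
		Signature:        signature,
		SigningAlgorithm: aws.String(kms.SigningAlgorithmSpecEcdsaSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("signature valid:", aws.BoolValue(out.SignatureValid))
}
```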
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *VerifyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "VerifyInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - if s.Signature == nil { - invalidParams.Add(request.NewErrParamRequired("Signature")) - } - if s.Signature != nil && len(s.Signature) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Signature", 1)) - } - if s.SigningAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("SigningAlgorithm")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *VerifyInput) SetGrantTokens(v []*string) *VerifyInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyInput) SetKeyId(v string) *VerifyInput { - s.KeyId = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *VerifyInput) SetMessage(v []byte) *VerifyInput { - s.Message = v - return s -} - -// SetMessageType sets the MessageType field's value. -func (s *VerifyInput) SetMessageType(v string) *VerifyInput { - s.MessageType = &v - return s -} - -// SetSignature sets the Signature field's value. -func (s *VerifyInput) SetSignature(v []byte) *VerifyInput { - s.Signature = v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. -func (s *VerifyInput) SetSigningAlgorithm(v string) *VerifyInput { - s.SigningAlgorithm = &v - return s -} - -type VerifyMacInput struct { - _ struct{} `type:"structure"` - - // A list of grant tokens. - // - // Use a grant token when your permission to call this operation comes from - // a new grant that has not yet achieved eventual consistency. For more information, - // see Grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grants.html#grant_token) - // and Using a grant token (https://docs.aws.amazon.com/kms/latest/developerguide/grant-manage.html#using-grant-token) - // in the Key Management Service Developer Guide. - GrantTokens []*string `type:"list"` - - // The KMS key that will be used in the verification. - // - // Enter a key ID of the KMS key that was used to generate the HMAC. If you - // identify a different KMS key, the VerifyMac operation fails. - // - // KeyId is a required field - KeyId *string `min:"1" type:"string" required:"true"` - - // The HMAC to verify. Enter the HMAC that was generated by the GenerateMac - // operation when you specified the same message, HMAC KMS key, and MAC algorithm - // as the values specified in this request. - // Mac is automatically base64 encoded/decoded by the SDK. - // - // Mac is a required field - Mac []byte `min:"1" type:"blob" required:"true"` - - // The MAC algorithm that will be used in the verification. Enter the same MAC - // algorithm that was used to compute the HMAC. 
This algorithm must be supported - // by the HMAC KMS key identified by the KeyId parameter. - // - // MacAlgorithm is a required field - MacAlgorithm *string `type:"string" required:"true" enum:"MacAlgorithmSpec"` - - // The message that will be used in the verification. Enter the same message - // that was used to generate the HMAC. - // - // GenerateMac and VerifyMac do not provide special handling for message digests. - // If you generated an HMAC for a hash digest of a message, you must verify - // the HMAC for the same hash digest. - // - // Message is a sensitive parameter and its value will be - // replaced with "sensitive" in string returned by VerifyMacInput's - // String and GoString methods. - // - // Message is automatically base64 encoded/decoded by the SDK. - // - // Message is a required field - Message []byte `min:"1" type:"blob" required:"true" sensitive:"true"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyMacInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyMacInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *VerifyMacInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "VerifyMacInput"} - if s.KeyId == nil { - invalidParams.Add(request.NewErrParamRequired("KeyId")) - } - if s.KeyId != nil && len(*s.KeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) - } - if s.Mac == nil { - invalidParams.Add(request.NewErrParamRequired("Mac")) - } - if s.Mac != nil && len(s.Mac) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Mac", 1)) - } - if s.MacAlgorithm == nil { - invalidParams.Add(request.NewErrParamRequired("MacAlgorithm")) - } - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetGrantTokens sets the GrantTokens field's value. -func (s *VerifyMacInput) SetGrantTokens(v []*string) *VerifyMacInput { - s.GrantTokens = v - return s -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyMacInput) SetKeyId(v string) *VerifyMacInput { - s.KeyId = &v - return s -} - -// SetMac sets the Mac field's value. -func (s *VerifyMacInput) SetMac(v []byte) *VerifyMacInput { - s.Mac = v - return s -} - -// SetMacAlgorithm sets the MacAlgorithm field's value. -func (s *VerifyMacInput) SetMacAlgorithm(v string) *VerifyMacInput { - s.MacAlgorithm = &v - return s -} - -// SetMessage sets the Message field's value. -func (s *VerifyMacInput) SetMessage(v []byte) *VerifyMacInput { - s.Message = v - return s -} - -type VerifyMacOutput struct { - _ struct{} `type:"structure"` - - // The HMAC KMS key used in the verification. - KeyId *string `min:"1" type:"string"` - - // The MAC algorithm used in the verification. 
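A companion sketch for VerifyMac under the same assumptions; the message and MAC bytes are placeholders for values captured from a prior GenerateMac call:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.Must(session.NewSession()))

	message := []byte("metering record 1234")
	mac := []byte("placeholder for the GenerateMac response's Mac bytes")

	// Key, message, and algorithm must all match the GenerateMac request;
	// a mismatch fails with KMSInvalidMacException rather than MacValid=false.
	out, err := svc.VerifyMac(&kms.VerifyMacInput{
		KeyId:        aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
		Message:      message,
		Mac:          mac,
		MacAlgorithm: aws.String(kms.MacAlgorithmSpecHmacSha256),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("HMAC valid:", aws.BoolValue(out.MacValid))
}
```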
- MacAlgorithm *string `type:"string" enum:"MacAlgorithmSpec"` - - // A Boolean value that indicates whether the HMAC was verified. A value of - // True indicates that the HMAC (Mac) was generated with the specified Message, - // HMAC KMS key (KeyID) and MacAlgorithm.. - // - // If the HMAC is not verified, the VerifyMac operation fails with a KMSInvalidMacException - // exception. This exception indicates that one or more of the inputs changed - // since the HMAC was computed. - MacValid *bool `type:"boolean"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyMacOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyMacOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyMacOutput) SetKeyId(v string) *VerifyMacOutput { - s.KeyId = &v - return s -} - -// SetMacAlgorithm sets the MacAlgorithm field's value. -func (s *VerifyMacOutput) SetMacAlgorithm(v string) *VerifyMacOutput { - s.MacAlgorithm = &v - return s -} - -// SetMacValid sets the MacValid field's value. -func (s *VerifyMacOutput) SetMacValid(v bool) *VerifyMacOutput { - s.MacValid = &v - return s -} - -type VerifyOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) - // of the asymmetric KMS key that was used to verify the signature. - KeyId *string `min:"1" type:"string"` - - // A Boolean value that indicates whether the signature was verified. A value - // of True indicates that the Signature was produced by signing the Message - // with the specified KeyID and SigningAlgorithm. If the signature is not verified, - // the Verify operation fails with a KMSInvalidSignatureException exception. - SignatureValid *bool `type:"boolean"` - - // The signing algorithm that was used to verify the signature. - SigningAlgorithm *string `type:"string" enum:"SigningAlgorithmSpec"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s VerifyOutput) GoString() string { - return s.String() -} - -// SetKeyId sets the KeyId field's value. -func (s *VerifyOutput) SetKeyId(v string) *VerifyOutput { - s.KeyId = &v - return s -} - -// SetSignatureValid sets the SignatureValid field's value. -func (s *VerifyOutput) SetSignatureValid(v bool) *VerifyOutput { - s.SignatureValid = &v - return s -} - -// SetSigningAlgorithm sets the SigningAlgorithm field's value. 
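Note the asymmetry documented above: a failed verification surfaces as a KMSInvalidSignatureException error, not as SignatureValid=false. A sketch of branching on that code; classifyVerifyError is a hypothetical helper, and the ErrCode constant follows the generated errors.go naming pattern:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kms"
)

// classifyVerifyError is a hypothetical helper: since Verify reports a bad
// signature as an error rather than SignatureValid=false, callers usually
// branch on the service error code.
func classifyVerifyError(err error) string {
	if err == nil {
		return "verified"
	}
	if aerr, ok := err.(awserr.Error); ok {
		if aerr.Code() == kms.ErrCodeKMSInvalidSignatureException {
			return "signature did not verify"
		}
		return "service error: " + aerr.Code()
	}
	return "transport error"
}

func main() {
	log.Println(classifyVerifyError(nil))
}
```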
-func (s *VerifyOutput) SetSigningAlgorithm(v string) *VerifyOutput { - s.SigningAlgorithm = &v - return s -} - -const ( - // AlgorithmSpecRsaesPkcs1V15 is a AlgorithmSpec enum value - AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5" - - // AlgorithmSpecRsaesOaepSha1 is a AlgorithmSpec enum value - AlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1" - - // AlgorithmSpecRsaesOaepSha256 is a AlgorithmSpec enum value - AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" -) - -// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum -func AlgorithmSpec_Values() []string { - return []string{ - AlgorithmSpecRsaesPkcs1V15, - AlgorithmSpecRsaesOaepSha1, - AlgorithmSpecRsaesOaepSha256, - } -} - -const ( - // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS" - - // ConnectionErrorCodeTypeClusterNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeClusterNotFound = "CLUSTER_NOT_FOUND" - - // ConnectionErrorCodeTypeNetworkErrors is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeNetworkErrors = "NETWORK_ERRORS" - - // ConnectionErrorCodeTypeInternalError is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInternalError = "INTERNAL_ERROR" - - // ConnectionErrorCodeTypeInsufficientCloudhsmHsms is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeInsufficientCloudhsmHsms = "INSUFFICIENT_CLOUDHSM_HSMS" - - // ConnectionErrorCodeTypeUserLockedOut is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserLockedOut = "USER_LOCKED_OUT" - - // ConnectionErrorCodeTypeUserNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserNotFound = "USER_NOT_FOUND" - - // ConnectionErrorCodeTypeUserLoggedIn is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeUserLoggedIn = "USER_LOGGED_IN" - - // ConnectionErrorCodeTypeSubnetNotFound is a ConnectionErrorCodeType enum value - ConnectionErrorCodeTypeSubnetNotFound = "SUBNET_NOT_FOUND" -) - -// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum -func ConnectionErrorCodeType_Values() []string { - return []string{ - ConnectionErrorCodeTypeInvalidCredentials, - ConnectionErrorCodeTypeClusterNotFound, - ConnectionErrorCodeTypeNetworkErrors, - ConnectionErrorCodeTypeInternalError, - ConnectionErrorCodeTypeInsufficientCloudhsmHsms, - ConnectionErrorCodeTypeUserLockedOut, - ConnectionErrorCodeTypeUserNotFound, - ConnectionErrorCodeTypeUserLoggedIn, - ConnectionErrorCodeTypeSubnetNotFound, - } -} - -const ( - // ConnectionStateTypeConnected is a ConnectionStateType enum value - ConnectionStateTypeConnected = "CONNECTED" - - // ConnectionStateTypeConnecting is a ConnectionStateType enum value - ConnectionStateTypeConnecting = "CONNECTING" - - // ConnectionStateTypeFailed is a ConnectionStateType enum value - ConnectionStateTypeFailed = "FAILED" - - // ConnectionStateTypeDisconnected is a ConnectionStateType enum value - ConnectionStateTypeDisconnected = "DISCONNECTED" - - // ConnectionStateTypeDisconnecting is a ConnectionStateType enum value - ConnectionStateTypeDisconnecting = "DISCONNECTING" -) - -// ConnectionStateType_Values returns all elements of the ConnectionStateType enum -func ConnectionStateType_Values() []string { - return []string{ - ConnectionStateTypeConnected, - ConnectionStateTypeConnecting, - ConnectionStateTypeFailed, - ConnectionStateTypeDisconnected, - ConnectionStateTypeDisconnecting, - } -} - -const ( - // 
CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa2048 = "RSA_2048" - - // CustomerMasterKeySpecRsa3072 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa3072 = "RSA_3072" - - // CustomerMasterKeySpecRsa4096 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecRsa4096 = "RSA_4096" - - // CustomerMasterKeySpecEccNistP256 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP256 = "ECC_NIST_P256" - - // CustomerMasterKeySpecEccNistP384 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP384 = "ECC_NIST_P384" - - // CustomerMasterKeySpecEccNistP521 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccNistP521 = "ECC_NIST_P521" - - // CustomerMasterKeySpecEccSecgP256k1 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecEccSecgP256k1 = "ECC_SECG_P256K1" - - // CustomerMasterKeySpecSymmetricDefault is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" - - // CustomerMasterKeySpecHmac224 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecHmac224 = "HMAC_224" - - // CustomerMasterKeySpecHmac256 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecHmac256 = "HMAC_256" - - // CustomerMasterKeySpecHmac384 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecHmac384 = "HMAC_384" - - // CustomerMasterKeySpecHmac512 is a CustomerMasterKeySpec enum value - CustomerMasterKeySpecHmac512 = "HMAC_512" -) - -// CustomerMasterKeySpec_Values returns all elements of the CustomerMasterKeySpec enum -func CustomerMasterKeySpec_Values() []string { - return []string{ - CustomerMasterKeySpecRsa2048, - CustomerMasterKeySpecRsa3072, - CustomerMasterKeySpecRsa4096, - CustomerMasterKeySpecEccNistP256, - CustomerMasterKeySpecEccNistP384, - CustomerMasterKeySpecEccNistP521, - CustomerMasterKeySpecEccSecgP256k1, - CustomerMasterKeySpecSymmetricDefault, - CustomerMasterKeySpecHmac224, - CustomerMasterKeySpecHmac256, - CustomerMasterKeySpecHmac384, - CustomerMasterKeySpecHmac512, - } -} - -const ( - // DataKeyPairSpecRsa2048 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa2048 = "RSA_2048" - - // DataKeyPairSpecRsa3072 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa3072 = "RSA_3072" - - // DataKeyPairSpecRsa4096 is a DataKeyPairSpec enum value - DataKeyPairSpecRsa4096 = "RSA_4096" - - // DataKeyPairSpecEccNistP256 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP256 = "ECC_NIST_P256" - - // DataKeyPairSpecEccNistP384 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP384 = "ECC_NIST_P384" - - // DataKeyPairSpecEccNistP521 is a DataKeyPairSpec enum value - DataKeyPairSpecEccNistP521 = "ECC_NIST_P521" - - // DataKeyPairSpecEccSecgP256k1 is a DataKeyPairSpec enum value - DataKeyPairSpecEccSecgP256k1 = "ECC_SECG_P256K1" -) - -// DataKeyPairSpec_Values returns all elements of the DataKeyPairSpec enum -func DataKeyPairSpec_Values() []string { - return []string{ - DataKeyPairSpecRsa2048, - DataKeyPairSpecRsa3072, - DataKeyPairSpecRsa4096, - DataKeyPairSpecEccNistP256, - DataKeyPairSpecEccNistP384, - DataKeyPairSpecEccNistP521, - DataKeyPairSpecEccSecgP256k1, - } -} - -const ( - // DataKeySpecAes256 is a DataKeySpec enum value - DataKeySpecAes256 = "AES_256" - - // DataKeySpecAes128 is a DataKeySpec enum value - DataKeySpecAes128 = "AES_128" -) - -// DataKeySpec_Values returns all elements of the DataKeySpec enum -func DataKeySpec_Values() []string { - return []string{ - DataKeySpecAes256, - DataKeySpecAes128, - } -} - -const 
( - // EncryptionAlgorithmSpecSymmetricDefault is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecSymmetricDefault = "SYMMETRIC_DEFAULT" - - // EncryptionAlgorithmSpecRsaesOaepSha1 is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1" - - // EncryptionAlgorithmSpecRsaesOaepSha256 is a EncryptionAlgorithmSpec enum value - EncryptionAlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" -) - -// EncryptionAlgorithmSpec_Values returns all elements of the EncryptionAlgorithmSpec enum -func EncryptionAlgorithmSpec_Values() []string { - return []string{ - EncryptionAlgorithmSpecSymmetricDefault, - EncryptionAlgorithmSpecRsaesOaepSha1, - EncryptionAlgorithmSpecRsaesOaepSha256, - } -} - -const ( - // ExpirationModelTypeKeyMaterialExpires is a ExpirationModelType enum value - ExpirationModelTypeKeyMaterialExpires = "KEY_MATERIAL_EXPIRES" - - // ExpirationModelTypeKeyMaterialDoesNotExpire is a ExpirationModelType enum value - ExpirationModelTypeKeyMaterialDoesNotExpire = "KEY_MATERIAL_DOES_NOT_EXPIRE" -) - -// ExpirationModelType_Values returns all elements of the ExpirationModelType enum -func ExpirationModelType_Values() []string { - return []string{ - ExpirationModelTypeKeyMaterialExpires, - ExpirationModelTypeKeyMaterialDoesNotExpire, - } -} - -const ( - // GrantOperationDecrypt is a GrantOperation enum value - GrantOperationDecrypt = "Decrypt" - - // GrantOperationEncrypt is a GrantOperation enum value - GrantOperationEncrypt = "Encrypt" - - // GrantOperationGenerateDataKey is a GrantOperation enum value - GrantOperationGenerateDataKey = "GenerateDataKey" - - // GrantOperationGenerateDataKeyWithoutPlaintext is a GrantOperation enum value - GrantOperationGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" - - // GrantOperationReEncryptFrom is a GrantOperation enum value - GrantOperationReEncryptFrom = "ReEncryptFrom" - - // GrantOperationReEncryptTo is a GrantOperation enum value - GrantOperationReEncryptTo = "ReEncryptTo" - - // GrantOperationSign is a GrantOperation enum value - GrantOperationSign = "Sign" - - // GrantOperationVerify is a GrantOperation enum value - GrantOperationVerify = "Verify" - - // GrantOperationGetPublicKey is a GrantOperation enum value - GrantOperationGetPublicKey = "GetPublicKey" - - // GrantOperationCreateGrant is a GrantOperation enum value - GrantOperationCreateGrant = "CreateGrant" - - // GrantOperationRetireGrant is a GrantOperation enum value - GrantOperationRetireGrant = "RetireGrant" - - // GrantOperationDescribeKey is a GrantOperation enum value - GrantOperationDescribeKey = "DescribeKey" - - // GrantOperationGenerateDataKeyPair is a GrantOperation enum value - GrantOperationGenerateDataKeyPair = "GenerateDataKeyPair" - - // GrantOperationGenerateDataKeyPairWithoutPlaintext is a GrantOperation enum value - GrantOperationGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" - - // GrantOperationGenerateMac is a GrantOperation enum value - GrantOperationGenerateMac = "GenerateMac" - - // GrantOperationVerifyMac is a GrantOperation enum value - GrantOperationVerifyMac = "VerifyMac" -) - -// GrantOperation_Values returns all elements of the GrantOperation enum -func GrantOperation_Values() []string { - return []string{ - GrantOperationDecrypt, - GrantOperationEncrypt, - GrantOperationGenerateDataKey, - GrantOperationGenerateDataKeyWithoutPlaintext, - GrantOperationReEncryptFrom, - GrantOperationReEncryptTo, - GrantOperationSign, - GrantOperationVerify, - 
GrantOperationGetPublicKey, - GrantOperationCreateGrant, - GrantOperationRetireGrant, - GrantOperationDescribeKey, - GrantOperationGenerateDataKeyPair, - GrantOperationGenerateDataKeyPairWithoutPlaintext, - GrantOperationGenerateMac, - GrantOperationVerifyMac, - } -} - -const ( - // KeyManagerTypeAws is a KeyManagerType enum value - KeyManagerTypeAws = "AWS" - - // KeyManagerTypeCustomer is a KeyManagerType enum value - KeyManagerTypeCustomer = "CUSTOMER" -) - -// KeyManagerType_Values returns all elements of the KeyManagerType enum -func KeyManagerType_Values() []string { - return []string{ - KeyManagerTypeAws, - KeyManagerTypeCustomer, - } -} - -const ( - // KeySpecRsa2048 is a KeySpec enum value - KeySpecRsa2048 = "RSA_2048" - - // KeySpecRsa3072 is a KeySpec enum value - KeySpecRsa3072 = "RSA_3072" - - // KeySpecRsa4096 is a KeySpec enum value - KeySpecRsa4096 = "RSA_4096" - - // KeySpecEccNistP256 is a KeySpec enum value - KeySpecEccNistP256 = "ECC_NIST_P256" - - // KeySpecEccNistP384 is a KeySpec enum value - KeySpecEccNistP384 = "ECC_NIST_P384" - - // KeySpecEccNistP521 is a KeySpec enum value - KeySpecEccNistP521 = "ECC_NIST_P521" - - // KeySpecEccSecgP256k1 is a KeySpec enum value - KeySpecEccSecgP256k1 = "ECC_SECG_P256K1" - - // KeySpecSymmetricDefault is a KeySpec enum value - KeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" - - // KeySpecHmac224 is a KeySpec enum value - KeySpecHmac224 = "HMAC_224" - - // KeySpecHmac256 is a KeySpec enum value - KeySpecHmac256 = "HMAC_256" - - // KeySpecHmac384 is a KeySpec enum value - KeySpecHmac384 = "HMAC_384" - - // KeySpecHmac512 is a KeySpec enum value - KeySpecHmac512 = "HMAC_512" -) - -// KeySpec_Values returns all elements of the KeySpec enum -func KeySpec_Values() []string { - return []string{ - KeySpecRsa2048, - KeySpecRsa3072, - KeySpecRsa4096, - KeySpecEccNistP256, - KeySpecEccNistP384, - KeySpecEccNistP521, - KeySpecEccSecgP256k1, - KeySpecSymmetricDefault, - KeySpecHmac224, - KeySpecHmac256, - KeySpecHmac384, - KeySpecHmac512, - } -} - -const ( - // KeyStateCreating is a KeyState enum value - KeyStateCreating = "Creating" - - // KeyStateEnabled is a KeyState enum value - KeyStateEnabled = "Enabled" - - // KeyStateDisabled is a KeyState enum value - KeyStateDisabled = "Disabled" - - // KeyStatePendingDeletion is a KeyState enum value - KeyStatePendingDeletion = "PendingDeletion" - - // KeyStatePendingImport is a KeyState enum value - KeyStatePendingImport = "PendingImport" - - // KeyStatePendingReplicaDeletion is a KeyState enum value - KeyStatePendingReplicaDeletion = "PendingReplicaDeletion" - - // KeyStateUnavailable is a KeyState enum value - KeyStateUnavailable = "Unavailable" - - // KeyStateUpdating is a KeyState enum value - KeyStateUpdating = "Updating" -) - -// KeyState_Values returns all elements of the KeyState enum -func KeyState_Values() []string { - return []string{ - KeyStateCreating, - KeyStateEnabled, - KeyStateDisabled, - KeyStatePendingDeletion, - KeyStatePendingImport, - KeyStatePendingReplicaDeletion, - KeyStateUnavailable, - KeyStateUpdating, - } -} - -const ( - // KeyUsageTypeSignVerify is a KeyUsageType enum value - KeyUsageTypeSignVerify = "SIGN_VERIFY" - - // KeyUsageTypeEncryptDecrypt is a KeyUsageType enum value - KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" - - // KeyUsageTypeGenerateVerifyMac is a KeyUsageType enum value - KeyUsageTypeGenerateVerifyMac = "GENERATE_VERIFY_MAC" -) - -// KeyUsageType_Values returns all elements of the KeyUsageType enum -func KeyUsageType_Values() []string { - return 
[]string{ - KeyUsageTypeSignVerify, - KeyUsageTypeEncryptDecrypt, - KeyUsageTypeGenerateVerifyMac, - } -} - -const ( - // MacAlgorithmSpecHmacSha224 is a MacAlgorithmSpec enum value - MacAlgorithmSpecHmacSha224 = "HMAC_SHA_224" - - // MacAlgorithmSpecHmacSha256 is a MacAlgorithmSpec enum value - MacAlgorithmSpecHmacSha256 = "HMAC_SHA_256" - - // MacAlgorithmSpecHmacSha384 is a MacAlgorithmSpec enum value - MacAlgorithmSpecHmacSha384 = "HMAC_SHA_384" - - // MacAlgorithmSpecHmacSha512 is a MacAlgorithmSpec enum value - MacAlgorithmSpecHmacSha512 = "HMAC_SHA_512" -) - -// MacAlgorithmSpec_Values returns all elements of the MacAlgorithmSpec enum -func MacAlgorithmSpec_Values() []string { - return []string{ - MacAlgorithmSpecHmacSha224, - MacAlgorithmSpecHmacSha256, - MacAlgorithmSpecHmacSha384, - MacAlgorithmSpecHmacSha512, - } -} - -const ( - // MessageTypeRaw is a MessageType enum value - MessageTypeRaw = "RAW" - - // MessageTypeDigest is a MessageType enum value - MessageTypeDigest = "DIGEST" -) - -// MessageType_Values returns all elements of the MessageType enum -func MessageType_Values() []string { - return []string{ - MessageTypeRaw, - MessageTypeDigest, - } -} - -const ( - // MultiRegionKeyTypePrimary is a MultiRegionKeyType enum value - MultiRegionKeyTypePrimary = "PRIMARY" - - // MultiRegionKeyTypeReplica is a MultiRegionKeyType enum value - MultiRegionKeyTypeReplica = "REPLICA" -) - -// MultiRegionKeyType_Values returns all elements of the MultiRegionKeyType enum -func MultiRegionKeyType_Values() []string { - return []string{ - MultiRegionKeyTypePrimary, - MultiRegionKeyTypeReplica, - } -} - -const ( - // OriginTypeAwsKms is a OriginType enum value - OriginTypeAwsKms = "AWS_KMS" - - // OriginTypeExternal is a OriginType enum value - OriginTypeExternal = "EXTERNAL" - - // OriginTypeAwsCloudhsm is a OriginType enum value - OriginTypeAwsCloudhsm = "AWS_CLOUDHSM" -) - -// OriginType_Values returns all elements of the OriginType enum -func OriginType_Values() []string { - return []string{ - OriginTypeAwsKms, - OriginTypeExternal, - OriginTypeAwsCloudhsm, - } -} - -const ( - // SigningAlgorithmSpecRsassaPssSha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha256 = "RSASSA_PSS_SHA_256" - - // SigningAlgorithmSpecRsassaPssSha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha384 = "RSASSA_PSS_SHA_384" - - // SigningAlgorithmSpecRsassaPssSha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPssSha512 = "RSASSA_PSS_SHA_512" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha256 = "RSASSA_PKCS1_V1_5_SHA_256" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha384 = "RSASSA_PKCS1_V1_5_SHA_384" - - // SigningAlgorithmSpecRsassaPkcs1V15Sha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecRsassaPkcs1V15Sha512 = "RSASSA_PKCS1_V1_5_SHA_512" - - // SigningAlgorithmSpecEcdsaSha256 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha256 = "ECDSA_SHA_256" - - // SigningAlgorithmSpecEcdsaSha384 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha384 = "ECDSA_SHA_384" - - // SigningAlgorithmSpecEcdsaSha512 is a SigningAlgorithmSpec enum value - SigningAlgorithmSpecEcdsaSha512 = "ECDSA_SHA_512" -) - -// SigningAlgorithmSpec_Values returns all elements of the SigningAlgorithmSpec enum -func SigningAlgorithmSpec_Values() []string { - return []string{ - 
SigningAlgorithmSpecRsassaPssSha256, - SigningAlgorithmSpecRsassaPssSha384, - SigningAlgorithmSpecRsassaPssSha512, - SigningAlgorithmSpecRsassaPkcs1V15Sha256, - SigningAlgorithmSpecRsassaPkcs1V15Sha384, - SigningAlgorithmSpecRsassaPkcs1V15Sha512, - SigningAlgorithmSpecEcdsaSha256, - SigningAlgorithmSpecEcdsaSha384, - SigningAlgorithmSpecEcdsaSha512, - } -} - -const ( - // WrappingKeySpecRsa2048 is a WrappingKeySpec enum value - WrappingKeySpecRsa2048 = "RSA_2048" -) - -// WrappingKeySpec_Values returns all elements of the WrappingKeySpec enum -func WrappingKeySpec_Values() []string { - return []string{ - WrappingKeySpecRsa2048, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go deleted file mode 100644 index 45cecea7f8..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package kms provides the client and types for making API -// requests to AWS Key Management Service. -// -// Key Management Service (KMS) is an encryption and key management web service. -// This guide describes the KMS operations that you can call programmatically. -// For general information about KMS, see the Key Management Service Developer -// Guide (https://docs.aws.amazon.com/kms/latest/developerguide/). -// -// KMS is replacing the term customer master key (CMK) with KMS key and KMS -// key. The concept has not changed. To prevent breaking changes, KMS is keeping -// some variations of this term. -// -// Amazon Web Services provides SDKs that consist of libraries and sample code -// for various programming languages and platforms (Java, Ruby, .Net, macOS, -// Android, etc.). The SDKs provide a convenient way to create programmatic -// access to KMS and other Amazon Web Services services. For example, the SDKs -// take care of tasks such as signing requests (see below), managing errors, -// and retrying requests automatically. For more information about the Amazon -// Web Services SDKs, including how to download and install them, see Tools -// for Amazon Web Services (http://aws.amazon.com/tools/). -// -// We recommend that you use the Amazon Web Services SDKs to make programmatic -// API calls to KMS. -// -// If you need to use FIPS 140-2 validated cryptographic modules when communicating -// with Amazon Web Services, use the FIPS endpoint in your preferred Amazon -// Web Services Region. For more information about the available FIPS endpoints, -// see Service endpoints (https://docs.aws.amazon.com/general/latest/gr/kms.html#kms_region) -// in the Key Management Service topic of the Amazon Web Services General Reference. -// -// Clients must support TLS (Transport Layer Security) 1.0. We recommend TLS -// 1.2. Clients must also support cipher suites with Perfect Forward Secrecy -// (PFS) such as Ephemeral Diffie-Hellman (DHE) or Elliptic Curve Ephemeral -// Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support -// these modes. -// -// Signing Requests -// -// Requests must be signed by using an access key ID and a secret access key. -// We strongly recommend that you do not use your Amazon Web Services account -// (root) access key ID and secret key for everyday work with KMS. Instead, -// use the access key ID and secret access key for an IAM user. You can also -// use the Amazon Web Services Security Token Service to generate temporary -// security credentials that you can use to sign requests. 
-// -// All KMS operations require Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// -// Logging API Requests -// -// KMS supports CloudTrail, a service that logs Amazon Web Services API calls -// and related events for your Amazon Web Services account and delivers them -// to an Amazon S3 bucket that you specify. By using the information collected -// by CloudTrail, you can determine what requests were made to KMS, who made -// the request, when it was made, and so on. To learn more about CloudTrail, -// including how to turn it on and find your log files, see the CloudTrail User -// Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/). -// -// Additional Resources -// -// For more information about credentials and request signing, see the following: -// -// * Amazon Web Services Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) -// - This topic provides general information about the types of credentials -// used to access Amazon Web Services. -// -// * Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) -// - This section of the IAM User Guide describes how to create and use temporary -// security credentials. -// -// * Signature Version 4 Signing Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) -// - This set of topics walks you through the process of signing a request -// using an access key ID and a secret access key. -// -// Commonly Used API Operations -// -// Of the API operations discussed in this guide, the following will prove the -// most useful for most applications. You will likely perform operations other -// than these, such as creating keys and assigning policies, by using the console. -// -// * Encrypt -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext -// -// See https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01 for more information on this service. -// -// See kms package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/ -// -// Using the Client -// -// To contact AWS Key Management Service with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. -// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the AWS Key Management Service client KMS for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/kms/#New -package kms diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go deleted file mode 100644 index 4f8fc21049..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/errors.go +++ /dev/null @@ -1,376 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package kms - -import ( - "github.com/aws/aws-sdk-go/private/protocol" -) - -const ( - - // ErrCodeAlreadyExistsException for service response error code - // "AlreadyExistsException". - // - // The request was rejected because it attempted to create a resource that already - // exists. 
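A sketch of the client setup the package comment describes, assuming the default credential chain; the Region pin is an arbitrary choice. Because the generated clients are safe for concurrent use, the single svc value can be shared across goroutines:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	// One session and one client, created up front; the SDK signs every
	// request with Signature Version 4 and retries transient failures.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-2"), // assumption: Region pinned here
	}))
	svc := kms.New(sess)

	out, err := svc.ListKeys(&kms.ListKeysInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("keys:", len(out.Keys))
}
```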
- ErrCodeAlreadyExistsException = "AlreadyExistsException" - - // ErrCodeCloudHsmClusterInUseException for service response error code - // "CloudHsmClusterInUseException". - // - // The request was rejected because the specified CloudHSM cluster is already - // associated with a custom key store or it shares a backup history with a cluster - // that is associated with a custom key store. Each custom key store must be - // associated with a different CloudHSM cluster. - // - // Clusters that share a backup history have the same cluster certificate. To - // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - ErrCodeCloudHsmClusterInUseException = "CloudHsmClusterInUseException" - - // ErrCodeCloudHsmClusterInvalidConfigurationException for service response error code - // "CloudHsmClusterInvalidConfigurationException". - // - // The request was rejected because the associated CloudHSM cluster did not - // meet the configuration requirements for a custom key store. - // - // * The cluster must be configured with private subnets in at least two - // different Availability Zones in the Region. - // - // * The security group for the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // (cloudhsm-cluster-<cluster-id>-sg) must include inbound rules and outbound - // rules that allow TCP traffic on ports 2223-2225. The Source in the inbound - // rules and the Destination in the outbound rules must match the security - // group ID. These rules are set by default when you create the cluster. - // Do not delete or change them. To get information about a particular security - // group, use the DescribeSecurityGroups (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) - // operation. - // - // * The cluster must contain at least as many HSMs as the operation requires. - // To add HSMs, use the CloudHSM CreateHsm (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_CreateHsm.html) - // operation. For the CreateCustomKeyStore, UpdateCustomKeyStore, and CreateKey - // operations, the CloudHSM cluster must have at least two active HSMs, each - // in a different Availability Zone. For the ConnectCustomKeyStore operation, - // the CloudHSM cluster must contain at least one active HSM. - // - // For information about the requirements for a CloudHSM cluster that is associated - // with a custom key store, see Assemble the Prerequisites (https://docs.aws.amazon.com/kms/latest/developerguide/create-keystore.html#before-keystore) - // in the Key Management Service Developer Guide. For information about creating - // a private subnet for a CloudHSM cluster, see Create a Private Subnet (https://docs.aws.amazon.com/cloudhsm/latest/userguide/create-subnets.html) - // in the CloudHSM User Guide. For information about cluster security groups, - // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) - // in the CloudHSM User Guide. - ErrCodeCloudHsmClusterInvalidConfigurationException = "CloudHsmClusterInvalidConfigurationException" - - // ErrCodeCloudHsmClusterNotActiveException for service response error code - // "CloudHsmClusterNotActiveException". - // - // The request was rejected because the CloudHSM cluster that is associated - // with the custom key store is not active. Initialize and activate the cluster - // and try the command again.
For detailed instructions, see Getting Started - // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) - // in the CloudHSM User Guide. - ErrCodeCloudHsmClusterNotActiveException = "CloudHsmClusterNotActiveException" - - // ErrCodeCloudHsmClusterNotFoundException for service response error code - // "CloudHsmClusterNotFoundException". - // - // The request was rejected because KMS cannot find the CloudHSM cluster with - // the specified cluster ID. Retry the request with a different cluster ID. - ErrCodeCloudHsmClusterNotFoundException = "CloudHsmClusterNotFoundException" - - // ErrCodeCloudHsmClusterNotRelatedException for service response error code - // "CloudHsmClusterNotRelatedException". - // - // The request was rejected because the specified CloudHSM cluster has a different - // cluster certificate than the original cluster. You cannot use the operation - // to specify an unrelated cluster. - // - // Specify a cluster that shares a backup history with the original cluster. - // This includes clusters that were created from a backup of the current cluster, - // and clusters that were created from the same backup that produced the current - // cluster. - // - // Clusters that share a backup history have the same cluster certificate. To - // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) - // operation. - ErrCodeCloudHsmClusterNotRelatedException = "CloudHsmClusterNotRelatedException" - - // ErrCodeCustomKeyStoreHasCMKsException for service response error code - // "CustomKeyStoreHasCMKsException". - // - // The request was rejected because the custom key store contains KMS keys. - // After verifying that you do not need to use the KMS keys, use the ScheduleKeyDeletion - // operation to delete the KMS keys. After they are deleted, you can delete - // the custom key store. - ErrCodeCustomKeyStoreHasCMKsException = "CustomKeyStoreHasCMKsException" - - // ErrCodeCustomKeyStoreInvalidStateException for service response error code - // "CustomKeyStoreInvalidStateException". - // - // The request was rejected because of the ConnectionState of the custom key - // store. To get the ConnectionState of a custom key store, use the DescribeCustomKeyStores - // operation. - // - // This exception is thrown under the following conditions: - // - // * You requested the CreateKey or GenerateRandom operation in a custom - // key store that is not connected. These operations are valid only when - // the custom key store ConnectionState is CONNECTED. - // - // * You requested the UpdateCustomKeyStore or DeleteCustomKeyStore operation - // on a custom key store that is not disconnected. This operation is valid - // only when the custom key store ConnectionState is DISCONNECTED. - // - // * You requested the ConnectCustomKeyStore operation on a custom key store - // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid - // for all other ConnectionState values. - ErrCodeCustomKeyStoreInvalidStateException = "CustomKeyStoreInvalidStateException" - - // ErrCodeCustomKeyStoreNameInUseException for service response error code - // "CustomKeyStoreNameInUseException". - // - // The request was rejected because the specified custom key store name is already - // assigned to another custom key store in the account. Try again with a custom - // key store name that is unique in the account. 
- ErrCodeCustomKeyStoreNameInUseException = "CustomKeyStoreNameInUseException" - - // ErrCodeCustomKeyStoreNotFoundException for service response error code - // "CustomKeyStoreNotFoundException". - // - // The request was rejected because KMS cannot find a custom key store with - // the specified key store name or ID. - ErrCodeCustomKeyStoreNotFoundException = "CustomKeyStoreNotFoundException" - - // ErrCodeDependencyTimeoutException for service response error code - // "DependencyTimeoutException". - // - // The system timed out while trying to fulfill the request. The request can - // be retried. - ErrCodeDependencyTimeoutException = "DependencyTimeoutException" - - // ErrCodeDisabledException for service response error code - // "DisabledException". - // - // The request was rejected because the specified KMS key is not enabled. - ErrCodeDisabledException = "DisabledException" - - // ErrCodeExpiredImportTokenException for service response error code - // "ExpiredImportTokenException". - // - // The request was rejected because the specified import token is expired. Use - // GetParametersForImport to get a new import token and public key, use the - // new public key to encrypt the key material, and then try the request again. - ErrCodeExpiredImportTokenException = "ExpiredImportTokenException" - - // ErrCodeIncorrectKeyException for service response error code - // "IncorrectKeyException". - // - // The request was rejected because the specified KMS key cannot decrypt the - // data. The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request - // must identify the same KMS key that was used to encrypt the ciphertext. - ErrCodeIncorrectKeyException = "IncorrectKeyException" - - // ErrCodeIncorrectKeyMaterialException for service response error code - // "IncorrectKeyMaterialException". - // - // The request was rejected because the key material in the request is expired, - // invalid, or is not the same key material that was previously imported into - // this KMS key. - ErrCodeIncorrectKeyMaterialException = "IncorrectKeyMaterialException" - - // ErrCodeIncorrectTrustAnchorException for service response error code - // "IncorrectTrustAnchorException". - // - // The request was rejected because the trust anchor certificate in the request - // is not the trust anchor certificate for the specified CloudHSM cluster. - // - // When you initialize the cluster (https://docs.aws.amazon.com/cloudhsm/latest/userguide/initialize-cluster.html#sign-csr), - // you create the trust anchor certificate and save it in the customerCA.crt - // file. - ErrCodeIncorrectTrustAnchorException = "IncorrectTrustAnchorException" - - // ErrCodeInternalException for service response error code - // "KMSInternalException". - // - // The request was rejected because an internal exception occurred. The request - // can be retried. - ErrCodeInternalException = "KMSInternalException" - - // ErrCodeInvalidAliasNameException for service response error code - // "InvalidAliasNameException". - // - // The request was rejected because the specified alias name is not valid. - ErrCodeInvalidAliasNameException = "InvalidAliasNameException" - - // ErrCodeInvalidArnException for service response error code - // "InvalidArnException". - // - // The request was rejected because a specified ARN, or an ARN in a key policy, - // is not valid. - ErrCodeInvalidArnException = "InvalidArnException" - - // ErrCodeInvalidCiphertextException for service response error code - // "InvalidCiphertextException".
- // - // From the Decrypt or ReEncrypt operation, the request was rejected because - // the specified ciphertext, or additional authenticated data incorporated into - // the ciphertext, such as the encryption context, is corrupted, missing, or - // otherwise invalid. - // - // From the ImportKeyMaterial operation, the request was rejected because KMS - // could not decrypt the encrypted (wrapped) key material. - ErrCodeInvalidCiphertextException = "InvalidCiphertextException" - - // ErrCodeInvalidGrantIdException for service response error code - // "InvalidGrantIdException". - // - // The request was rejected because the specified GrantId is not valid. - ErrCodeInvalidGrantIdException = "InvalidGrantIdException" - - // ErrCodeInvalidGrantTokenException for service response error code - // "InvalidGrantTokenException". - // - // The request was rejected because the specified grant token is not valid. - ErrCodeInvalidGrantTokenException = "InvalidGrantTokenException" - - // ErrCodeInvalidImportTokenException for service response error code - // "InvalidImportTokenException". - // - // The request was rejected because the provided import token is invalid or - // is associated with a different KMS key. - ErrCodeInvalidImportTokenException = "InvalidImportTokenException" - - // ErrCodeInvalidKeyUsageException for service response error code - // "InvalidKeyUsageException". - // - // The request was rejected for one of the following reasons: - // - // * The KeyUsage value of the KMS key is incompatible with the API operation. - // - // * The encryption algorithm or signing algorithm specified for the operation - // is incompatible with the type of key material in the KMS key (KeySpec). - // - // For encrypting, decrypting, re-encrypting, and generating data keys, the - // KeyUsage must be ENCRYPT_DECRYPT. For signing and verifying messages, the - // KeyUsage must be SIGN_VERIFY. For generating and verifying message authentication - // codes (MACs), the KeyUsage must be GENERATE_VERIFY_MAC. To find the KeyUsage - // of a KMS key, use the DescribeKey operation. - // - // To find the encryption or signing algorithms supported for a particular KMS - // key, use the DescribeKey operation. - ErrCodeInvalidKeyUsageException = "InvalidKeyUsageException" - - // ErrCodeInvalidMarkerException for service response error code - // "InvalidMarkerException". - // - // The request was rejected because the marker that specifies where pagination - // should next begin is not valid. - ErrCodeInvalidMarkerException = "InvalidMarkerException" - - // ErrCodeInvalidStateException for service response error code - // "KMSInvalidStateException". - // - // The request was rejected because the state of the specified resource is not - // valid for this request. - // - // For more information about how key state affects the use of a KMS key, see - // Key states of KMS keys (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) - // in the Key Management Service Developer Guide. - ErrCodeInvalidStateException = "KMSInvalidStateException" - - // ErrCodeKMSInvalidMacException for service response error code - // "KMSInvalidMacException". - // - // The request was rejected because the HMAC verification failed. HMAC verification - // fails when the HMAC computed by using the specified message, HMAC KMS key, - // and MAC algorithm does not match the HMAC specified in the request.
- ErrCodeKMSInvalidMacException = "KMSInvalidMacException" - - // ErrCodeKMSInvalidSignatureException for service response error code - // "KMSInvalidSignatureException". - // - // The request was rejected because the signature verification failed. Signature - // verification fails when it cannot confirm that the signature was produced by - // signing the specified message with the specified KMS key and signing algorithm. - ErrCodeKMSInvalidSignatureException = "KMSInvalidSignatureException" - - // ErrCodeKeyUnavailableException for service response error code - // "KeyUnavailableException". - // - // The request was rejected because the specified KMS key was not available. - // You can retry the request. - ErrCodeKeyUnavailableException = "KeyUnavailableException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // The request was rejected because a quota was exceeded. For more information, - // see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) - // in the Key Management Service Developer Guide. - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeMalformedPolicyDocumentException for service response error code - // "MalformedPolicyDocumentException". - // - // The request was rejected because the specified policy is not syntactically - // or semantically correct. - ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocumentException" - - // ErrCodeNotFoundException for service response error code - // "NotFoundException". - // - // The request was rejected because the specified entity or resource could not - // be found. - ErrCodeNotFoundException = "NotFoundException" - - // ErrCodeTagException for service response error code - // "TagException". - // - // The request was rejected because one or more tags are not valid. - ErrCodeTagException = "TagException" - - // ErrCodeUnsupportedOperationException for service response error code - // "UnsupportedOperationException". - // - // The request was rejected because a specified parameter is not supported or - // a specified resource is not valid for this operation.
- ErrCodeUnsupportedOperationException = "UnsupportedOperationException" -) - -var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "AlreadyExistsException": newErrorAlreadyExistsException, - "CloudHsmClusterInUseException": newErrorCloudHsmClusterInUseException, - "CloudHsmClusterInvalidConfigurationException": newErrorCloudHsmClusterInvalidConfigurationException, - "CloudHsmClusterNotActiveException": newErrorCloudHsmClusterNotActiveException, - "CloudHsmClusterNotFoundException": newErrorCloudHsmClusterNotFoundException, - "CloudHsmClusterNotRelatedException": newErrorCloudHsmClusterNotRelatedException, - "CustomKeyStoreHasCMKsException": newErrorCustomKeyStoreHasCMKsException, - "CustomKeyStoreInvalidStateException": newErrorCustomKeyStoreInvalidStateException, - "CustomKeyStoreNameInUseException": newErrorCustomKeyStoreNameInUseException, - "CustomKeyStoreNotFoundException": newErrorCustomKeyStoreNotFoundException, - "DependencyTimeoutException": newErrorDependencyTimeoutException, - "DisabledException": newErrorDisabledException, - "ExpiredImportTokenException": newErrorExpiredImportTokenException, - "IncorrectKeyException": newErrorIncorrectKeyException, - "IncorrectKeyMaterialException": newErrorIncorrectKeyMaterialException, - "IncorrectTrustAnchorException": newErrorIncorrectTrustAnchorException, - "KMSInternalException": newErrorInternalException, - "InvalidAliasNameException": newErrorInvalidAliasNameException, - "InvalidArnException": newErrorInvalidArnException, - "InvalidCiphertextException": newErrorInvalidCiphertextException, - "InvalidGrantIdException": newErrorInvalidGrantIdException, - "InvalidGrantTokenException": newErrorInvalidGrantTokenException, - "InvalidImportTokenException": newErrorInvalidImportTokenException, - "InvalidKeyUsageException": newErrorInvalidKeyUsageException, - "InvalidMarkerException": newErrorInvalidMarkerException, - "KMSInvalidStateException": newErrorInvalidStateException, - "KMSInvalidMacException": newErrorKMSInvalidMacException, - "KMSInvalidSignatureException": newErrorKMSInvalidSignatureException, - "KeyUnavailableException": newErrorKeyUnavailableException, - "LimitExceededException": newErrorLimitExceededException, - "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException, - "NotFoundException": newErrorNotFoundException, - "TagException": newErrorTagException, - "UnsupportedOperationException": newErrorUnsupportedOperationException, -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go deleted file mode 100644 index 18dfb8c788..0000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ /dev/null @@ -1,108 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package kms - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// KMS provides the API operation methods for making requests to -// AWS Key Management Service. See this package's package overview docs -// for details on the service. -// -// KMS methods are safe to use concurrently. It is not safe to -// modify or mutate any of the struct's properties though.
-type KMS struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "kms" // Name of service. - EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "KMS" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the KMS client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a KMS client from just a session. -// svc := kms.New(mySession) -// -// // Create a KMS client with additional configuration -// svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = EndpointsID - // No Fallback - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) -} - -// newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *KMS { - svc := &KMS{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2014-11-01", - ResolvedRegion: resolvedRegion, - JSONVersion: "1.1", - TargetPrefix: "TrentService", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed( - protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), - ) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a KMS operation and runs any -// custom request initialization. -func (c *KMS) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go index 948f060cab..b8f590f71d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -29,14 +29,13 @@ const opGetRoleCredentials = "GetRoleCredentials" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) // -// // Example sending a request using the GetRoleCredentialsRequest method. 
-// req, resp := client.GetRoleCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { @@ -69,20 +68,21 @@ func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *re // API operation GetRoleCredentials for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { @@ -122,14 +122,13 @@ const opListAccountRoles = "ListAccountRoles" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) // -// // Example sending a request using the ListAccountRolesRequest method. -// req, resp := client.ListAccountRolesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { @@ -167,20 +166,21 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques // API operation ListAccountRoles for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. 
// -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { @@ -212,15 +212,14 @@ func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRol // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccountRoles operation. -// pageNum := 0 -// err := client.ListAccountRolesPages(params, -// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -272,14 +271,13 @@ const opListAccounts = "ListAccounts" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) // -// // Example sending a request using the ListAccountsRequest method. -// req, resp := client.ListAccountsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { @@ -310,7 +308,8 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // Lists all AWS accounts assigned to the user. These AWS accounts are assigned // by the administrator of the account. For more information, see Assign User // Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the AWS SSO User Guide. This operation returns a paginated response. +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -320,20 +319,21 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // API operation ListAccounts for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. 
For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { @@ -365,15 +365,14 @@ func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccounts operation. -// pageNum := 0 -// err := client.ListAccountsPages(params, -// func(page *sso.ListAccountsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -425,14 +424,13 @@ const opLogout = "Logout" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) // -// // Example sending a request using the LogoutRequest method. -// req, resp := client.LogoutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { @@ -455,7 +453,21 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // Logout API operation for AWS Single Sign-On. // -// Removes the client- and server-side session that is associated with the user. +// Removes the locally stored SSO tokens from the client-side cache and sends +// an API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. 
More specifically, +// IAM Identity Center assumes an IAM role in the target account on behalf of +// the user, and the corresponding temporary AWS credentials are returned to +// the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured +// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -465,17 +477,18 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // API operation Logout for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { @@ -554,7 +567,7 @@ type GetRoleCredentialsInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetRoleCredentialsInput's @@ -730,7 +743,7 @@ type ListAccountRolesInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountRolesInput's @@ -859,7 +872,7 @@ type ListAccountsInput struct { // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountsInput's @@ -974,7 +987,7 @@ type LogoutInput struct { // The token issued by the CreateToken API call. 
For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by LogoutInput's diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go index 92d82b2afb..15e61a3228 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go @@ -3,30 +3,31 @@ // Package sso provides the client and types for making API // requests to AWS Single Sign-On. // -// AWS Single Sign-On Portal is a web service that makes it easy for you to -// assign user access to AWS SSO resources such as the user portal. Users can -// get AWS account applications and roles assigned to them and get federated -// into the application. +// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web +// service that makes it easy for you to assign user access to IAM Identity +// Center resources such as the AWS access portal. Users can get AWS account +// applications and roles assigned to them and get federated into the application. // -// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) -// in the AWS SSO User Guide. +// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces +// will continue to retain their original name for backward compatibility purposes. +// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed). // -// This API reference guide describes the AWS SSO Portal operations that you -// can call programmatically and includes detailed information on data types -// and errors. +// This reference guide describes the IAM Identity Center Portal operations +// that you can call programmatically and includes detailed information on data +// types and errors. // // AWS provides SDKs that consist of libraries and sample code for various programming // languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs -// provide a convenient way to create programmatic access to AWS SSO and other -// AWS services. For more information about the AWS SDKs, including how to download -// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// provide a convenient way to create programmatic access to IAM Identity Center +// and other AWS services. For more information about the AWS SDKs, including +// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/). // // See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service. // // See sso package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sso/ // -// Using the Client +// # Using the Client // // To contact AWS Single Sign-On with the SDK use the New function to create // a new service client. With that client you can make API requests to the service.
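The renamed package docs above still center on the same flow: create a client with New, then call the Portal operations with an IAM Identity Center access token. A minimal sketch, assuming a placeholder region and a token obtained elsewhere through the sso-oidc CreateToken flow:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sso.New(sess, aws.NewConfig().WithRegion("us-west-2"))

	// AccessToken is a placeholder; in practice it comes from CreateToken.
	input := &sso.ListAccountsInput{AccessToken: aws.String("<access-token>")}
	err := svc.ListAccountsPages(input,
		func(page *sso.ListAccountsOutput, lastPage bool) bool {
			for _, acct := range page.AccountList {
				fmt.Println(aws.StringValue(acct.AccountId), aws.StringValue(acct.AccountName))
			}
			return true // visit every page
		})
	if err != nil {
		log.Fatal(err)
	}
}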
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go index 7a28dc797e..7094cfe413 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go @@ -40,13 +40,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a SSO client from just a session. -// svc := sso.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a SSO client with additional configuration -// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a SSO client from just a session. +// svc := sso.New(mySession) +// +// // Create a SSO client with additional configuration +// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go index 4cac247c18..818cab7cda 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Single Sign-On. -// func myFunc(svc ssoiface.SSOAPI) bool { -// // Make svc.GetRoleCredentials request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } // -// func main() { -// sess := session.New() -// svc := sso.New(sess) +// func main() { +// sess := session.New() +// svc := sso.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSSOClient struct { -// ssoiface.SSOAPI -// } -// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSSOClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 718409b549..2b7e675ab8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -28,14 +28,13 @@ const opAssumeRole = "AssumeRole" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) // -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { @@ -66,7 +65,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRole can be used to make // API calls to any Amazon Web Services service with the following exception: @@ -105,10 +104,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // To allow a user to assume a role in the same account, you can do either of // the following: // -// * Attach a policy to the user that allows the user to call AssumeRole -// (as long as the role's trust policy trusts the account). +// - Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). // -// * Add the user as a principal directly in the role's trust policy. +// - Add the user as a principal directly in the role's trust policy. // // You can do either because the role’s trust policy acts as an IAM resource-based // policy. When a resource-based policy grants access to a principal in the @@ -116,7 +115,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These tags are // called session tags. 
For more information about session tags, see Passing @@ -134,7 +133,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// Using MFA with AssumeRole +// # Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information // when you call AssumeRole. This is useful for cross-account scenarios to ensure @@ -163,35 +162,36 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // API operation AssumeRole for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. 
For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { @@ -231,14 +231,13 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) // -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { @@ -274,7 +273,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // can use these temporary security credentials to sign calls to Amazon Web // Services services. // -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter @@ -300,7 +299,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a role using role chaining and provide a DurationSeconds parameter value // greater than one hour, the operation fails. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any Amazon Web Services service with the following exception: @@ -331,7 +330,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // identifiable information (PII). For example, you could instead use the persistent // identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your SAML assertion // as session tags. Each session tag consists of a key name and an associated @@ -365,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. 
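With AssumeRoleWithSAML, the session tags described above arrive through the IdP assertion, but the equivalent explicit form is easiest to see on a plain AssumeRole call. A hedged sketch, with placeholder role ARN, session name, and tag values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		// Must stay within the role's configured maximum session duration.
		DurationSeconds: aws.Int64(3600),
		// Session tags, as discussed in the Tags sections above.
		Tags: []*sts.Tag{
			{Key: aws.String("Project"), Value: aws.String("demo")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary credentials expire at", out.Credentials.Expiration)
}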
// -// SAML Configuration +// # SAML Configuration // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by Amazon Web Services. @@ -376,17 +375,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. // -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. // -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. // -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -396,47 +395,48 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // API operation AssumeRoleWithSAML for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. 
-// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
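The error-handling contract reflowed above is unchanged by this vendor bump: callers still type-assert to awserr.Error and switch on the generated ErrCode constants. A minimal sketch of that pattern against the vendored client (not part of the diff; the ARNs and assertion value are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// ARNs and the SAML assertion are placeholders for illustration only.
	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"),
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/ExampleRole"),
		SAMLAssertion: aws.String("base64-encoded-assertion"),
	})
	if err != nil {
		// Runtime type assertion, as the doc comments above describe.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case sts.ErrCodeExpiredTokenException:
				log.Fatal("assertion expired; fetch a new one from the IdP")
			case sts.ErrCodeIDPRejectedClaimException:
				log.Fatal("IdP rejected the claim: ", aerr.Message())
			default:
				log.Fatal(aerr.Code(), ": ", aerr.Message())
			}
		}
		log.Fatal(err)
	}
	fmt.Println("credentials expire at", out.Credentials.Expiration)
}
```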
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { @@ -476,14 +476,13 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) // -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { @@ -540,7 +539,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // temporary security credentials to sign calls to Amazon Web Services service // API operations. // -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -555,7 +554,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any Amazon Web Services service with the following @@ -576,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your web identity // token as session tags. Each session tag consists of a key name and an associated @@ -610,7 +609,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// Identities +// # Identities // // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that @@ -628,24 +627,24 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). 
+// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). -// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to Amazon Web Services. +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. // -// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -655,54 +654,55 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // API operation AssumeRoleWithWebIdentity for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. 
This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { @@ -742,14 +742,13 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) // -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { @@ -793,18 +792,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // The decoded message includes the following type of information: // -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. 
For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. // -// * The principal who made the request. +// - The principal who made the request. // -// * The requested action. +// - The requested action. // -// * The requested resource. +// - The requested resource. // -// * The values of condition keys in the context of the user's request. +// - The values of condition keys in the context of the user's request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -814,10 +813,10 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // API operation DecodeAuthorizationMessage for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -857,14 +856,13 @@ const opGetAccessKeyInfo = "GetAccessKeyInfo" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) // -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { @@ -954,14 +952,13 @@ const opGetCallerIdentity = "GetCallerIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) // -// // Example sending a request using the GetCallerIdentityRequest method. 
-// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { @@ -1037,14 +1034,13 @@ const opGetFederationToken = "GetFederationToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) // -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { @@ -1094,7 +1090,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // -// Session duration +// # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default @@ -1102,15 +1098,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // by using the Amazon Web Services account root user credentials have a maximum // duration of 3,600 seconds (1 hour). // -// Permissions +// # Permissions // // You can use the temporary credentials created by GetFederationToken in any // Amazon Web Services service except the following: // -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. // -// * You cannot call any STS operations except GetCallerIdentity. +// - You cannot call any STS operations except GetCallerIdentity. // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an @@ -1136,7 +1132,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // by the policy. These permissions are granted in addition to the permissions // granted by the session policies. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These are called // session tags. For more information about session tags, see Passing Session @@ -1172,31 +1168,32 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // API operation GetFederationToken for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. 
-// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { @@ -1236,14 +1233,13 @@ const opGetSessionToken = "GetSessionToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
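The "inject custom logic" note above refers to the two-step Request/Send pattern that these reflowed examples demonstrate. A hedged sketch of customizing a GetSessionToken call before sending it (the header name is invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	req, resp := svc.GetSessionTokenRequest(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(900), // minimum session duration
	})

	// The request has not been sent yet, so it can still be customized,
	// e.g. with an extra header (the header name is illustrative only).
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123")

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("session token:", aws.StringValue(resp.Credentials.SessionToken))
}
```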
// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) // -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { @@ -1279,7 +1275,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Session Duration +// No permissions are required for users to perform this operation. The purpose +// of the sts:GetSessionToken operation is to authenticate the user using MFA. +// You cannot use policies to control authentication operations. For more information, +// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) +// in the IAM User Guide. +// +// # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon // Web Services security credentials of the Amazon Web Services account root @@ -1290,15 +1292,15 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a // default of 1 hour. // -// Permissions +// # Permissions // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any Amazon Web Services service with the following exceptions: // -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. +// - You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity. // // We recommend that you do not call GetSessionToken with Amazon Web Services // account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) @@ -1324,13 +1326,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // API operation GetSessionToken for usage and error information. // // Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. 
The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index 2d98d92353..c40f5a2a52 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -14,7 +14,7 @@ // See sts package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ // -// Using the Client +// # Using the Client // // To contact AWS Security Token Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index f324ff108a..12327d0533 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -39,13 +39,14 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a STS client from just a session. -// svc := sts.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go index e2e1d6efe5..bf06b2e7d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } // -// func main() { -// sess := session.New() -// svc := sts.New(sess) +// func main() { +// sess := session.New() +// svc := sts.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. +// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index a5b73cf60e..a55d00d9f3 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,35 @@ +# Release (v1.13.2) + +* No change notes available for this release. + +# Release (v1.13.1) + +* No change notes available for this release. + +# Release (v1.13.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.13.0 + * **Feature**: Adds support for the Smithy httpBearerAuth authentication trait to smithy-go. This allows the SDK to support the bearer authentication flow for API operations decorated with httpBearerAuth. An API client will need to be provided with its own bearer.TokenProvider implementation or use the bearer.StaticTokenProvider implementation. + +# Release (v1.12.1) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.1 + * **Bug Fix**: Fixes a bug where JSON object keys were not escaped. + +# Release (v1.12.0) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.12.0 + * **Feature**: `transport/http`: Add utility for setting context metadata when operation serializer automatically assigns content-type default value. + +# Release (v1.11.3) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.11.3 + * **Dependency Update**: Updates smithy-go unit test dependency go-cmp to 0.5.8. + # Release (v1.11.2) * No change notes available for this release. diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/docs.go b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go new file mode 100644 index 0000000000..1c9b9715cb --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/docs.go @@ -0,0 +1,3 @@ +// Package bearer provides middleware and utilities for authenticating API +// operation calls with a Bearer Token. +package bearer diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go new file mode 100644 index 0000000000..8c7d720995 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/bearer/middleware.go @@ -0,0 +1,104 @@ +package bearer + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Message is the middleware stack's request transport message value. +type Message interface{} + +// Signer provides an interface for implementations to decorate a request +// message with a bearer token. The signer is responsible for validating the +// message type is compatible with the signer. 
+type Signer interface {
+	SignWithBearerToken(context.Context, Token, Message) (Message, error)
+}
+
+// AuthenticationMiddleware provides the Finalize middleware step for signing
+// a request message with a bearer token.
+type AuthenticationMiddleware struct {
+	signer        Signer
+	tokenProvider TokenProvider
+}
+
+// AddAuthenticationMiddleware helper adds the AuthenticationMiddleware to the
+// middleware Stack in the Finalize step with the options provided.
+func AddAuthenticationMiddleware(s *middleware.Stack, signer Signer, tokenProvider TokenProvider) error {
+	return s.Finalize.Add(
+		NewAuthenticationMiddleware(signer, tokenProvider),
+		middleware.After,
+	)
+}
+
+// NewAuthenticationMiddleware returns an initialized AuthenticationMiddleware.
+func NewAuthenticationMiddleware(signer Signer, tokenProvider TokenProvider) *AuthenticationMiddleware {
+	return &AuthenticationMiddleware{
+		signer:        signer,
+		tokenProvider: tokenProvider,
+	}
+}
+
+const authenticationMiddlewareID = "BearerTokenAuthentication"
+
+// ID returns the resolver identifier.
+func (m *AuthenticationMiddleware) ID() string {
+	return authenticationMiddlewareID
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface in order to
+// update the request with bearer token authentication.
+func (m *AuthenticationMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	token, err := m.tokenProvider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware wrap message, %w", err)
+	}
+
+	signedMessage, err := m.signer.SignWithBearerToken(ctx, token, in.Request)
+	if err != nil {
+		return out, metadata, fmt.Errorf("failed AuthenticationMiddleware sign message, %w", err)
+	}
+
+	in.Request = signedMessage
+	return next.HandleFinalize(ctx, in)
+}
+
+// SignHTTPSMessage provides a bearer token authentication implementation that
+// will sign the message with the provided bearer token.
+//
+// Will fail if the message is not a smithy-go HTTP request or the request is
+// not HTTPS.
+type SignHTTPSMessage struct{}
+
+// NewSignHTTPSMessage returns an initialized signer for HTTP messages.
+func NewSignHTTPSMessage() *SignHTTPSMessage {
+	return &SignHTTPSMessage{}
+}
+
+// SignWithBearerToken returns a copy of the HTTP request with the bearer token
+// added via the "Authorization" header, per RFC 6750, https://datatracker.ietf.org/doc/html/rfc6750.
+//
+// Returns an error if the request's URL scheme is not HTTPS, or the request
+// message is not a smithy-go HTTP Request pointer type.
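For reviewers of this new vendored file, the pieces above compose as follows: a Signer decorates the transport message, a TokenProvider supplies the token, and AddAuthenticationMiddleware wires both into the Finalize step. A sketch under the assumption that the StaticTokenProvider and Token types added later in this diff are available (the token value is a placeholder):

```go
package main

import (
	"log"

	"github.com/aws/smithy-go/auth/bearer"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("example", smithyhttp.NewStackRequest)

	// StaticTokenProvider and Token come from the token.go file added
	// later in this diff; the token value is a placeholder.
	provider := bearer.StaticTokenProvider{Token: bearer.Token{Value: "example-token"}}

	if err := bearer.AddAuthenticationMiddleware(stack, bearer.NewSignHTTPSMessage(), provider); err != nil {
		log.Fatal(err)
	}
	// The stack can now be decorated onto a transport handler; every request
	// that flows through Finalize gets an "Authorization: Bearer ..." header,
	// and signing fails if the request is not HTTPS.
}
```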
+func (SignHTTPSMessage) SignWithBearerToken(ctx context.Context, token Token, message Message) (Message, error) {
+	req, ok := message.(*smithyhttp.Request)
+	if !ok {
+		return nil, fmt.Errorf("expect smithy-go HTTP Request, got %T", message)
+	}
+
+	if !req.IsHTTPS() {
+		return nil, fmt.Errorf("bearer token with HTTP request requires HTTPS")
+	}
+
+	reqClone := req.Clone()
+	reqClone.Header.Set("Authorization", "Bearer "+token.Value)
+
+	return reqClone, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token.go b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
new file mode 100644
index 0000000000..be260d4c76
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token.go
@@ -0,0 +1,50 @@
+package bearer
+
+import (
+	"context"
+	"time"
+)
+
+// Token provides a type wrapping a bearer token and expiration metadata.
+type Token struct {
+	Value string
+
+	CanExpire bool
+	Expires   time.Time
+}
+
+// Expired returns if the token's Expires time is before or equal to the time
+// provided. If CanExpire is false, Expired will always return false.
+func (t Token) Expired(now time.Time) bool {
+	if !t.CanExpire {
+		return false
+	}
+	now = now.Round(0)
+	return now.Equal(t.Expires) || now.After(t.Expires)
+}
+
+// TokenProvider provides an interface for retrieving bearer tokens.
+type TokenProvider interface {
+	RetrieveBearerToken(context.Context) (Token, error)
+}
+
+// TokenProviderFunc provides a helper utility to wrap a function as a type
+// that implements the TokenProvider interface.
+type TokenProviderFunc func(context.Context) (Token, error)
+
+// RetrieveBearerToken calls the wrapped function, returning the Token or
+// error.
+func (fn TokenProviderFunc) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	return fn(ctx)
+}
+
+// StaticTokenProvider provides a utility for wrapping a static bearer token
+// value within an implementation of a token provider.
+type StaticTokenProvider struct {
+	Token Token
+}
+
+// RetrieveBearerToken returns the static token specified.
+func (s StaticTokenProvider) RetrieveBearerToken(context.Context) (Token, error) {
+	return s.Token, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
new file mode 100644
index 0000000000..223ddf52bb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/bearer/token_cache.go
@@ -0,0 +1,208 @@
+package bearer
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	smithycontext "github.com/aws/smithy-go/context"
+	"github.com/aws/smithy-go/internal/sync/singleflight"
+)
+
+// package variable that can be overridden in unit tests.
+var timeNow = time.Now
+
+// TokenCacheOptions provides a set of optional configuration options for the
+// TokenCache TokenProvider.
+type TokenCacheOptions struct {
+	// The duration before the token will expire when the credentials will be
+	// refreshed. If DisableAsyncRefresh is true, the RetrieveBearerToken calls
+	// will be blocking.
+	//
+	// Asynchronous refreshes are deduplicated, and only one will be in-flight
+	// at a time. If the token expires while an asynchronous refresh is in
+	// flight, the next call to RetrieveBearerToken will block on that refresh
+	// to return.
+	RefreshBeforeExpires time.Duration
+
+	// The timeout the underlying TokenProvider's RetrieveBearerToken call must
+	// return within, or will be canceled. Defaults to 0, no timeout.
+	//
+	// If the timeout is 0, it's possible for the underlying tokenProvider's
+	// RetrieveBearerToken call to block forever, preventing subsequent
+	// TokenCache attempts to refresh the token.
+	//
+	// If this timeout is reached all pending deduplicated calls to
+	// TokenCache RetrieveBearerToken will fail with an error.
+	RetrieveBearerTokenTimeout time.Duration
+
+	// The minimum duration between asynchronous refresh attempts. If the most
+	// recent asynchronous refresh attempt was within the minimum delay
+	// duration, the call to retrieve will return the current cached token, if
+	// not expired.
+	//
+	// The asynchronous retrieve is deduplicated across multiple calls when
+	// RetrieveBearerToken is called. The asynchronous retrieve is not a
+	// periodic task. It is only performed when the token has not yet expired,
+	// and the current item is within the RefreshBeforeExpires window, and the
+	// TokenCache's RetrieveBearerToken method is called.
+	//
+	// If 0, (default) there will be no minimum delay between asynchronous
+	// refresh attempts.
+	//
+	// If DisableAsyncRefresh is true, this option is ignored.
+	AsyncRefreshMinimumDelay time.Duration
+
+	// Sets if the TokenCache will attempt to refresh the token in the
+	// background asynchronously instead of blocking for credentials to be
+	// refreshed. If disabled, token refresh will be blocking.
+	//
+	// The first call to RetrieveBearerToken will always be blocking, because
+	// there is no cached token.
+	DisableAsyncRefresh bool
+}
+
+// TokenCache provides a utility to cache Bearer Authentication tokens from a
+// wrapped TokenProvider. The TokenCache has options to configure the
+// cache's early and asynchronous refresh of the token.
+type TokenCache struct {
+	options  TokenCacheOptions
+	provider TokenProvider
+
+	cachedToken            atomic.Value
+	lastRefreshAttemptTime atomic.Value
+	sfGroup                singleflight.Group
+}
+
+// NewTokenCache returns an initialized TokenCache that implements the
+// TokenProvider interface. It wraps the provider passed in, and takes a set
+// of optional functional option parameters to configure the token cache.
+func NewTokenCache(provider TokenProvider, optFns ...func(*TokenCacheOptions)) *TokenCache {
+	var options TokenCacheOptions
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	return &TokenCache{
+		options:  options,
+		provider: provider,
+	}
+}
+
+// RetrieveBearerToken returns the token if it could be obtained, or an error
+// if a valid token could not be retrieved.
+//
+// The passed in Context's cancel/deadline/timeout will impact only this
+// individual retrieve call and not any other already queued up calls. This
+// means the underlying provider's RetrieveBearerToken calls could block
+// forever, and not be canceled with the Context. Set RetrieveBearerTokenTimeout
+// to provide a timeout, preventing the underlying TokenProvider from blocking
+// forever.
+//
+// By default, if the passed in Context is canceled, all of its values will be
+// considered expired. The wrapped TokenProvider will not be able to look up
+// the values from the Context once it is expired. This is done to protect
+// against expired values no longer being valid. To disable this behavior, use
+// smithy-go's context.WithPreserveExpiredValues to add a value to the Context
+// before calling RetrieveBearerToken to enable support for expired values.
+//
+// Without RetrieveBearerTokenTimeout there is the potential for an underlying
+// Provider's RetrieveBearerToken call to sit forever, blocking subsequent
+// attempts at refreshing the token.
+func (p *TokenCache) RetrieveBearerToken(ctx context.Context) (Token, error) {
+	cachedToken, ok := p.getCachedToken()
+	if !ok || cachedToken.Expired(timeNow()) {
+		return p.refreshBearerToken(ctx)
+	}
+
+	// Check if the token should be refreshed before it expires.
+	refreshToken := cachedToken.Expired(timeNow().Add(p.options.RefreshBeforeExpires))
+	if !refreshToken {
+		return cachedToken, nil
+	}
+
+	if p.options.DisableAsyncRefresh {
+		return p.refreshBearerToken(ctx)
+	}
+
+	p.tryAsyncRefresh(ctx)
+
+	return cachedToken, nil
+}
+
+// tryAsyncRefresh attempts to asynchronously refresh the token, returning the
+// already cached token. If the AsyncRefreshMinimumDelay option is not zero, and
+// the duration since the last refresh is less than that value, nothing will be
+// done.
+func (p *TokenCache) tryAsyncRefresh(ctx context.Context) {
+	if p.options.AsyncRefreshMinimumDelay != 0 {
+		var lastRefreshAttempt time.Time
+		if v := p.lastRefreshAttemptTime.Load(); v != nil {
+			lastRefreshAttempt = v.(time.Time)
+		}
+
+		if timeNow().Before(lastRefreshAttempt.Add(p.options.AsyncRefreshMinimumDelay)) {
+			return
+		}
+	}
+
+	// Ignore the returned channel so this won't be blocking, and limit the
+	// number of additional goroutines created.
+	p.sfGroup.DoChan("async-refresh", func() (interface{}, error) {
+		res, err := p.refreshBearerToken(ctx)
+		if p.options.AsyncRefreshMinimumDelay != 0 {
+			var refreshAttempt time.Time
+			if err != nil {
+				refreshAttempt = timeNow()
+			}
+			p.lastRefreshAttemptTime.Store(refreshAttempt)
+		}
+
+		return res, err
+	})
+}
+
+func (p *TokenCache) refreshBearerToken(ctx context.Context) (Token, error) {
+	resCh := p.sfGroup.DoChan("refresh-token", func() (interface{}, error) {
+		ctx := smithycontext.WithSuppressCancel(ctx)
+		if v := p.options.RetrieveBearerTokenTimeout; v != 0 {
+			var cancel func()
+			ctx, cancel = context.WithTimeout(ctx, v)
+			defer cancel()
+		}
+		return p.singleRetrieve(ctx)
+	})
+
+	select {
+	case res := <-resCh:
+		return res.Val.(Token), res.Err
+	case <-ctx.Done():
+		return Token{}, fmt.Errorf("retrieve bearer token canceled, %w", ctx.Err())
+	}
+}
+
+func (p *TokenCache) singleRetrieve(ctx context.Context) (interface{}, error) {
+	token, err := p.provider.RetrieveBearerToken(ctx)
+	if err != nil {
+		return Token{}, fmt.Errorf("failed to retrieve bearer token, %w", err)
+	}
+
+	p.cachedToken.Store(&token)
+	return token, nil
+}
+
+// getCachedToken returns the currently cached token and true if found. Returns
+// false if no token is cached.
+func (p *TokenCache) getCachedToken() (Token, bool) {
+	v := p.cachedToken.Load()
+	if v == nil {
+		return Token{}, false
+	}
+
+	t := v.(*Token)
+	if t == nil || t.Value == "" {
+		return Token{}, false
+	}
+
+	return *t, true
+}
diff --git a/vendor/github.com/aws/smithy-go/context/suppress_expired.go b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
new file mode 100644
index 0000000000..a39b84a278
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/context/suppress_expired.go
@@ -0,0 +1,81 @@
+package context
+
+import "context"
+
+// valueOnlyContext provides a utility to preserve only the values of a
+// Context, suppressing any cancellation or deadline on that context from
+// being propagated downstream of this value.
+//
+// If preserveExpiredValues is false (default), and the valuesCtx is canceled,
+// calls to look up values with the Values method will always return nil.
Setting
+// preserveExpiredValues to true will allow the valueOnlyContext to look up
+// values in valuesCtx even if valuesCtx is canceled.
+//
+// Based on the Go standard libraries net/lookup.go onlyValuesCtx utility.
+// https://github.com/golang/go/blob/da2773fe3e2f6106634673a38dc3a6eb875fe7d8/src/net/lookup.go
+type valueOnlyContext struct {
+	context.Context
+
+	preserveExpiredValues bool
+	valuesCtx             context.Context
+}
+
+var _ context.Context = (*valueOnlyContext)(nil)
+
+// Value looks up the key, returning its value. If configured to not preserve
+// values of expired context, and the wrapping context is canceled, nil will be
+// returned.
+func (v *valueOnlyContext) Value(key interface{}) interface{} {
+	if !v.preserveExpiredValues {
+		select {
+		case <-v.valuesCtx.Done():
+			return nil
+		default:
+		}
+	}
+
+	return v.valuesCtx.Value(key)
+}
+
+// WithSuppressCancel wraps the Context value, suppressing its deadline and
+// cancellation events from being propagated downstream to consumers of the
+// returned context.
+//
+// By default the wrapped Context's Values are available downstream until the
+// wrapped Context is canceled. Once the wrapped Context is canceled, the
+// Values method called on the returned context will no longer look up any
+// key, as the values are now considered expired.
+//
+// To override this behavior, use WithPreserveExpiredValues on the Context
+// before it is wrapped by WithSuppressCancel. This will make the Context
+// returned by WithSuppressCancel allow lookup of expired values.
+func WithSuppressCancel(ctx context.Context) context.Context {
+	return &valueOnlyContext{
+		Context:   context.Background(),
+		valuesCtx: ctx,
+
+		preserveExpiredValues: GetPreserveExpiredValues(ctx),
+	}
+}
+
+type preserveExpiredValuesKey struct{}
+
+// WithPreserveExpiredValues adds a Value to the Context if expired values
+// should be preserved, and looked up by a Context wrapped by
+// WithSuppressCancel.
+//
+// WithPreserveExpiredValues must be added as a value to a Context, before that
+// Context is wrapped by WithSuppressCancel.
+func WithPreserveExpiredValues(ctx context.Context, enable bool) context.Context {
+	return context.WithValue(ctx, preserveExpiredValuesKey{}, enable)
+}
+
+// GetPreserveExpiredValues looks up and returns the PreserveExpiredValues
+// value in the context, returning true if enabled, false otherwise.
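A small sketch of the preserve-expired-values flow documented above (not part of the diff; the userKey type and value are illustrative only):

```go
package main

import (
	"context"
	"fmt"

	smithycontext "github.com/aws/smithy-go/context"
)

// userKey is an illustrative context key, not part of the diff.
type userKey struct{}

func main() {
	parent, cancel := context.WithCancel(context.Background())

	// Opt in to preserving values past cancellation, then attach a value.
	ctx := smithycontext.WithPreserveExpiredValues(parent, true)
	ctx = context.WithValue(ctx, userKey{}, "alice")

	// Detach cancellation: detached keeps ctx's values but ignores its
	// cancel/deadline.
	detached := smithycontext.WithSuppressCancel(ctx)

	cancel() // the parent is canceled...

	// ...but the value is still visible, because expired values were preserved.
	fmt.Println(detached.Value(userKey{})) // prints: alice
}
```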
+func GetPreserveExpiredValues(ctx context.Context) bool {
+	v := ctx.Value(preserveExpiredValuesKey{})
+	if v != nil {
+		return v.(bool)
+	}
+	return false
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/json/object.go b/vendor/github.com/aws/smithy-go/encoding/json/object.go
index 15fb6478ce..722346d035 100644
--- a/vendor/github.com/aws/smithy-go/encoding/json/object.go
+++ b/vendor/github.com/aws/smithy-go/encoding/json/object.go
@@ -17,9 +17,7 @@ func newObject(w *bytes.Buffer, scratch *[]byte) *Object {
 }
 
 func (o *Object) writeKey(key string) {
-	o.w.WriteRune(quote)
-	o.w.Write([]byte(key))
-	o.w.WriteRune(quote)
+	escapeStringBytes(o.w, []byte(key))
 	o.w.WriteRune(colon)
 }
 
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index 7e252ec8c1..146875124b 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
 package smithy
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.2"
+const goModuleVersion = "1.13.2"
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
new file mode 100644
index 0000000000..fe6a62006a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
new file mode 100644
index 0000000000..9c9d02b94b
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/docs.go
@@ -0,0 +1,8 @@
+// Package singleflight provides a duplicate function call suppression
+// mechanism. This package is a fork of the Go golang.org/x/sync/singleflight
+// package. The package is forked, because the package is a part of the unstable
+// and unversioned golang.org/x/sync module.
+// +// https://github.com/golang/sync/tree/67f06af15bc961c363a7260195bcd53487529a21/singleflight + +package singleflight diff --git a/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..e8a1b17d56 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package singleflight + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. +func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. 
+// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + normalReturn := false + recovered := false + + // use double-defer to distinguish panic from runtime.Goexit, + // more details see https://golang.org/cl/134395 + defer func() { + // the given function invoked runtime.Goexit + if !normalReturn && !recovered { + c.err = errGoexit + } + + c.wg.Done() + g.mu.Lock() + defer g.mu.Unlock() + if !c.forgotten { + delete(g.m, key) + } + + if e, ok := c.err.(*panicError); ok { + // In order to prevent the waiting channels from being blocked forever, + // needs to ensure that this panic cannot be recovered. + if len(c.chans) > 0 { + go panic(e) + select {} // Keep this goroutine around so that it will appear in the crash dump. + } else { + panic(e) + } + } else if c.err == errGoexit { + // Already in the process of goexit, no need to call again + } else { + // Normal return + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + } + }() + + func() { + defer func() { + if !normalReturn { + // Ideally, we would wait to take a stack trace until we've determined + // whether this is a panic or a runtime.Goexit. + // + // Unfortunately, the only way we can distinguish the two is to see + // whether the recover stopped the goroutine from terminating, and by + // the time we know that, the part of the stack trace relevant to the + // panic has been discarded. + if r := recover(); r != nil { + c.err = newPanicError(r) + } + } + }() + + c.val, c.err = fn() + normalReturn = true + }() + + if !normalReturn { + recovered = true + } +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go index 49884e6afb..eac32b4bab 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_headers.go @@ -7,6 +7,85 @@ import ( "github.com/aws/smithy-go/middleware" ) +type isContentTypeAutoSet struct{} + +// SetIsContentTypeDefaultValue returns a Context specifying if the request's +// content-type header was set to a default value. +func SetIsContentTypeDefaultValue(ctx context.Context, isDefault bool) context.Context { + return context.WithValue(ctx, isContentTypeAutoSet{}, isDefault) +} + +// GetIsContentTypeDefaultValue returns if the content-type HTTP header on the +// request is a default value that was auto assigned by an operation +// serializer. Allows middleware post serialization to know if the content-type +// was auto set to a default value or not. 
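For orientation, this is the duplicate-suppression behavior that the Do documentation above describes. The sketch uses golang.org/x/sync/singleflight, which exposes the identical API, since the vendored fork is internal to smithy-go and not importable directly:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All three goroutines share a single execution of the function;
			// the duplicates block until the first call completes.
			v, err, shared := g.Do("credentials", func() (interface{}, error) {
				time.Sleep(50 * time.Millisecond) // stands in for expensive work
				return "fresh-token", nil
			})
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}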
+// +// Also returns false if the Context value was never updated to include if +// content-type was set to a default value. +func GetIsContentTypeDefaultValue(ctx context.Context) bool { + v, _ := ctx.Value(isContentTypeAutoSet{}).(bool) + return v +} + +// AddNoPayloadDefaultContentTypeRemover Adds the DefaultContentTypeRemover +// middleware to the stack after the operation serializer. This middleware will +// remove the content-type header from the request if it was set as a default +// value, and no request payload is present. +// +// Returns error if unable to add the middleware. +func AddNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + err = stack.Serialize.Insert(removeDefaultContentType{}, + "OperationSerializer", middleware.After) + if err != nil { + return fmt.Errorf("failed to add %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + } + + return nil +} + +// RemoveNoPayloadDefaultContentTypeRemover removes the +// DefaultContentTypeRemover middleware from the stack. Returns an error if +// unable to remove the middleware. +func RemoveNoPayloadDefaultContentTypeRemover(stack *middleware.Stack) (err error) { + _, err = stack.Serialize.Remove(removeDefaultContentType{}.ID()) + if err != nil { + return fmt.Errorf("failed to remove %s serialize middleware, %w", + removeDefaultContentType{}.ID(), err) + + } + return nil +} + +// removeDefaultContentType provides after serialization middleware that will +// remove the content-type header from an HTTP request if the header was set as +// a default value by the operation serializer, and there is no request payload. +type removeDefaultContentType struct{} + +// ID returns the middleware ID +func (removeDefaultContentType) ID() string { return "RemoveDefaultContentType" } + +// HandleSerialize implements the serialization middleware. +func (removeDefaultContentType) HandleSerialize( + ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, meta middleware.Metadata, err error, +) { + req, ok := input.Request.(*Request) + if !ok { + return out, meta, fmt.Errorf( + "unexpected request type %T for removeDefaultContentType middleware", + input.Request) + } + + if GetIsContentTypeDefaultValue(ctx) && req.GetStream() == nil { + req.Header.Del("Content-Type") + input.Request = req + } + + return next.HandleSerialize(ctx, input) +} + type headerValue struct { header string value string diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go index ffac684f4d..7177d6f957 100644 --- a/vendor/github.com/aws/smithy-go/transport/http/request.go +++ b/vendor/github.com/aws/smithy-go/transport/http/request.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" iointernal "github.com/aws/smithy-go/transport/http/internal/io" ) @@ -33,6 +34,14 @@ func NewStackRequest() interface{} { } } +// IsHTTPS returns if the request is HTTPS. Returns false if no endpoint URL is set. +func (r *Request) IsHTTPS() bool { + if r.URL == nil { + return false + } + return strings.EqualFold(r.URL.Scheme, "https") +} + // Clone returns a deep copy of the Request for the new context. A reference to // the Stream is copied, but the underlying stream is not copied. 
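A plausible wiring of the new content-type remover into a smithy middleware stack might look like the sketch below; the operation name "PutThing" is invented for the example:

package clientcfg

import (
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func configureStack() (*middleware.Stack, error) {
	stack := middleware.NewStack("PutThing", smithyhttp.NewStackRequest)
	// Strip a defaulted Content-Type header when the request carries no payload.
	if err := smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
		return nil, err
	}
	return stack, nil
}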
func (r *Request) Clone() *Request { diff --git a/vendor/github.com/PaesslerAG/gval/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore similarity index 82% rename from vendor/github.com/PaesslerAG/gval/.gitignore rename to vendor/github.com/cenkalti/backoff/v4/.gitignore index 98576e3004..50d95c548b 100644 --- a/vendor/github.com/PaesslerAG/gval/.gitignore +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -20,11 +20,6 @@ _cgo_export.* _testmain.go *.exe -*.test -coverage.out -manual_test.go -*.out -*.err - -.vscode \ No newline at end of file +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml new file mode 100644 index 0000000000..c79105c2fb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.13 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 0000000000..16abdfc084 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,32 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. 
+* If the proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 0000000000..3676ee405d --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also a Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 0000000000..48482330eb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 0000000000..2c56c1e718 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,161 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
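+ +A quick worked check with the default values (InitialInterval = 500 ms, RandomizationFactor = 0.5, +Multiplier = 1.5): the first NextBackOff() draws from [250 ms, 750 ms]; the retry interval then +grows to 750 ms, so the second call draws from [375 ms, 1.125 s] - matching rows 1 and 2 of the +table above.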
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. 
+ } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 0000000000..1ce2507ebc --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,112 @@ +package backoff + +import ( + "errors" + "time" +) + +// An Operation is executed by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy states to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + var err error + var next time.Duration + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return cerr + } + + return err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +}
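Putting retry.go together with the exponential policy above, typical calling code looks something like this sketch (the transient failure is simulated; WithMaxRetries is defined in tries.go further below):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried with growing delays
		}
		return nil // succeeds on the third attempt
	}

	// Cap the exponential policy at 5 retries. Returning backoff.Permanent(err)
	// from op would abort immediately instead of retrying.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)

	notify := func(err error, wait time.Duration) {
		fmt.Printf("attempt failed (%v); retrying in %v\n", err, wait)
	}
	if err := backoff.RetryNotify(op, policy, notify); err != nil {
		fmt.Println("gave up:", err)
	}
}

+ +// Permanent wraps the given err in a *PermanentError.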
+func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 0000000000..df9d68bce5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000000..8120d0213c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
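Where a channel-based flow fits better than the Retry callback style, the Ticker above drives the loop; a small sketch (the third attempt is pretended to succeed):

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	tries := 0
	for range ticker.C { // guaranteed to tick at least once
		tries++
		if tries == 3 {
			fmt.Println("succeeded on attempt", tries)
			return
		}
		fmt.Println("attempt", tries, "failed; waiting for next tick")
	}
}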
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 0000000000..28d58ca37c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/clbanning/mxj/v2/.travis.yml b/vendor/github.com/clbanning/mxj/v2/.travis.yml new file mode 100644 index 0000000000..9c8611554b --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: +- 1.x \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/LICENSE b/vendor/github.com/clbanning/mxj/v2/LICENSE new file mode 100644 index 0000000000..1ada8807df --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012-2021 Charles Banning . All rights reserved. + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/clbanning/mxj/v2/anyxml.go b/vendor/github.com/clbanning/mxj/v2/anyxml.go new file mode 100644 index 0000000000..63970ee249 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/anyxml.go @@ -0,0 +1,201 @@ +package mxj + +import ( + "bytes" + "encoding/xml" + "reflect" +) + +const ( + DefaultElementTag = "element" +) + +// Encode arbitrary value as XML. +// +// Note: unmarshaling the resultant +// XML may not return the original value, since tag labels may have been injected +// to create the XML representation of the value. +/* + Encode an arbitrary JSON object. 
+ package main + + import ( + "encoding/json" + "fmt" + "github.com/clbanning/mxj" + ) + + func main() { + jsondata := []byte(`[ + { "somekey":"somevalue" }, + "string", + 3.14159265, + true + ]`) + var i interface{} + err := json.Unmarshal(jsondata, &i) + if err != nil { + // do something + } + x, err := mxj.AnyXmlIndent(i, "", " ", "mydoc") + if err != nil { + // do something else + } + fmt.Println(string(x)) + } + + output: + <mydoc> + <somekey>somevalue</somekey> + <element>string</element> + <element>3.14159265</element> + <element>true</element> + </mydoc> + +An extreme example is available in examples/goofy_map.go. +*/ +// Alternative values for DefaultRootTag and DefaultElementTag can be set as: +// AnyXml( v, myRootTag, myElementTag). +func AnyXml(v interface{}, tags ...string) ([]byte, error) { + var rt, et string + if len(tags) == 1 || len(tags) == 2 { + rt = tags[0] + } else { + rt = DefaultRootTag + } + if len(tags) == 2 { + et = tags[1] + } else { + et = DefaultElementTag + } + + if v == nil { + if useGoXmlEmptyElemSyntax { + return []byte("<" + rt + "></" + rt + ">"), nil + } + return []byte("<" + rt + "/>"), nil + } + if reflect.TypeOf(v).Kind() == reflect.Struct { + return xml.Marshal(v) + } + + var err error + s := new(bytes.Buffer) + p := new(pretty) + + var b []byte + switch v.(type) { + case []interface{}: + if _, err = s.WriteString("<" + rt + ">"); err != nil { + return nil, err + } + for _, vv := range v.([]interface{}) { + switch vv.(type) { + case map[string]interface{}: + m := vv.(map[string]interface{}) + if len(m) == 1 { + for tag, val := range m { + err = marshalMapToXmlIndent(false, s, tag, val, p) + } + } else { + err = marshalMapToXmlIndent(false, s, et, vv, p) + } + default: + err = marshalMapToXmlIndent(false, s, et, vv, p) + } + if err != nil { + break + } + } + if _, err = s.WriteString("</" + rt + ">"); err != nil { + return nil, err + } + b = s.Bytes() + case map[string]interface{}: + m := Map(v.(map[string]interface{})) + b, err = m.Xml(rt) + default: + err = marshalMapToXmlIndent(false, s, rt, v, p) + b = s.Bytes() + } + + return b, err +} + +// Encode an arbitrary value as a pretty XML string. +// Alternative values for DefaultRootTag and DefaultElementTag can be set as: +// AnyXmlIndent( v, "", " ", myRootTag, myElementTag).
+func AnyXmlIndent(v interface{}, prefix, indent string, tags ...string) ([]byte, error) { + var rt, et string + if len(tags) == 1 || len(tags) == 2 { + rt = tags[0] + } else { + rt = DefaultRootTag + } + if len(tags) == 2 { + et = tags[1] + } else { + et = DefaultElementTag + } + + if v == nil { + if useGoXmlEmptyElemSyntax { + return []byte(prefix + "<" + rt + "></" + rt + ">"), nil + } + return []byte(prefix + "<" + rt + "/>"), nil + } + if reflect.TypeOf(v).Kind() == reflect.Struct { + return xml.MarshalIndent(v, prefix, indent) + } + + var err error + s := new(bytes.Buffer) + p := new(pretty) + p.indent = indent + p.padding = prefix + + var b []byte + switch v.(type) { + case []interface{}: + if _, err = s.WriteString("<" + rt + ">\n"); err != nil { + return nil, err + } + p.Indent() + for _, vv := range v.([]interface{}) { + switch vv.(type) { + case map[string]interface{}: + m := vv.(map[string]interface{}) + if len(m) == 1 { + for tag, val := range m { + err = marshalMapToXmlIndent(true, s, tag, val, p) + } + } else { + p.start = 1 // we're 1 tag in + err = marshalMapToXmlIndent(true, s, et, vv, p) + // *s += "\n" + if _, err = s.WriteString("\n"); err != nil { + return nil, err + } + } + default: + p.start = 0 // in case trailing p.start = 1 + err = marshalMapToXmlIndent(true, s, et, vv, p) + } + if err != nil { + break + } + } + if _, err = s.WriteString(`</` + rt + `>`); err != nil { + return nil, err + } + b = s.Bytes() + case map[string]interface{}: + m := Map(v.(map[string]interface{})) + b, err = m.XmlIndent(prefix, indent, rt) + default: + err = marshalMapToXmlIndent(true, s, rt, v, p) + b = s.Bytes() + } + + return b, err +} diff --git a/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml b/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml new file mode 100644 index 0000000000..474575a41c --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/atomFeedString.xml @@ -0,0 +1,54 @@ + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc).
+ + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + diff --git a/vendor/github.com/clbanning/mxj/v2/doc.go b/vendor/github.com/clbanning/mxj/v2/doc.go new file mode 100644 index 0000000000..bede312651 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/doc.go @@ -0,0 +1,138 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2019, Charles Banning. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file + +/* +Marshal/Unmarshal XML to/from map[string]interface{} values (and JSON); extract/modify values from maps by key or key-path, including wildcards. + +mxj supplants the legacy x2j and j2x packages. The subpackage x2j-wrapper is provided to facilitate migrating from the x2j package. The x2j and j2x subpackages provide similar functionality of the old packages but are not function-name compatible with them. + +Note: this library was designed for processing ad hoc anonymous messages. Bulk processing large data sets may be much more efficiently performed using the encoding/xml or encoding/json packages from Go's standard library directly. + +Related Packages: + checkxml: github.com/clbanning/checkxml provides functions for validating XML data. + +Notes: + 2020.05.01: v2.2 - optimize map to XML encoding for large XML docs. + 2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq. + 2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq. + 2019.01.21: DecodeSimpleValuesAsMap - decode to map[:map["#text":]] rather than map[:]. + 2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc. + 2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps. + 2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package. + 2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing. + 2017.02.21: github.com/clbanning/checkxml provides functions for validating XML data. + 2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods. + 2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag(). + 2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc. + 2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix(). + 2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable. + 2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars(). 
+ 2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf". + To cast them to float64, first set flag with CastNanInf(true). + 2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure. + 2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization. + 2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM). + 2015-12-02: NewMapXmlSeq() with mv.XmlSeq() & co. will try to preserve structure of XML doc when re-encoding. + 2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML. + +SUMMARY + + type Map map[string]interface{} + + Create a Map value, 'mv', from any map[string]interface{} value, 'v': + mv := Map(v) + + Unmarshal / marshal XML as a Map value, 'mv': + mv, err := NewMapXml(xmlValue) // unmarshal + xmlValue, err := mv.Xml() // marshal + + Unmarshal XML from an io.Reader as a Map value, 'mv': + mv, err := NewMapXmlReader(xmlReader) // repeated calls, as with an os.File Reader, will process stream + mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded + + Marshal Map value, 'mv', to an XML Writer (io.Writer): + err := mv.XmlWriter(xmlWriter) + raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter + + Also, for prettified output: + xmlValue, err := mv.XmlIndent(prefix, indent, ...) + err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...) + raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...) + + Bulk process XML with error handling (note: handlers must return a boolean value): + err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error)) + err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte)) + + Converting XML to JSON: see Examples for NewMapXml and HandleXmlReader. + + There are comparable functions and methods for JSON processing. + + Arbitrary structure values can be decoded to / encoded from Map values: + mv, err := NewMapStruct(structVal) + err := mv.Struct(structPointer) + + To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON + or structure to a Map value, 'mv', or cast a map[string]interface{} value to a Map value, 'mv', then: + paths := mv.PathsForKey(key) + path := mv.PathForKeyShortest(key) + values, err := mv.ValuesForKey(key, subkeys) + values, err := mv.ValuesForPath(path, subkeys) // 'path' can be dot-notation with wildcards and indexed arrays. + count, err := mv.UpdateValuesForPath(newVal, path, subkeys) + + Get everything at once, irrespective of path depth: + leafnodes := mv.LeafNodes() + leafvalues := mv.LeafValues() + + A new Map with whatever keys are desired can be created from the current Map and then encoded in XML + or JSON. (Note: keys can use dot-notation. 'oldKey' can also use wildcards and indexed arrays.) + newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N") + newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go" + newXml, err := newMap.Xml() // for example + newJson, err := newMap.Json() // ditto + +XML PARSING CONVENTIONS + + Using NewMapXml() + + - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`, + to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or + `SetAttrPrefix()`.) 
+ - If the element is a simple element and has attributes, the element value + is given the key `#text` for its `map[string]interface{}` representation. (See + the 'atomFeedString.xml' test data, below.) + - XML comments, directives, and process instructions are ignored. + - If CoerceKeysToLower() has been called, then the resultant keys will be lower case. + + Using NewMapXmlSeq() + + - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values + where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the + value for `<attr_label>`. + - All elements, except for the root, have a "#seq" key. + - Comments, directives, and process instructions are unmarshalled into the Map using the + keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more + specifics.) + - Name space syntax is preserved: + - <ns:key>something</ns:key> parses to map["ns:key"]interface{}{"something"} + - xmlns:ns="http://myns.com/ns" parses to map["xmlns:ns"]interface{}{"http://myns.com/ns"} + + Both + + - By default, "NaN", "Inf", and "-Inf" values are not cast to float64. If you want them + to be cast, set a flag to cast them using CastNanInf(true). + +XML ENCODING CONVENTIONS + + - 'nil' Map values, which may represent 'null' JSON values, are encoded as "<tag/>". + NOTE: the operation is not symmetric as "<tag/>" elements are decoded as 'tag:""' Map values, + which, then, encode in JSON as '"tag":""' values. + - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go + randomizes the walk through map[string]interface{} values.) If you plan to re-encode the + Map value to XML and want the same sequencing of elements look at NewMapXmlSeq() and + mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when + working with the Map representation. + +*/ +package mxj diff --git a/vendor/github.com/clbanning/mxj/v2/escapechars.go b/vendor/github.com/clbanning/mxj/v2/escapechars.go new file mode 100644 index 0000000000..eeb3d25010 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/escapechars.go @@ -0,0 +1,93 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" +) + +var xmlEscapeChars bool + +// XMLEscapeChars(true) forces escaping invalid characters in attribute and element values. +// NOTE: this is brute force with NO interrogation of '&' being escaped already; if it is +// then '&' will be re-escaped as '&amp;'. +// +/* + The values are: + " &quot; + ' &apos; + < &lt; + > &gt; + & &amp; +*/ +// +// Note: if XMLEscapeCharsDecoder(true) has been called - or the default, 'false,' value +// has been toggled to 'true' - then XMLEscapeChars(true) is ignored. If XMLEscapeChars(true) +// has already been called before XMLEscapeCharsDecoder(true), XMLEscapeChars(false) is called +// to turn escape encoding off for mv.Xml, etc., to prevent double escaping ampersands, '&'. +func XMLEscapeChars(b ...bool) { + var bb bool + if len(b) == 0 { + bb = !xmlEscapeChars + } else { + bb = b[0] + } + if bb == true && xmlEscapeCharsDecoder == false { + xmlEscapeChars = true + } else { + xmlEscapeChars = false + } +}
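A short sketch of the escape toggle in use; with a single-key map the key doubles as the root tag:

package main

import (
	"fmt"

	"github.com/clbanning/mxj/v2"
)

func main() {
	mxj.XMLEscapeChars(true) // escape &, <, >, ', " in element values on encode

	m := mxj.Map{"expr": "a < b & c"}
	x, err := m.Xml()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(x)) // <expr>a &lt; b &amp; c</expr>
}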
+ +// Scan for '&' first, since 's' may contain "&" that is parsed to "&amp;" +// - or "<" that is parsed to "&lt;". +var escapechars = [][2][]byte{ + {[]byte(`&`), []byte(`&amp;`)}, + {[]byte(`<`), []byte(`&lt;`)}, + {[]byte(`>`), []byte(`&gt;`)}, + {[]byte(`"`), []byte(`&quot;`)}, + {[]byte(`'`), []byte(`&apos;`)}, +} + +func escapeChars(s string) string { + if len(s) == 0 { + return s + } + + b := []byte(s) + for _, v := range escapechars { + n := bytes.Count(b, v[0]) + if n == 0 { + continue + } + b = bytes.Replace(b, v[0], v[1], n) + } + return string(b) +} + +// per issue #84, escape CharData values from xml.Decoder + +var xmlEscapeCharsDecoder bool + +// XMLEscapeCharsDecoder(b ...bool) escapes XML characters in xml.CharData values +// returned by Decoder.Token. Thus, the internal Map values will contain escaped +// values, and you do not need to set XMLEscapeChars for proper encoding. +// +// By default, the Map values have the non-escaped values returned by Decoder.Token. +// XMLEscapeCharsDecoder(true) - or, XMLEscapeCharsDecoder() - will toggle escape +// encoding 'on.' +// +// Note: if XMLEscapeCharsDecoder(true) is called then XMLEscapeChars(false) is +// called to prevent re-escaping the values on encoding using mv.Xml, etc. +func XMLEscapeCharsDecoder(b ...bool) { + if len(b) == 0 { + xmlEscapeCharsDecoder = !xmlEscapeCharsDecoder + } else { + xmlEscapeCharsDecoder = b[0] + } + if xmlEscapeCharsDecoder == true && xmlEscapeChars == true { + xmlEscapeChars = false + } +} diff --git a/vendor/github.com/clbanning/mxj/v2/exists.go b/vendor/github.com/clbanning/mxj/v2/exists.go new file mode 100644 index 0000000000..07aeda43f8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/exists.go @@ -0,0 +1,9 @@ +package mxj + +// Checks whether the path exists. If err != nil then 'false' is returned +// along with the error encountered parsing either the "path" or "subkeys" +// argument. +func (mv Map) Exists(path string, subkeys ...string) (bool, error) { + v, err := mv.ValuesForPath(path, subkeys...) + return (err == nil && len(v) > 0), err +} diff --git a/vendor/github.com/clbanning/mxj/v2/files.go b/vendor/github.com/clbanning/mxj/v2/files.go new file mode 100644 index 0000000000..27e06e1e80 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files.go @@ -0,0 +1,287 @@ +package mxj + +import ( + "fmt" + "io" + "os" +) + +type Maps []Map + +func NewMaps() Maps { + return make(Maps, 0) +} + +type MapRaw struct { + M Map + R []byte +} + +// NewMapsFromJsonFile - creates an array from a file of JSON values. +func NewMapsFromJsonFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromJsonFileRaw - creates an array of MapRaw from a file of JSON values.
+func NewMapsFromJsonFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapJsonReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFile - creates an array from a file of XML values. +func NewMapsFromXmlFile(name string) (Maps, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]Map, 0) + for { + m, raw, err := NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(raw)) + } + if len(m) > 0 { + am = append(am, m) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// NewMapsFromXmlFileRaw - creates an array of MapRaw from a file of XML values. +// NOTE: the slice with the raw XML is clean with no extra capacity - unlike NewMapXmlReaderRaw(). +// It is slow at parsing a file from disk and is intended for relatively small utility files. +func NewMapsFromXmlFileRaw(name string) ([]MapRaw, error) { + fi, err := os.Stat(name) + if err != nil { + return nil, err + } + if !fi.Mode().IsRegular() { + return nil, fmt.Errorf("file %s is not a regular file", name) + } + + fh, err := os.Open(name) + if err != nil { + return nil, err + } + defer fh.Close() + + am := make([]MapRaw, 0) + for { + mr := new(MapRaw) + mr.M, mr.R, err = NewMapXmlReaderRaw(fh) + if err != nil && err != io.EOF { + return am, fmt.Errorf("error: %s - reading: %s", err.Error(), string(mr.R)) + } + if len(mr.M) > 0 { + am = append(am, *mr) + } + if err == io.EOF { + break + } + } + return am, nil +} + +// ------------------------ Maps writing ------------------------- +// These are handy-dandy methods for dumping configuration data, etc. 
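Read one way, write the other: the sketch below combines the file readers above with the writing helpers defined next ("records.xml" is a hypothetical multi-document input file):

package main

import (
	"fmt"

	"github.com/clbanning/mxj/v2"
)

func main() {
	// Each XML document in the file becomes one Map in the slice.
	docs, err := mxj.NewMapsFromXmlFile("records.xml")
	if err != nil {
		panic(err)
	}
	// Re-emit the whole set as concatenated JSON.
	s, err := docs.JsonString()
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}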
+ +// JsonString - analogous to mv.Json() +func (mvs Maps) JsonString(safeEncoding ...bool) (string, error) { + var s string + for _, v := range mvs { + j, err := v.Json() + if err != nil { + return s, err + } + s += string(j) + } + return s, nil +} + +// JsonStringIndent - analogous to mv.JsonIndent() +func (mvs Maps) JsonStringIndent(prefix, indent string, safeEncoding ...bool) (string, error) { + var s string + var haveFirst bool + for _, v := range mvs { + j, err := v.JsonIndent(prefix, indent) + if err != nil { + return s, err + } + if haveFirst { + s += "\n" + } else { + haveFirst = true + } + s += string(j) + } + return s, nil +} + +// XmlString - analogous to mv.Xml() +func (mvs Maps) XmlString() (string, error) { + var s string + for _, v := range mvs { + x, err := v.Xml() + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// XmlStringIndent - analogous to mv.XmlIndent() +func (mvs Maps) XmlStringIndent(prefix, indent string) (string, error) { + var s string + for _, v := range mvs { + x, err := v.XmlIndent(prefix, indent) + if err != nil { + return s, err + } + s += string(x) + } + return s, nil +} + +// JsonFile - write Maps to named file as JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonWriter method. +func (mvs Maps) JsonFile(file string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonString(encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// JsonFileIndent - write Maps to named file as pretty JSON +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use JsonIndentWriter method. +func (mvs Maps) JsonFileIndent(file, prefix, indent string, safeEncoding ...bool) error { + var encoding bool + if len(safeEncoding) == 1 { + encoding = safeEncoding[0] + } + s, err := mvs.JsonStringIndent(prefix, indent, encoding) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFile - write Maps to named file as XML +// Note: the file will be created, if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlWriter method. +func (mvs Maps) XmlFile(file string) error { + s, err := mvs.XmlString() + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} + +// XmlFileIndent - write Maps to named file as pretty XML +// Note: the file will be created,if necessary; if it exists it will be truncated. +// If you need to append to a file, open it and use XmlIndentWriter method. 
+func (mvs Maps) XmlFileIndent(file, prefix, indent string) error { + s, err := mvs.XmlStringIndent(prefix, indent) + if err != nil { + return err + } + fh, err := os.Create(file) + if err != nil { + return err + } + defer fh.Close() + fh.WriteString(s) + return nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.badjson b/vendor/github.com/clbanning/mxj/v2/files_test.badjson new file mode 100644 index 0000000000..d18720044a --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.badjson @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"some", "bad":JSON, "in":"it" } diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.badxml b/vendor/github.com/clbanning/mxj/v2/files_test.badxml new file mode 100644 index 0000000000..4736ef973d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.badxml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.json b/vendor/github.com/clbanning/mxj/v2/files_test.json new file mode 100644 index 0000000000..e9a3ddf40e --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.json @@ -0,0 +1,2 @@ +{ "this":"is", "a":"test", "file":"for", "files_test.go":"case" } +{ "with":"just", "two":2, "JSON":"values", "true":true } diff --git a/vendor/github.com/clbanning/mxj/v2/files_test.xml b/vendor/github.com/clbanning/mxj/v2/files_test.xml new file mode 100644 index 0000000000..65cf021fb7 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test.xml @@ -0,0 +1,9 @@ + + test + for files.go + + + some + doc + test case + diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_dup.json b/vendor/github.com/clbanning/mxj/v2/files_test_dup.json new file mode 100644 index 0000000000..2becb6a451 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_dup.json @@ -0,0 +1 @@ +{"a":"test","file":"for","files_test.go":"case","this":"is"}{"JSON":"values","true":true,"two":2,"with":"just"} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml b/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml new file mode 100644 index 0000000000..f68d22e28e --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_dup.xml @@ -0,0 +1 @@ +for files.gotestdoctest casesome \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_indent.json b/vendor/github.com/clbanning/mxj/v2/files_test_indent.json new file mode 100644 index 0000000000..6fde15634d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_indent.json @@ -0,0 +1,12 @@ +{ + "a": "test", + "file": "for", + "files_test.go": "case", + "this": "is" +} +{ + "JSON": "values", + "true": true, + "two": 2, + "with": "just" +} \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml b/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml new file mode 100644 index 0000000000..8c91a1dc20 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/files_test_indent.xml @@ -0,0 +1,8 @@ + + for files.go + test + + doc + test case + some + \ No newline at end of file diff --git a/vendor/github.com/clbanning/mxj/v2/gob.go b/vendor/github.com/clbanning/mxj/v2/gob.go new file mode 100644 index 0000000000..d56c2fd6fe --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/gob.go @@ -0,0 +1,35 @@ +// gob.go - Encode/Decode a Map into a gob object. 
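+//
+// An illustrative round-trip (error handling elided; not part of the
+// upstream file):
+//
+//	b, _ := mv.Gob()      // encode the Map 'mv' to gob bytes
+//	m2, _ := NewMapGob(b) // decode them back to a Map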
+ +package mxj + +import ( + "bytes" + "encoding/gob" +) + +// NewMapGob returns a Map value for a gob object that has been +// encoded from a map[string]interface{} (or compatible type) value. +// It is intended to provide symmetric handling of Maps that have +// been encoded using mv.Gob. +func NewMapGob(gobj []byte) (Map, error) { + m := make(map[string]interface{}, 0) + if len(gobj) == 0 { + return m, nil + } + r := bytes.NewReader(gobj) + dec := gob.NewDecoder(r) + if err := dec.Decode(&m); err != nil { + return m, err + } + return m, nil +} + +// Gob returns a gob-encoded value for the Map 'mv'. +func (mv Map) Gob() ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + if err := enc.Encode(map[string]interface{}(mv)); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/json.go b/vendor/github.com/clbanning/mxj/v2/json.go new file mode 100644 index 0000000000..eb2c05a186 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/json.go @@ -0,0 +1,323 @@ +// Copyright 2012-2014 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "time" +) + +// ------------------------------ write JSON ----------------------- + +// Just a wrapper on json.Marshal. +// If option safeEncoding is'true' then safe encoding of '<', '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) Json(safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.Marshal(mv) + + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// Just a wrapper on json.MarshalIndent. +// If option safeEncoding is'true' then safe encoding of '<' , '>' and '&' +// is preserved. (see encoding/json#Marshal, encoding/json#Encode) +func (mv Map) JsonIndent(prefix, indent string, safeEncoding ...bool) ([]byte, error) { + var s bool + if len(safeEncoding) == 1 { + s = safeEncoding[0] + } + + b, err := json.MarshalIndent(mv, prefix, indent) + if !s { + b = bytes.Replace(b, []byte("\\u003c"), []byte("<"), -1) + b = bytes.Replace(b, []byte("\\u003e"), []byte(">"), -1) + b = bytes.Replace(b, []byte("\\u0026"), []byte("&"), -1) + } + return b, err +} + +// The following implementation is provided for symmetry with NewMapJsonReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriter(jsonWriter io.Writer, safeEncoding ...bool) error { + b, err := mv.Json(safeEncoding...) + if err != nil { + return err + } + + _, err = jsonWriter.Write(b) + return err +} + +// Writes the Map as JSON on the Writer. []byte is the raw JSON that was written. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. +func (mv Map) JsonWriterRaw(jsonWriter io.Writer, safeEncoding ...bool) ([]byte, error) { + b, err := mv.Json(safeEncoding...) + if err != nil { + return b, err + } + + _, err = jsonWriter.Write(b) + return b, err +} + +// Writes the Map as pretty JSON on the Writer. +// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved. 
+func (mv Map) JsonIndentWriter(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) error {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return err
+}
+
+// JsonIndentWriterRaw writes the Map as pretty JSON on the Writer. []byte is the raw JSON that was written.
+// If 'safeEncoding' is 'true', then "safe" encoding of '<', '>' and '&' is preserved.
+func (mv Map) JsonIndentWriterRaw(jsonWriter io.Writer, prefix, indent string, safeEncoding ...bool) ([]byte, error) {
+	b, err := mv.JsonIndent(prefix, indent, safeEncoding...)
+	if err != nil {
+		return b, err
+	}
+
+	_, err = jsonWriter.Write(b)
+	return b, err
+}
+
+// --------------------------- read JSON -----------------------------
+
+// JsonUseNumber - decode numeric values as json.Number type Map values - see encoding/json#Number.
+// NOTE: this is for decoding JSON into a Map with NewMapJson(), NewMapJsonReader(),
+// etc.; it does not affect NewMapXml(), etc. The XML encoders mv.Xml() and mv.XmlIndent()
+// do recognize json.Number types; a JSON object can be decoded to a Map with json.Number
+// value types and the resulting Map can be correctly encoded into an XML object.
+var JsonUseNumber bool
+
+// NewMapJson is just a wrapper on json.Unmarshal.
+// Converting JSON to XML is as simple as:
+//
+//	mapVal, merr := mxj.NewMapJson(jsonVal)
+//	if merr != nil {
+//		// handle error
+//	}
+//	xmlVal, xerr := mapVal.Xml()
+//	if xerr != nil {
+//		// handle error
+//	}
+//
+// NOTE: as a special case, passing a list, e.g., [{"some-null-value":"", "a-non-null-value":"bar"}],
+// will be interpreted as having the root key 'object' prepended - {"object":[ ... ]} - to unmarshal to a Map.
+// See mxj/j2x/j2x_test.go.
+func NewMapJson(jsonVal []byte) (Map, error) {
+	// empty or nil begets empty
+	if len(jsonVal) == 0 {
+		m := make(map[string]interface{}, 0)
+		return m, nil
+	}
+	// handle a goofy case ...
+	if jsonVal[0] == '[' {
+		jsonVal = []byte(`{"object":` + string(jsonVal) + `}`)
+	}
+	m := make(map[string]interface{})
+	// err := json.Unmarshal(jsonVal, &m)
+	buf := bytes.NewReader(jsonVal)
+	dec := json.NewDecoder(buf)
+	if JsonUseNumber {
+		dec.UseNumber()
+	}
+	err := dec.Decode(&m)
+	return m, err
+}
+
+// NewMapJsonReader retrieves a Map value from an io.Reader.
+// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+// value in-memory, however, such as http.Request.Body, you CAN use it to efficiently unmarshal
+// a JSON object.
+func NewMapJsonReader(jsonReader io.Reader) (Map, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	return NewMapJson(*jb)
+}
+
+// NewMapJsonReaderRaw retrieves a Map value and raw JSON - []byte - from an io.Reader.
+// NOTE: The raw JSON off the reader is buffered to []byte using a ByteReader. If the io.Reader is an
+// os.File, there may be significant performance impact. If the io.Reader is wrapping a []byte
+// value in-memory, however, such as http.Request.Body, you CAN use it to efficiently unmarshal
+// a JSON object and retrieve the raw JSON in a single call.
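+//
+// A typical use looks like the following sketch ('resp' is an assumed
+// *http.Response; illustrative only):
+//
+//	m, raw, err := NewMapJsonReaderRaw(resp.Body)
+//	if err != nil {
+//		log.Printf("decode failed: %v - raw: %s", err, string(raw))
+//	}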
+func NewMapJsonReaderRaw(jsonReader io.Reader) (Map, []byte, error) {
+	jb, err := getJson(jsonReader)
+	if err != nil || len(*jb) == 0 {
+		return nil, *jb, err
+	}
+
+	// Unmarshal the 'presumed' JSON string
+	m, merr := NewMapJson(*jb)
+	return m, *jb, merr
+}
+
+// Pull the next JSON string off the stream: just read from first '{' to its closing '}'.
+// Returning a pointer to the slice saves 16 bytes - maybe unnecessary, but internal to package.
+func getJson(rdr io.Reader) (*[]byte, error) {
+	bval := make([]byte, 1)
+	jb := make([]byte, 0)
+	var inQuote, inJson bool
+	var parenCnt int
+	var previous byte
+
+	// scan the input for a matched set of {...}
+	// json.Unmarshal will handle syntax checking.
+	for {
+		_, err := rdr.Read(bval)
+		if err != nil {
+			if err == io.EOF && inJson && parenCnt > 0 {
+				return &jb, fmt.Errorf("no closing } for JSON string: %s", string(jb))
+			}
+			return &jb, err
+		}
+		switch bval[0] {
+		case '{':
+			if !inQuote {
+				parenCnt++
+				inJson = true
+			}
+		case '}':
+			if !inQuote {
+				parenCnt--
+			}
+			if parenCnt < 0 {
+				return nil, fmt.Errorf("closing } without opening {: %s", string(jb))
+			}
+		case '"':
+			if inQuote {
+				if previous == '\\' {
+					break
+				}
+				inQuote = false
+			} else {
+				inQuote = true
+			}
+		case '\n', '\r', '\t', ' ':
+			if !inQuote {
+				continue
+			}
+		}
+		if inJson {
+			jb = append(jb, bval[0])
+			if parenCnt == 0 {
+				break
+			}
+		}
+		previous = bval[0]
+	}
+
+	return &jb, nil
+}
+
+// ------------------------------- JSON Reader handler via Map values -----------------------
+
+// Default poll delay to keep Handler from spinning on an open stream
+// like sitting on os.Stdin waiting for input.
+var jhandlerPollInterval = time.Duration(1e6)
+
+// While unnecessary, we make HandleJsonReader() have the same signature as HandleXmlReader().
+// This avoids treating one or the other as a special case and discussing the underlying stdlib logic.
+
+// HandleJsonReader bulk processes JSON using handlers that process a Map value.
+// 'rdr' is an io.Reader for the JSON (stream).
+// 'mapHandler' is the Map processing handler. Return of 'false' stops io.Reader processing.
+// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error.
+// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+// This means that you can stop reading the file on error or after processing a particular message.
+// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'.
+func HandleJsonReader(jsonReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error {
+	var n int
+	for {
+		m, merr := NewMapJsonReader(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
+
+// HandleJsonReaderRaw bulk processes JSON using handlers that process a Map value and the raw JSON.
+// 'rdr' is an io.Reader for the JSON (stream).
+// 'mapHandler' is the Map and raw JSON - []byte - processor. Return of 'false' stops io.Reader processing.
+// 'errHandler' is the error and raw JSON processor. Return of 'false' stops io.Reader processing and returns the error.
+// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized.
+// This means that you can stop reading the file on error or after processing a particular message.
+// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'.
+func HandleJsonReaderRaw(jsonReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error {
+	var n int
+	for {
+		m, raw, merr := NewMapJsonReaderRaw(jsonReader)
+		n++
+
+		// handle error condition with errhandler
+		if merr != nil && merr != io.EOF {
+			merr = fmt.Errorf("[jsonReader: %d] %s", n, merr.Error())
+			if ok := errHandler(merr, raw); !ok {
+				// caused reader termination
+				return merr
+			}
+			continue
+		}
+
+		// pass to maphandler
+		if len(m) != 0 {
+			if ok := mapHandler(m, raw); !ok {
+				break
+			}
+		} else if merr != io.EOF {
+			<-time.After(jhandlerPollInterval)
+		}
+
+		if merr == io.EOF {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/keyvalues.go b/vendor/github.com/clbanning/mxj/v2/keyvalues.go
new file mode 100644
index 0000000000..55620ca22b
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/keyvalues.go
@@ -0,0 +1,668 @@
+// Copyright 2012-2014 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// keyvalues.go: Extract values from an arbitrary XML doc. Tag path can include wildcard characters.
+
+package mxj
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// ----------------------------- get everything FOR a single key -------------------------
+
+const (
+	minArraySize = 32
+)
+
+var defaultArraySize int = minArraySize
+
+// SetArraySize adjusts the buffers for the expected number of values to return from ValuesForKey() and ValuesForPath().
+// This can have the effect of significantly reducing memory allocation-copy functions for large data sets.
+// Returns the initial buffer size.
+func SetArraySize(size int) int {
+	if size > minArraySize {
+		defaultArraySize = size
+	} else {
+		defaultArraySize = minArraySize
+	}
+	return defaultArraySize
+}
+
+// ValuesForKey returns all values in Map, 'mv', associated with a 'key'. If len(returned_values) == 0, then no match.
+// On error, the returned slice is 'nil'. NOTE: 'key' can be wildcard, "*".
+// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+// - By default 'val' is of type string. Use "key:val:bool" and "key:val:float" to coerce them.
+// - For attributes prefix the label with the attribute prefix character, by default a
+//   hyphen, '-', e.g., "-seq:3". (See the SetAttrPrefix function.)
+// - If the 'key' refers to a list, then "key:value" could select a list member of the list.
+// - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+// - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//   exclusion criterion - e.g., "!author:William T. Gaddis".
+// - If val contains a ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForKey(key string, subkeys ...string) ([]interface{}, error) {
+	m := map[string]interface{}(mv)
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	ret := make([]interface{}, 0, defaultArraySize)
+	var cnt int
+	hasKey(m, key, &ret, &cnt, subKeyMap)
+	return ret[:cnt], nil
+}
+
+var KeyNotExistError = errors.New("Key does not exist")
+
+// ValueForKey is a wrapper on ValuesForKey. It returns the first member of []interface{}, if any.
+// If there is no value, 'nil' and KeyNotExistError are returned.
+func (mv Map) ValueForKey(key string, subkeys ...string) (interface{}, error) {
+	vals, err := mv.ValuesForKey(key, subkeys...)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, KeyNotExistError
+	}
+	return vals[0], nil
+}
+
+// hasKey - if the map 'key' exists, append its value to the result array;
+// if it doesn't, do nothing except scan array and map values.
+func hasKey(iv interface{}, key string, ret *[]interface{}, cnt *int, subkeys map[string]interface{}) {
+	// func hasKey(iv interface{}, key string, ret *[]interface{}, subkeys map[string]interface{}) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		// see if the current value is of interest
+		if v, ok := vv[key]; ok {
+			switch v.(type) {
+			case map[string]interface{}:
+				if hasSubKeys(v, subkeys) {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			case []interface{}:
+				for _, av := range v.([]interface{}) {
+					if hasSubKeys(av, subkeys) {
+						*ret = append(*ret, av)
+						*cnt++
+					}
+				}
+			default:
+				if len(subkeys) == 0 {
+					*ret = append(*ret, v)
+					*cnt++
+				}
+			}
+		}
+
+		// wildcard case
+		if key == "*" {
+			for _, v := range vv {
+				switch v.(type) {
+				case map[string]interface{}:
+					if hasSubKeys(v, subkeys) {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				case []interface{}:
+					for _, av := range v.([]interface{}) {
+						if hasSubKeys(av, subkeys) {
+							*ret = append(*ret, av)
+							*cnt++
+						}
+					}
+				default:
+					if len(subkeys) == 0 {
+						*ret = append(*ret, v)
+						*cnt++
+					}
+				}
+			}
+		}
+
+		// scan the rest
+		for _, v := range vv {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	case []interface{}:
+		for _, v := range iv.([]interface{}) {
+			hasKey(v, key, ret, cnt, subkeys)
+		}
+	}
+}
+
+// ----------------------- get everything for a node in the Map ---------------------------
+
+// Allow indexed arrays in "path" specification. (Request from Abhijit Kadam - abhijitk100@gmail.com.)
+// 2014.04.28 - implementation note.
+// Implemented as a wrapper of (old)ValuesForPath() because we need look-ahead logic to handle expansion
+// of wildcards and unindexed arrays. Embedding such logic into valuesForKeyPath() would have made the
+// code much more complicated; this wrapper is straightforward, easy to debug, and doesn't add significant overhead.
+
+// ValuesForPath retrieves all values for a path from the Map. If len(returned_values) == 0, then no match.
+// On error, the returned array is 'nil'.
+// 'path' is a dot-separated path of key values.
+// - If a node in the path is '*', then everything beyond is walked.
+// - 'path' can contain indexed array references, such as, "*.data[1]" and "msgs[2].data[0].field" -
+//   even "*[2].*[0].field".
+// 'subkeys' (optional) are "key:val[:type]" strings representing attributes or elements in a list.
+// - By default 'val' is of type string. Use "key:val:bool" and "key:val:float" to coerce them.
+// - For attributes prefix the label with the attribute prefix character, by default a
+//   hyphen, '-', e.g., "-seq:3". (See the SetAttrPrefix function.)
+// - If the 'path' refers to a list, then "tag:value" would return a member of the list.
+// - The subkey can be wildcarded - "key:*" - to require that it's there with some value.
+// - If a subkey is preceded with the '!' character, the key:value[:type] entry is treated as an
+//   exclusion criterion - e.g., "!author:William T. Gaddis".
+// - If val contains a ":" symbol, use SetFieldSeparator to set an unused symbol, perhaps "|".
+func (mv Map) ValuesForPath(path string, subkeys ...string) ([]interface{}, error) {
+	// If there are no array indexes in path, use legacy ValuesForPath() logic.
+	if strings.Index(path, "[") < 0 {
+		return mv.oldValuesForPath(path, subkeys...)
+	}
+
+	var subKeyMap map[string]interface{}
+	if len(subkeys) > 0 {
+		var err error
+		subKeyMap, err = getSubKeyMap(subkeys...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	keys, kerr := parsePath(path)
+	if kerr != nil {
+		return nil, kerr
+	}
+
+	vals, verr := valuesForArray(keys, mv)
+	if verr != nil {
+		return nil, verr // Vals may be nil, but return empty array.
+	}
+
+	// Need to handle subkeys ... only return members of vals that satisfy conditions.
+	retvals := make([]interface{}, 0)
+	for _, v := range vals {
+		if hasSubKeys(v, subKeyMap) {
+			retvals = append(retvals, v)
+		}
+	}
+	return retvals, nil
+}
+
+func valuesForArray(keys []*key, m Map) ([]interface{}, error) {
+	var tmppath string
+	var haveFirst bool
+	var vals []interface{}
+	var verr error
+
+	lastkey := len(keys) - 1
+	for i := 0; i <= lastkey; i++ {
+		if !haveFirst {
+			tmppath = keys[i].name
+			haveFirst = true
+		} else {
+			tmppath += "." + keys[i].name
+		}
+
+		// Look-ahead: explode wildcards and unindexed arrays.
+		// Need to handle un-indexed list recursively:
+		// e.g., path is "stuff.data[0]" rather than "stuff[0].data[0]".
+		// Need to treat it as "stuff[0].data[0]", "stuff[1].data[0]", ...
+		if !keys[i].isArray && i < lastkey && keys[i+1].isArray {
+			// Can't pass subkeys because we may not be at literal end of path.
+			vv, vverr := m.oldValuesForPath(tmppath)
+			if vverr != nil {
+				return nil, vverr
+			}
+			for _, v := range vv {
+				// See if we can walk the value.
+				am, ok := v.(map[string]interface{})
+				if !ok {
+					continue
+				}
+				// Work the backend.
+				nvals, nvalserr := valuesForArray(keys[i+1:], Map(am))
+				if nvalserr != nil {
+					return nil, nvalserr
+				}
+				vals = append(vals, nvals...)
+			}
+			break // have recursed the whole path - return
+		}
+
+		if keys[i].isArray || i == lastkey {
+			// Don't pass subkeys because may not be at literal end of path.
+			vals, verr = m.oldValuesForPath(tmppath)
+		} else {
+			continue
+		}
+		if verr != nil {
+			return nil, verr
+		}
+
+		if i == lastkey && !keys[i].isArray {
+			break
+		}
+
+		// Now we're looking at an array - supposedly.
+		// Is index in range of vals?
+		if len(vals) <= keys[i].position {
+			vals = nil
+			break
+		}
+
+		// Return the array member of interest, if at end of path.
+		if i == lastkey {
+			vals = vals[keys[i].position:(keys[i].position + 1)]
+			break
+		}
+
+		// Extract the array member of interest.
+ am := vals[keys[i].position:(keys[i].position + 1)] + + // must be a map[string]interface{} value so we can keep walking the path + amm, ok := am[0].(map[string]interface{}) + if !ok { + vals = nil + break + } + + m = Map(amm) + haveFirst = false + } + + return vals, nil +} + +type key struct { + name string + isArray bool + position int +} + +func parsePath(s string) ([]*key, error) { + keys := strings.Split(s, ".") + + ret := make([]*key, 0) + + for i := 0; i < len(keys); i++ { + if keys[i] == "" { + continue + } + + newkey := new(key) + if strings.Index(keys[i], "[") < 0 { + newkey.name = keys[i] + ret = append(ret, newkey) + continue + } + + p := strings.Split(keys[i], "[") + newkey.name = p[0] + p = strings.Split(p[1], "]") + if p[0] == "" { // no right bracket + return nil, fmt.Errorf("no right bracket on key index: %s", keys[i]) + } + // convert p[0] to a int value + pos, nerr := strconv.ParseInt(p[0], 10, 32) + if nerr != nil { + return nil, fmt.Errorf("cannot convert index to int value: %s", p[0]) + } + newkey.position = int(pos) + newkey.isArray = true + ret = append(ret, newkey) + } + + return ret, nil +} + +// legacy ValuesForPath() - now wrapped to handle special case of indexed arrays in 'path'. +func (mv Map) oldValuesForPath(path string, subkeys ...string) ([]interface{}, error) { + m := map[string]interface{}(mv) + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) + if err != nil { + return nil, err + } + } + + keys := strings.Split(path, ".") + if keys[len(keys)-1] == "" { + keys = keys[:len(keys)-1] + } + ivals := make([]interface{}, 0, defaultArraySize) + var cnt int + valuesForKeyPath(&ivals, &cnt, m, keys, subKeyMap) + return ivals[:cnt], nil +} + +func valuesForKeyPath(ret *[]interface{}, cnt *int, m interface{}, keys []string, subkeys map[string]interface{}) { + lenKeys := len(keys) + + // load 'm' values into 'ret' + // expand any lists + if lenKeys == 0 { + switch m.(type) { + case map[string]interface{}: + if subkeys != nil { + if ok := hasSubKeys(m, subkeys); !ok { + return + } + } + *ret = append(*ret, m) + *cnt++ + case []interface{}: + for i, v := range m.([]interface{}) { + if subkeys != nil { + if ok := hasSubKeys(v, subkeys); !ok { + continue // only load list members with subkeys + } + } + *ret = append(*ret, (m.([]interface{}))[i]) + *cnt++ + } + default: + if subkeys != nil { + return // must be map[string]interface{} if there are subkeys + } + *ret = append(*ret, m) + *cnt++ + } + return + } + + // key of interest + key := keys[0] + switch key { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + default: + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, v, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, v, keys[1:], subkeys) + } + 
case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[key]; ok { + // valuesForKeyPath(ret, vv, keys[1:], subkeys) + valuesForKeyPath(ret, cnt, vv, keys[1:], subkeys) + } + } + } + } + } +} + +// hasSubKeys() - interface{} equality works for string, float64, bool +// 'v' must be a map[string]interface{} value to have subkeys +// 'a' can have k:v pairs with v.(string) == "*", which is treated like a wildcard. +func hasSubKeys(v interface{}, subkeys map[string]interface{}) bool { + if len(subkeys) == 0 { + return true + } + + switch v.(type) { + case map[string]interface{}: + // do all subKey name:value pairs match? + mv := v.(map[string]interface{}) + for skey, sval := range subkeys { + isNotKey := false + if skey[:1] == "!" { // a NOT-key + skey = skey[1:] + isNotKey = true + } + vv, ok := mv[skey] + if !ok { // key doesn't exist + if isNotKey { // key not there, but that's what we want + if kv, ok := sval.(string); ok && kv == "*" { + continue + } + } + return false + } + // wildcard check + if kv, ok := sval.(string); ok && kv == "*" { + if isNotKey { // key is there, and we don't want it + return false + } + continue + } + switch sval.(type) { + case string: + if s, ok := vv.(string); ok && s == sval.(string) { + if isNotKey { + return false + } + continue + } + case bool: + if b, ok := vv.(bool); ok && b == sval.(bool) { + if isNotKey { + return false + } + continue + } + case float64: + if f, ok := vv.(float64); ok && f == sval.(float64) { + if isNotKey { + return false + } + continue + } + } + // key there but didn't match subkey value + if isNotKey { // that's what we want + continue + } + return false + } + // all subkeys matched + return true + } + + // not a map[string]interface{} value, can't have subkeys + return false +} + +// Generate map of key:value entries as map[string]string. +// 'kv' arguments are "name:value" pairs: attribute keys are designated with prepended hyphen, '-'. +// If len(kv) == 0, the return is (nil, nil). +func getSubKeyMap(kv ...string) (map[string]interface{}, error) { + if len(kv) == 0 { + return nil, nil + } + m := make(map[string]interface{}, 0) + for _, v := range kv { + vv := strings.Split(v, fieldSep) + switch len(vv) { + case 2: + m[vv[0]] = interface{}(vv[1]) + case 3: + switch vv[2] { + case "string", "char", "text": + m[vv[0]] = interface{}(vv[1]) + case "bool", "boolean": + // ParseBool treats "1"==true & "0"==false + b, err := strconv.ParseBool(vv[1]) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to bool: %s", vv[1]) + } + m[vv[0]] = interface{}(b) + case "float", "float64", "num", "number", "numeric": + f, err := strconv.ParseFloat(vv[1], 64) + if err != nil { + return nil, fmt.Errorf("can't convert subkey value to float: %s", vv[1]) + } + m[vv[0]] = interface{}(f) + default: + return nil, fmt.Errorf("unknown subkey conversion spec: %s", v) + } + default: + return nil, fmt.Errorf("unknown subkey spec: %s", v) + } + } + return m, nil +} + +// ------------------------------- END of valuesFor ... ---------------------------- + +// ----------------------- locate where a key value is in the tree ------------------- + +//----------------------------- find all paths to a key -------------------------------- + +// PathsForKey returns all paths through Map, 'mv', (in dot-notation) that terminate with the specified key. +// Results can be used with ValuesForPath. 
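+//
+// An illustrative sketch (document assumed): for a Map decoded from
+// {"doc":{"author":{"name":"x"},"editor":{"name":"y"}}}, mv.PathsForKey("name")
+// returns, in some order, []string{"doc.author.name", "doc.editor.name"}.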
+func (mv Map) PathsForKey(key string) []string {
+	m := map[string]interface{}(mv)
+	breadbasket := make(map[string]bool, 0)
+	breadcrumbs := ""
+
+	hasKeyPath(breadcrumbs, m, key, breadbasket)
+	if len(breadbasket) == 0 {
+		return nil
+	}
+
+	// unpack map keys to return
+	res := make([]string, len(breadbasket))
+	var i int
+	for k := range breadbasket {
+		res[i] = k
+		i++
+	}
+
+	return res
+}
+
+// PathForKeyShortest extracts the shortest path from all possible paths - from PathsForKey() - in Map, 'mv'.
+// Paths are strings using dot-notation.
+func (mv Map) PathForKeyShortest(key string) string {
+	paths := mv.PathsForKey(key)
+
+	lp := len(paths)
+	if lp == 0 {
+		return ""
+	}
+	if lp == 1 {
+		return paths[0]
+	}
+
+	shortest := paths[0]
+	shortestLen := len(strings.Split(shortest, "."))
+
+	for i := 1; i < len(paths); i++ {
+		vlen := len(strings.Split(paths[i], "."))
+		if vlen < shortestLen {
+			shortest = paths[i]
+			shortestLen = vlen
+		}
+	}
+
+	return shortest
+}
+
+// hasKeyPath - if the map 'key' exists append it to KeyPath.path and increment KeyPath.depth.
+// This is really just a breadcrumber that saves all trails that hit the prescribed 'key'.
+func hasKeyPath(crumbs string, iv interface{}, key string, basket map[string]bool) {
+	switch iv.(type) {
+	case map[string]interface{}:
+		vv := iv.(map[string]interface{})
+		if _, ok := vv[key]; ok {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = key
+			} else {
+				nbc = crumbs + "." + key
+			}
+			basket[nbc] = true
+		}
+		// walk on down the path, key could occur again at deeper node
+		for k, v := range vv {
+			// create a new breadcrumb, initialized with the one we have
+			var nbc string
+			if crumbs == "" {
+				nbc = k
+			} else {
+				nbc = crumbs + "." + k
+			}
+			hasKeyPath(nbc, v, key, basket)
+		}
+	case []interface{}:
+		// crumb-trail doesn't change, pass it on
+		for _, v := range iv.([]interface{}) {
+			hasKeyPath(crumbs, v, key, basket)
+		}
+	}
+}
+
+var PathNotExistError = errors.New("Path does not exist")
+
+// ValueForPath wraps ValuesForPath and returns the first value returned.
+// If no value is found it returns 'nil' and PathNotExistError.
+func (mv Map) ValueForPath(path string) (interface{}, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return nil, err
+	}
+	if len(vals) == 0 {
+		return nil, PathNotExistError
+	}
+	return vals[0], nil
+}
+
+// ValueForPathString returns the first found value for the path as a string.
+func (mv Map) ValueForPathString(path string) (string, error) {
+	vals, err := mv.ValuesForPath(path)
+	if err != nil {
+		return "", err
+	}
+	if len(vals) == 0 {
+		return "", errors.New("ValueForPath: path not found")
+	}
+	val := vals[0]
+	return fmt.Sprintf("%v", val), nil
+}
+
+// ValueOrEmptyForPathString returns the first found value for the path as a string.
+// If the path is not found then it returns an empty string.
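+//
+// An illustrative sketch (document assumed): for a Map decoded from
+// {"a":{"b":"x"}}, mv.ValueOrEmptyForPathString("a.b") == "x" and
+// mv.ValueOrEmptyForPathString("a.c") == "".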
+func (mv Map) ValueOrEmptyForPathString(path string) string { + str, _ := mv.ValueForPathString(path) + return str +} diff --git a/vendor/github.com/clbanning/mxj/v2/leafnode.go b/vendor/github.com/clbanning/mxj/v2/leafnode.go new file mode 100644 index 0000000000..cf413ebdd4 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/leafnode.go @@ -0,0 +1,112 @@ +package mxj + +// leafnode.go - return leaf nodes with paths and values for the Map +// inspired by: https://groups.google.com/forum/#!topic/golang-nuts/3JhuVKRuBbw + +import ( + "strconv" + "strings" +) + +const ( + NoAttributes = true // suppress LeafNode values that are attributes +) + +// LeafNode - a terminal path value in a Map. +// For XML Map values it represents an attribute or simple element value - of type +// string unless Map was created using Cast flag. For JSON Map values it represents +// a string, numeric, boolean, or null value. +type LeafNode struct { + Path string // a dot-notation representation of the path with array subscripting + Value interface{} // the value at the path termination +} + +// LeafNodes - returns an array of all LeafNode values for the Map. +// The option no_attr argument suppresses attribute values (keys with prepended hyphen, '-') +// as well as the "#text" key for the associated simple element value. +// +// PrependAttrWithHypen(false) will result in attributes having .attr-name as +// terminal node in 'path' while the path for the element value, itself, will be +// the base path w/o "#text". +// +// LeafUseDotNotation(true) causes list members to be identified using ".N" syntax +// rather than "[N]" syntax. +func (mv Map) LeafNodes(no_attr ...bool) []LeafNode { + var a bool + if len(no_attr) == 1 { + a = no_attr[0] + } + + l := make([]LeafNode, 0) + getLeafNodes("", "", map[string]interface{}(mv), &l, a) + return l +} + +func getLeafNodes(path, node string, mv interface{}, l *[]LeafNode, noattr bool) { + // if stripping attributes, then also strip "#text" key + if !noattr || node != "#text" { + if path != "" && node[:1] != "[" { + path += "." + } + path += node + } + switch mv.(type) { + case map[string]interface{}: + for k, v := range mv.(map[string]interface{}) { + // if noattr && k[:1] == "-" { + if noattr && len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue + } + getLeafNodes(path, k, v, l, noattr) + } + case []interface{}: + for i, v := range mv.([]interface{}) { + if useDotNotation { + getLeafNodes(path, strconv.Itoa(i), v, l, noattr) + } else { + getLeafNodes(path, "["+strconv.Itoa(i)+"]", v, l, noattr) + } + } + default: + // can't walk any further, so create leaf + n := LeafNode{path, mv} + *l = append(*l, n) + } +} + +// LeafPaths - all paths that terminate in LeafNode values. +func (mv Map) LeafPaths(no_attr ...bool) []string { + ln := mv.LeafNodes() + ss := make([]string, len(ln)) + for i := 0; i < len(ln); i++ { + ss[i] = ln[i].Path + } + return ss +} + +// LeafValues - all terminal values in the Map. +func (mv Map) LeafValues(no_attr ...bool) []interface{} { + ln := mv.LeafNodes() + vv := make([]interface{}, len(ln)) + for i := 0; i < len(ln); i++ { + vv[i] = ln[i].Value + } + return vv +} + +// ====================== utilities ====================== + +// https://groups.google.com/forum/#!topic/golang-nuts/pj0C5IrZk4I +var useDotNotation bool + +// LeafUseDotNotation sets a flag that list members in LeafNode paths +// should be identified using ".N" syntax rather than the default "[N]" +// syntax. 
Calling LeafUseDotNotation with no arguments toggles the +// flag on/off; otherwise, the argument sets the flag value 'true'/'false'. +func LeafUseDotNotation(b ...bool) { + if len(b) == 0 { + useDotNotation = !useDotNotation + return + } + useDotNotation = b[0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/misc.go b/vendor/github.com/clbanning/mxj/v2/misc.go new file mode 100644 index 0000000000..5b4fab2165 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/misc.go @@ -0,0 +1,86 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// misc.go - mimic functions (+others) called out in: +// https://groups.google.com/forum/#!topic/golang-nuts/jm_aGsJNbdQ +// Primarily these methods let you retrive XML structure information. + +package mxj + +import ( + "fmt" + "sort" + "strings" +) + +// Return the root element of the Map. If there is not a single key in Map, +// then an error is returned. +func (mv Map) Root() (string, error) { + mm := map[string]interface{}(mv) + if len(mm) != 1 { + return "", fmt.Errorf("Map does not have singleton root. Len: %d.", len(mm)) + } + for k, _ := range mm { + return k, nil + } + return "", nil +} + +// If the path is an element with sub-elements, return a list of the sub-element +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are prefixed with +// '-', a hyphen, are considered attributes; see m.Attributes(path). +func (mv Map) Elements(path string) ([]string, error) { + e, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch e.(type) { + case map[string]interface{}: + ee := e.(map[string]interface{}) + elems := make([]string, len(ee)) + var i int + for k, _ := range ee { + if len(attrPrefix) > 0 && strings.Index(k, attrPrefix) == 0 { + continue // skip attributes + } + elems[i] = k + i++ + } + elems = elems[:i] + // alphabetic sort keeps things tidy + sort.Strings(elems) + return elems, nil + } + return nil, fmt.Errorf("no elements for path: %s", path) +} + +// If the path is an element with attributes, return a list of the attribute +// keys. (The list is alphabeticly sorted.) NOTE: Map keys that are not prefixed with +// '-', a hyphen, are not treated as attributes; see m.Elements(path). Also, if the +// attribute prefix is "" - SetAttrPrefix("") or PrependAttrWithHyphen(false) - then +// there are no identifiable attributes. +func (mv Map) Attributes(path string) ([]string, error) { + a, err := mv.ValueForPath(path) + if err != nil { + return nil, err + } + switch a.(type) { + case map[string]interface{}: + aa := a.(map[string]interface{}) + attrs := make([]string, len(aa)) + var i int + for k, _ := range aa { + if len(attrPrefix) == 0 || strings.Index(k, attrPrefix) != 0 { + continue // skip non-attributes + } + attrs[i] = k[len(attrPrefix):] + i++ + } + attrs = attrs[:i] + // alphabetic sort keeps things tidy + sort.Strings(attrs) + return attrs, nil + } + return nil, fmt.Errorf("no attributes for path: %s", path) +} diff --git a/vendor/github.com/clbanning/mxj/v2/mxj.go b/vendor/github.com/clbanning/mxj/v2/mxj.go new file mode 100644 index 0000000000..f0592f06c8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/mxj.go @@ -0,0 +1,128 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. +// Copyright 2012-2014 Charles Banning. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "fmt" + "sort" +) + +const ( + Cast = true // for clarity - e.g., mxj.NewMapXml(doc, mxj.Cast) + SafeEncoding = true // ditto - e.g., mv.Json(mxj.SafeEncoding) +) + +type Map map[string]interface{} + +// Allocate a Map. +func New() Map { + m := make(map[string]interface{}, 0) + return m +} + +// Cast a Map to map[string]interface{} +func (mv Map) Old() map[string]interface{} { + return mv +} + +// Return a copy of mv as a newly allocated Map. If the Map only contains string, +// numeric, map[string]interface{}, and []interface{} values, then it can be thought +// of as a "deep copy." Copying a structure (or structure reference) value is subject +// to the noted restrictions. +// NOTE: If 'mv' includes structure values with, possibly, JSON encoding tags +// then only public fields of the structure are in the new Map - and with +// keys that conform to any encoding tag instructions. The structure itself will +// be represented as a map[string]interface{} value. +func (mv Map) Copy() (Map, error) { + // this is the poor-man's deep copy + // not efficient, but it works + j, jerr := mv.Json() + // must handle, we don't know how mv got built + if jerr != nil { + return nil, jerr + } + return NewMapJson(j) +} + +// --------------- StringIndent ... from x2j.WriteMap ------------- + +// Pretty print a Map. +func (mv Map) StringIndent(offset ...int) string { + return writeMap(map[string]interface{}(mv), true, true, offset...) +} + +// Pretty print a Map without the value type information - just key:value entries. +func (mv Map) StringIndentNoTypeInfo(offset ...int) string { + return writeMap(map[string]interface{}(mv), false, true, offset...) +} + +// writeMap - dumps the map[string]interface{} for examination. +// 'typeInfo' causes value type to be printed. +// 'offset' is initial indentation count; typically: Write(m). +func writeMap(m interface{}, typeInfo, root bool, offset ...int) string { + var indent int + if len(offset) == 1 { + indent = offset[0] + } + + var s string + switch m.(type) { + case []interface{}: + if typeInfo { + s += "[[]interface{}]" + } + for _, v := range m.([]interface{}) { + s += "\n" + for i := 0; i < indent; i++ { + s += " " + } + s += writeMap(v, typeInfo, false, indent+1) + } + case map[string]interface{}: + list := make([][2]string, len(m.(map[string]interface{}))) + var n int + for k, v := range m.(map[string]interface{}) { + list[n][0] = k + list[n][1] = writeMap(v, typeInfo, false, indent+1) + n++ + } + sort.Sort(mapList(list)) + for _, v := range list { + if root { + root = false + } else { + s += "\n" + } + for i := 0; i < indent; i++ { + s += " " + } + s += v[0] + " : " + v[1] + } + default: + if typeInfo { + s += fmt.Sprintf("[%T] %+v", m, m) + } else { + s += fmt.Sprintf("%+v", m) + } + } + return s +} + +// ======================== utility =============== + +type mapList [][2]string + +func (ml mapList) Len() int { + return len(ml) +} + +func (ml mapList) Swap(i, j int) { + ml[i], ml[j] = ml[j], ml[i] +} + +func (ml mapList) Less(i, j int) bool { + return ml[i][0] <= ml[j][0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/newmap.go b/vendor/github.com/clbanning/mxj/v2/newmap.go new file mode 100644 index 0000000000..b293949056 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/newmap.go @@ -0,0 +1,184 @@ +// mxj - A collection of map[string]interface{} and associated XML and JSON utilities. 
+// Copyright 2012-2014, 2018 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// remap.go - build a new Map from the current Map based on keyOld:keyNew mapppings +// keys can use dot-notation, keyOld can use wildcard, '*' +// +// Computational strategy - +// Using the key path - []string - traverse a new map[string]interface{} and +// insert the oldVal as the newVal when we arrive at the end of the path. +// If the type at the end is nil, then that is newVal +// If the type at the end is a singleton (string, float64, bool) an array is created. +// If the type at the end is an array, newVal is just appended. +// If the type at the end is a map, it is inserted if possible or the map value +// is converted into an array if necessary. + +package mxj + +import ( + "errors" + "strings" +) + +// (Map)NewMap - create a new Map from data in the current Map. +// 'keypairs' are key mappings "oldKey:newKey" and specify that the current value of 'oldKey' +// should be the value for 'newKey' in the returned Map. +// - 'oldKey' supports dot-notation as described for (Map)ValuesForPath() +// - 'newKey' supports dot-notation but with no wildcards, '*', or indexed arrays +// - "oldKey" is shorthand for the keypair value "oldKey:oldKey" +// - "oldKey:" and ":newKey" are invalid keypair values +// - if 'oldKey' does not exist in the current Map, it is not written to the new Map. +// "null" is not supported unless it is the current Map. +// - see newmap_test.go for several syntax examples +// - mv.NewMap() == mxj.New() +// +// NOTE: "examples/partial.go" shows how to create arbitrary sub-docs of an XML doc. +func (mv Map) NewMap(keypairs ...string) (Map, error) { + n := make(map[string]interface{}, 0) + if len(keypairs) == 0 { + return n, nil + } + + // loop through the pairs + var oldKey, newKey string + var path []string + for _, v := range keypairs { + if len(v) == 0 { + continue // just skip over empty keypair arguments + } + + // initialize oldKey, newKey and check + vv := strings.Split(v, ":") + if len(vv) > 2 { + return n, errors.New("oldKey:newKey keypair value not valid - " + v) + } + if len(vv) == 1 { + oldKey, newKey = vv[0], vv[0] + } else { + oldKey, newKey = vv[0], vv[1] + } + strings.TrimSpace(oldKey) + strings.TrimSpace(newKey) + if i := strings.Index(newKey, "*"); i > -1 { + return n, errors.New("newKey value cannot contain wildcard character - " + v) + } + if i := strings.Index(newKey, "["); i > -1 { + return n, errors.New("newKey value cannot contain indexed arrays - " + v) + } + if oldKey == "" || newKey == "" { + return n, errors.New("oldKey or newKey is not specified - " + v) + } + + // get oldKey value + oldVal, err := mv.ValuesForPath(oldKey) + if err != nil { + return n, err + } + if len(oldVal) == 0 { + continue // oldKey has no value, may not exist in mv + } + + // break down path + path = strings.Split(newKey, ".") + if path[len(path)-1] == "" { // ignore a trailing dot in newKey spec + path = path[:len(path)-1] + } + + addNewVal(&n, path, oldVal) + } + + return n, nil +} + +// navigate 'n' to end of path and add val +func addNewVal(n *map[string]interface{}, path []string, val []interface{}) { + // newVal - either singleton or array + var newVal interface{} + if len(val) == 1 { + newVal = val[0] // is type interface{} + } else { + newVal = interface{}(val) + } + + // walk to the position of interest, create it if necessary + m := (*n) // initialize map walker + var k string // key for 
m + lp := len(path) - 1 // when to stop looking + for i := 0; i < len(path); i++ { + k = path[i] + if i == lp { + break + } + var nm map[string]interface{} // holds position of next-map + switch m[k].(type) { + case nil: // need a map for next node in path, so go there + nm = make(map[string]interface{}, 0) + m[k] = interface{}(nm) + m = m[k].(map[string]interface{}) + case map[string]interface{}: + // OK - got somewhere to walk to, go there + m = m[k].(map[string]interface{}) + case []interface{}: + // add a map and nm points to new map unless there's already + // a map in the array, then nm points there + // The placement of the next value in the array is dependent + // on the sequence of members - could land on a map or a nil + // value first. TODO: how to test this. + a := make([]interface{}, 0) + var foundmap bool + for _, vv := range m[k].([]interface{}) { + switch vv.(type) { + case nil: // doesn't appear that this occurs, need a test case + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + foundmap = true + case map[string]interface{}: + if foundmap { // use the first one in array + a = append(a, vv) + continue + } + nm = vv.(map[string]interface{}) + a = append(a, vv) + foundmap = true + default: + a = append(a, vv) + } + } + // no map found in array + if !foundmap { + nm = make(map[string]interface{}, 0) + a = append(a, interface{}(nm)) + } + m[k] = interface{}(a) // must insert in map + m = nm + default: // it's a string, float, bool, etc. + aa := make([]interface{}, 0) + nm = make(map[string]interface{}, 0) + aa = append(aa, m[k], nm) + m[k] = interface{}(aa) + m = nm + } + } + + // value is nil, array or a singleton of some kind + // initially m.(type) == map[string]interface{} + v := m[k] + switch v.(type) { + case nil: // initialized + m[k] = newVal + case []interface{}: + a := m[k].([]interface{}) + a = append(a, newVal) + m[k] = interface{}(a) + default: // v exists:string, float64, bool, map[string]interface, etc. + a := make([]interface{}, 0) + a = append(a, v, newVal) + m[k] = interface{}(a) + } +} diff --git a/vendor/github.com/clbanning/mxj/v2/readme.md b/vendor/github.com/clbanning/mxj/v2/readme.md new file mode 100644 index 0000000000..323a747d4d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/readme.md @@ -0,0 +1,207 @@ +
+<h2>mxj - to/from maps, XML and JSON</h2>
+
+Decode/encode XML to/from map[string]interface{} (or JSON) values, and extract/modify values from maps by key or key-path, including wildcards.
+
+mxj supplants the legacy x2j and j2x packages. If you want the old syntax, use the mxj/x2j and mxj/j2x packages.
+
+<h4>Installation</h4>
+
+Using go.mod:
+<pre>
+go get github.com/clbanning/mxj/v2@v2.3.2
+</pre>
+
+<pre>
+import "github.com/clbanning/mxj/v2"
+</pre>
+
+... or just vendor the package.
+
+<h4>Related Packages</h4>
+
+https://github.com/clbanning/checkxml provides functions for validating XML data.
+
+<h4>Refactor Encoder - 2020.05.01</h4>
+
+Issue #70 highlighted that encoding large maps does not scale well, since the original logic used string-append operations. Using bytes.Buffer results in linear scaling for very large XML docs. (Metrics based on MacBook Pro i7 w/ 16 GB.)
+
+    Nodes      m.XML() time
+    54809      12.53708ms
+    109780     32.403183ms
+    164678     59.826412ms
+    482598     109.358007ms
+
+<h4>Refactor Decoder - 2015.11.15</h4>
+
+For over a year I've wanted to refactor the XML-to-map[string]interface{} decoder to make it more performant. I recently took the time to do that, since we were using github.com/clbanning/mxj in a production system that could be deployed on a Raspberry Pi. Now the decoder is comparable to the stdlib JSON-to-map[string]interface{} decoder in terms of its additional processing overhead relative to decoding to a structure value. As shown by:
+
+    BenchmarkNewMapXml-4            100000     18043 ns/op
+    BenchmarkNewStructXml-4         100000     14892 ns/op
+    BenchmarkNewMapJson-4           300000      4633 ns/op
+    BenchmarkNewStructJson-4        300000      3427 ns/op
+    BenchmarkNewMapXmlBooks-4        20000     82850 ns/op
+    BenchmarkNewStructXmlBooks-4     20000     67822 ns/op
+    BenchmarkNewMapJsonBooks-4      100000     17222 ns/op
+    BenchmarkNewStructJsonBooks-4   100000     15309 ns/op
+
+<h4>Notices</h4>
+
+    2021.02.02: v2.5 - add XmlCheckIsValid toggle to force checking that the encoded XML is valid
+    2020.12.14: v2.4 - add XMLEscapeCharsDecoder to preserve XML escaped characters in Map values
+    2020.10.28: v2.3 - add TrimWhiteSpace option
+    2020.05.01: v2.2 - optimize map to XML encoding for large XML docs.
+    2019.07.04: v2.0 - remove unnecessary methods - mv.XmlWriterRaw, mv.XmlIndentWriterRaw - for Map and MapSeq.
+    2019.07.04: Add MapSeq type and move associated functions and methods from Map to MapSeq.
+    2019.01.21: DecodeSimpleValuesAsMap - decode to map[<tag>:map["#text":<value>]] rather than map[<tag>:<value>]
+    2018.04.18: mv.Xml/mv.XmlIndent encodes non-map[string]interface{} map values - map[string]string, map[int]uint, etc.
+    2018.03.29: mv.Gob/NewMapGob support gob encoding/decoding of Maps.
+    2018.03.26: Added mxj/x2j-wrapper sub-package for migrating from legacy x2j package.
+    2017.02.22: LeafNode paths can use ".N" syntax rather than "[N]" for list member indexing.
+    2017.02.10: SetFieldSeparator changes field separator for args in UpdateValuesForPath, ValuesFor... methods.
+    2017.02.06: Support XMPP stream processing - HandleXMPPStreamTag().
+    2016.11.07: Preserve name space prefix syntax in XmlSeq parser - NewMapXmlSeq(), etc.
+    2016.06.25: Support overriding default XML attribute prefix, "-", in Map keys - SetAttrPrefix().
+    2016.05.26: Support customization of xml.Decoder by exposing CustomDecoder variable.
+    2016.03.19: Escape invalid chars when encoding XML attribute and element values - XMLEscapeChars().
+    2016.03.02: By default decoding XML with float64 and bool value casting will not cast "NaN", "Inf", and "-Inf".
+                To cast them to float64, first set flag with CastNanInf(true).
+    2016.02.22: New mv.Root(), mv.Elements(), mv.Attributes methods let you examine XML document structure.
+    2016.02.16: Add CoerceKeysToLower() option to handle tags with mixed capitalization.
+    2016.02.12: Seek for first xml.StartElement token; only return error if io.EOF is reached first (handles BOM).
+    2015.12.02: XML decoding/encoding that preserves original structure of document. See NewMapXmlSeq()
+                and mv.XmlSeq() / mv.XmlSeqIndent().
+    2015-05-20: New: mv.StringIndentNoTypeInfo().
+                Also, alphabetically sort map[string]interface{} values by key to prettify output for mv.Xml(),
+                mv.XmlIndent(), mv.StringIndent(), mv.StringIndentNoTypeInfo().
+    2014-11-09: IncludeTagSeqNum() adds "_seq" key with XML doc positional information.
+                (NOTE: PreserveXmlList() is similar and will be here soon.)
+    2014-09-18: inspired by NYTimes fork, added PrependAttrWithHyphen() to allow stripping hyphen from attribute tag.
+    2014-08-02: AnyXml() and AnyXmlIndent() will try to marshal arbitrary values to XML.
+    2014-04-28: ValuesForPath() and NewMap() now accept path with indexed array references.
+
+<h4>Basic Unmarshal XML to map[string]interface{}</h4>
+<pre>type Map map[string]interface{}</pre>
+
+Create a `Map` value, 'mv', from any `map[string]interface{}` value, 'v':
+<pre>mv := Map(v)</pre>
+
+Unmarshal / marshal XML as a `Map` value, 'mv':
+<pre>mv, err := NewMapXml(xmlValue) // unmarshal
+xmlValue, err := mv.Xml()      // marshal</pre>
+
+Unmarshal XML from an `io.Reader` as a `Map` value, 'mv':
+<pre>mv, err := NewMapXmlReader(xmlReader)         // repeated calls, as with an os.File Reader, will process stream
+mv, raw, err := NewMapXmlReaderRaw(xmlReader) // 'raw' is the raw XML that was decoded</pre>
+
+Marshal `Map` value, 'mv', to an XML Writer (`io.Writer`):
+<pre>err := mv.XmlWriter(xmlWriter)
+raw, err := mv.XmlWriterRaw(xmlWriter) // 'raw' is the raw XML that was written on xmlWriter</pre>
+
+Also, for prettified output:
+<pre>xmlValue, err := mv.XmlIndent(prefix, indent, ...)
+err := mv.XmlIndentWriter(xmlWriter, prefix, indent, ...)
+raw, err := mv.XmlIndentWriterRaw(xmlWriter, prefix, indent, ...)</pre>
+
+Bulk process XML with error handling (note: handlers must return a boolean value):
+<pre>err := HandleXmlReader(xmlReader, mapHandler(Map), errHandler(error))
+err := HandleXmlReaderRaw(xmlReader, mapHandler(Map, []byte), errHandler(error, []byte))</pre>
+
+Converting XML to JSON: see Examples for `NewMapXml` and `HandleXmlReader`.
+
+There are comparable functions and methods for JSON processing.
+
+Arbitrary structure values can be decoded to / encoded from `Map` values:
+<pre>mv, err := NewMapStruct(structVal)
+err := mv.Struct(structPointer)</pre>
+
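+For instance, a minimal XML-to-JSON conversion might look like this (an
+illustrative sketch - the input value and error handling are assumed):
+<pre>mv, err := mxj.NewMapXml([]byte(`<doc><name>clbanning</name></doc>`))
+if err != nil {
+	// handle error
+}
+jsonVal, err := mv.Json()
+// jsonVal: {"doc":{"name":"clbanning"}}</pre>
+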

Extract / modify Map values

+To work with XML tag values, JSON or Map key values or structure field values, decode the XML, JSON +or structure to a `Map` value, 'mv', or cast a `map[string]interface{}` value to a `Map` value, 'mv', then: +
paths := mv.PathsForKey(key)
+path := mv.PathForKeyShortest(key)
+values, err := mv.ValuesForKey(key, subkeys)
+values, err := mv.ValuesForPath(path, subkeys)
+count, err := mv.UpdateValuesForPath(newVal, path, subkeys)
+ +Get everything at once, irrespective of path depth: +
leafnodes := mv.LeafNodes()
+leafvalues := mv.LeafValues()
+ +A new `Map` with whatever keys are desired can be created from the current `Map` and then encoded in XML +or JSON. (Note: keys can use dot-notation.) +
newMap, err := mv.NewMap("oldKey_1:newKey_1", "oldKey_2:newKey_2", ..., "oldKey_N:newKey_N")
+newMap, err := mv.NewMap("oldKey1", "oldKey3", "oldKey5") // a subset of 'mv'; see "examples/partial.go"
+newXml, err := newMap.Xml()   // for example
+newJson, err := newMap.Json() // ditto
+ +
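+
+A short worked sketch of the path-based accessors (the XML, paths, and values are made up
+for illustration; imports as in the round-trip example above):
+
+data := []byte(`<doc><book><author>William T. Gaddis</author><title>The Recognitions</title></book></doc>`)
+mv, _ := mxj.NewMapXml(data)
+
+titles, _ := mv.ValuesForKey("title") // all "title" values, any depth
+fmt.Println(titles)                   // [The Recognitions]
+
+authors, _ := mv.ValuesForPath("doc.book.author") // dot-notation path
+fmt.Println(authors)                              // [William T. Gaddis]
+
+n, _ := mv.UpdateValuesForPath("title:JR", "doc.book.title") // newVal as "key:value"
+fmt.Println(n)                                               // 1 value changed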

Usage

+ +The package is fairly well [self-documented with examples](http://godoc.org/github.com/clbanning/mxj). + +Also, the subdirectory "examples" contains a wide range of examples, several taken from golang-nuts discussions. + +

XML parsing conventions

+
+Using NewMapXml()
+
+ - Attributes are parsed to `map[string]interface{}` values by prefixing a hyphen, `-`,
+   to the attribute label. (Unless overridden by `PrependAttrWithHyphen(false)` or
+   `SetAttrPrefix()`.)
+ - If the element is a simple element and has attributes, the element value
+   is given the key `#text` for its `map[string]interface{}` representation. (See
+   the 'atomFeedString.xml' test data, below.)
+ - XML comments, directives, and process instructions are ignored.
+ - If CoerceKeysToLower() has been called, then the resultant keys will be lower case.
+
+Using NewMapXmlSeq()
+
+ - Attributes are parsed to `map["#attr"]map[<attr_label>]map[string]interface{}` values
+   where the `<attr_label>` value has "#text" and "#seq" keys - the "#text" key holds the
+   value for `<attr_label>`.
+ - All elements, except for the root, have a "#seq" key.
+ - Comments, directives, and process instructions are unmarshalled into the Map using the
+   keys "#comment", "#directive", and "#procinst", respectively. (See documentation for more
+   specifics.)
+ - Name space syntax is preserved:
+    - `<ns:key>something</ns:key>` parses to `map["ns:key"]interface{}{"something"}`
+    - `xmlns:ns="http://myns.com/ns"` parses to `map["xmlns:ns"]interface{}{"http://myns.com/ns"}`
+
+Both
+
+ - By default, "NaN", "Inf", and "-Inf" values are not cast to float64. If you want them
+   to be cast, set a flag to cast them using CastNanInf(true).
+
+(A short code sketch of the attribute-parsing convention follows.)
+
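+
+For instance, the NewMapXml() attribute-parsing convention in action (a sketch; the XML and
+values are made up, and imports are as in the round-trip example above):
+
+mv, _ := mxj.NewMapXml([]byte(`<msg seq="2">hello</msg>`))
+fmt.Printf("%v\n", mv) // map[msg:map[#text:hello -seq:2]]
+
+mxj.SetAttrPrefix("@") // override the default "-" prefix
+mv, _ = mxj.NewMapXml([]byte(`<msg seq="2">hello</msg>`))
+fmt.Printf("%v\n", mv) // map[msg:map[#text:hello @seq:2]]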

XML encoding conventions

+
+ - 'nil' `Map` values, which may represent 'null' JSON values, are encoded as `<tag/>`.
+   NOTE: the operation is not symmetric as `<tag/>` elements are decoded as `tag:""` `Map` values,
+   which, then, encode in JSON as `"tag":""` values. (See the sketch after this list.)
+ - ALSO: there is no guarantee that the encoded XML doc will be the same as the decoded one. (Go
+   randomizes the walk through map[string]interface{} values.) If you plan to re-encode the
+   Map value to XML and want the same sequencing of elements, look at NewMapXmlSeq() and
+   mv.XmlSeq() - these try to preserve the element sequencing but with added complexity when
+   working with the Map representation.
+
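+
+A quick sketch of the nil/empty-element asymmetry described above (illustrative values;
+imports as in the round-trip example above):
+
+mv := mxj.Map{"doc": map[string]interface{}{"tag": nil}}
+x, _ := mv.Xml()
+fmt.Println(string(x)) // <doc><tag/></doc>
+
+mv2, _ := mxj.NewMapXml(x)
+j, _ := mv2.Json()
+fmt.Println(string(j)) // {"doc":{"tag":""}} - the nil did not survive the round trip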

Running "go test"

+
+Because there are no guarantees on the sequence in which map elements are retrieved, the tests have
+been written for visual verification in most cases. One advantage is that you can easily use the
+output from running "go test" as examples of calling the various functions and methods.
+

Motivation

+
+I make extensive use of JSON for messaging and typically unmarshal the messages into
+`map[string]interface{}` values. This is easily done using `json.Unmarshal` from the
+standard Go libraries. Unfortunately, many legacy solutions use structured
+XML messages; in those environments the applications would have to be refactored to
+interoperate with my components.
+
+The better solution is to just provide an alternative HTTP handler that receives
+XML messages, parses them into a `map[string]interface{}` value, and then reuses
+all the JSON-based code. The Go `xml.Unmarshal()` function does not provide the same
+option of unmarshaling XML messages into `map[string]interface{}` values. So I wrote
+a couple of small functions to fill this gap and released them as the x2j package.
+
+Over the next year and a half additional features were added, and the companion j2x
+package was released to address XML encoding of arbitrary JSON and `map[string]interface{}`
+values. As part of a refactoring of our production system and looking at how we had been
+using the x2j and j2x packages, we found that we rarely performed direct XML-to-JSON or
+JSON-to-XML conversion and that working with the XML or JSON as `map[string]interface{}`
+values was the primary benefit. Thus, everything was refactored into the mxj package.
+
diff --git a/vendor/github.com/clbanning/mxj/v2/remove.go b/vendor/github.com/clbanning/mxj/v2/remove.go
new file mode 100644
index 0000000000..8362ab17fa
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/remove.go
@@ -0,0 +1,37 @@
+package mxj
+
+import "strings"
+
+// Remove deletes the value at the given dot-notation path from the Map.
+func (mv Map) Remove(path string) error {
+	m := map[string]interface{}(mv)
+	return remove(m, path)
+}
+
+func remove(m interface{}, path string) error {
+	val, err := prevValueByPath(m, path)
+	if err != nil {
+		return err
+	}
+
+	lastKey := lastKey(path)
+	delete(val, lastKey)
+
+	return nil
+}
+
+// lastKey returns the last key of the path.
+// lastKey("a.b.c") returns "c".
+func lastKey(path string) string {
+	keys := strings.Split(path, ".")
+	key := keys[len(keys)-1]
+	return key
+}
+
+// parentPath returns the path without the last key.
+// parentPath("a.b.c") returns "a.b".
+func parentPath(path string) string {
+	keys := strings.Split(path, ".")
+	parentPath := strings.Join(keys[0:len(keys)-1], ".")
+	return parentPath
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/rename.go b/vendor/github.com/clbanning/mxj/v2/rename.go
new file mode 100644
index 0000000000..4c655ed5d0
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/rename.go
@@ -0,0 +1,61 @@
+package mxj
+
+import (
+	"errors"
+	"strings"
+)
+
+// RenameKey renames a key in a Map.
+// It works only for nested maps.
+// It doesn't work for cases when the key is in a list.
+func (mv Map) RenameKey(path string, newName string) error {
+	var v bool
+	var err error
+	if v, err = mv.Exists(path); err == nil && !v {
+		return errors.New("RenameKey: path not found: " + path)
+	} else if err != nil {
+		return err
+	}
+	if v, err = mv.Exists(parentPath(path) + "."
+ newName); err == nil && v { + return errors.New("RenameKey: key already exists: " + newName) + } else if err != nil { + return err + } + + m := map[string]interface{}(mv) + return renameKey(m, path, newName) +} + +func renameKey(m interface{}, path string, newName string) error { + val, err := prevValueByPath(m, path) + if err != nil { + return err + } + + oldName := lastKey(path) + val[newName] = val[oldName] + delete(val, oldName) + + return nil +} + +// returns a value which contains a last key in the path +// For example: prevValueByPath("a.b.c", {a{b{c: 3}}}) returns {c: 3} +func prevValueByPath(m interface{}, path string) (map[string]interface{}, error) { + keys := strings.Split(path, ".") + + switch mValue := m.(type) { + case map[string]interface{}: + for key, value := range mValue { + if key == keys[0] { + if len(keys) == 1 { + return mValue, nil + } else { + // keep looking for the full path to the key + return prevValueByPath(value, strings.Join(keys[1:], ".")) + } + } + } + } + return nil, errors.New("prevValueByPath: didn't find path – " + path) +} diff --git a/vendor/github.com/clbanning/mxj/v2/set.go b/vendor/github.com/clbanning/mxj/v2/set.go new file mode 100644 index 0000000000..a297fc3888 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/set.go @@ -0,0 +1,26 @@ +package mxj + +import ( + "strings" +) + +// Sets the value for the path +func (mv Map) SetValueForPath(value interface{}, path string) error { + pathAry := strings.Split(path, ".") + parentPathAry := pathAry[0 : len(pathAry)-1] + parentPath := strings.Join(parentPathAry, ".") + + val, err := mv.ValueForPath(parentPath) + if err != nil { + return err + } + if val == nil { + return nil // we just ignore the request if there's no val + } + + key := pathAry[len(pathAry)-1] + cVal := val.(map[string]interface{}) + cVal[key] = value + + return nil +} diff --git a/vendor/github.com/clbanning/mxj/v2/setfieldsep.go b/vendor/github.com/clbanning/mxj/v2/setfieldsep.go new file mode 100644 index 0000000000..b70715ebc6 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/setfieldsep.go @@ -0,0 +1,20 @@ +package mxj + +// Per: https://github.com/clbanning/mxj/issues/37#issuecomment-278651862 +var fieldSep string = ":" + +// SetFieldSeparator changes the default field separator, ":", for the +// newVal argument in mv.UpdateValuesForPath and the optional 'subkey' arguments +// in mv.ValuesForKey and mv.ValuesForPath. +// +// E.g., if the newVal value is "http://blah/blah", setting the field separator +// to "|" will allow the newVal specification, "|http://blah/blah" to parse +// properly. If called with no argument or an empty string value, the field +// separator is set to the default, ":". +func SetFieldSeparator(s ...string) { + if len(s) == 0 || s[0] == "" { + fieldSep = ":" // the default + return + } + fieldSep = s[0] +} diff --git a/vendor/github.com/clbanning/mxj/v2/songtext.xml b/vendor/github.com/clbanning/mxj/v2/songtext.xml new file mode 100644 index 0000000000..8c0f2becb1 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/songtext.xml @@ -0,0 +1,29 @@ + + help me! 
+ + + + Henry was a renegade + Didn't like to play it safe + One component at a time + There's got to be a better way + Oh, people came from miles around + Searching for a steady job + Welcome to the Motor Town + Booming like an atom bomb + + + Oh, Henry was the end of the story + Then everything went wrong + And we'll return it to its former glory + But it just takes so long + + + + It's going to take a long time + It's going to take it, but we'll make it one day + It's going to take a long time + It's going to take it, but we'll make it one day + + + diff --git a/vendor/github.com/clbanning/mxj/v2/strict.go b/vendor/github.com/clbanning/mxj/v2/strict.go new file mode 100644 index 0000000000..1e769560ba --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/strict.go @@ -0,0 +1,30 @@ +// Copyright 2016 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// strict.go actually addresses setting xml.Decoder attribute +// values. This'll let you parse non-standard XML. + +package mxj + +import ( + "encoding/xml" +) + +// CustomDecoder can be used to specify xml.Decoder attribute +// values, e.g., Strict:false, to be used. By default CustomDecoder +// is nil. If CustomeDecoder != nil, then mxj.XmlCharsetReader variable is +// ignored and must be set as part of the CustomDecoder value, if needed. +// Usage: +// mxj.CustomDecoder = &xml.Decoder{Strict:false} +var CustomDecoder *xml.Decoder + +// useCustomDecoder copy over public attributes from customDecoder +func useCustomDecoder(d *xml.Decoder) { + d.Strict = CustomDecoder.Strict + d.AutoClose = CustomDecoder.AutoClose + d.Entity = CustomDecoder.Entity + d.CharsetReader = CustomDecoder.CharsetReader + d.DefaultSpace = CustomDecoder.DefaultSpace +} + diff --git a/vendor/github.com/clbanning/mxj/v2/struct.go b/vendor/github.com/clbanning/mxj/v2/struct.go new file mode 100644 index 0000000000..9be636cdca --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/struct.go @@ -0,0 +1,54 @@ +// Copyright 2012-2017 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +package mxj + +import ( + "encoding/json" + "errors" + "reflect" + + // "github.com/fatih/structs" +) + +// Create a new Map value from a structure. Error returned if argument is not a structure. +// Only public structure fields are decoded in the Map value. See github.com/fatih/structs#Map +// for handling of "structs" tags. + +// DEPRECATED - import github.com/fatih/structs and cast result of structs.Map to mxj.Map. +// import "github.com/fatih/structs" +// ... +// sm, err := structs.Map() +// if err != nil { +// // handle error +// } +// m := mxj.Map(sm) +// Alernatively uncomment the old source and import in struct.go. +func NewMapStruct(structVal interface{}) (Map, error) { + return nil, errors.New("deprecated - see package documentation") + /* + if !structs.IsStruct(structVal) { + return nil, errors.New("NewMapStruct() error: argument is not type Struct") + } + return structs.Map(structVal), nil + */ +} + +// Marshal a map[string]interface{} into a structure referenced by 'structPtr'. Error returned +// if argument is not a pointer or if json.Unmarshal returns an error. +// json.Unmarshal structure encoding rules are followed to encode public structure fields. +func (mv Map) Struct(structPtr interface{}) error { + // should check that we're getting a pointer. 
+ if reflect.ValueOf(structPtr).Kind() != reflect.Ptr { + return errors.New("mv.Struct() error: argument is not type Ptr") + } + + m := map[string]interface{}(mv) + j, err := json.Marshal(m) + if err != nil { + return err + } + + return json.Unmarshal(j, structPtr) +} diff --git a/vendor/github.com/clbanning/mxj/v2/updatevalues.go b/vendor/github.com/clbanning/mxj/v2/updatevalues.go new file mode 100644 index 0000000000..9e10d84e8d --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/updatevalues.go @@ -0,0 +1,258 @@ +// Copyright 2012-2014, 2017 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// updatevalues.go - modify a value based on path and possibly sub-keys +// TODO(clb): handle simple elements with attributes and NewMapXmlSeq Map values. + +package mxj + +import ( + "fmt" + "strconv" + "strings" +) + +// Update value based on path and possible sub-key values. +// A count of the number of values changed and any error are returned. +// If the count == 0, then no path (and subkeys) matched. +// 'newVal' can be a Map or map[string]interface{} value with a single 'key' that is the key to be modified +// or a string value "key:value[:type]" where type is "bool" or "num" to cast the value. +// 'path' is dot-notation list of keys to traverse; last key in path can be newVal key +// NOTE: 'path' spec does not currently support indexed array references. +// 'subkeys' are "key:value[:type]" entries that must match for path node +// - For attributes prefix the label with the attribute prefix character, by default a +// hyphen, '-', e.g., "-seq:3". (See SetAttrPrefix function.) +// - The subkey can be wildcarded - "key:*" - to require that it's there with some value. +// - If a subkey is preceeded with the '!' character, the key:value[:type] entry is treated as an +// exclusion critera - e.g., "!author:William T. Gaddis". +// +// NOTES: +// 1. Simple elements with attributes need a path terminated as ".#text" to modify the actual value. +// 2. Values in Maps created using NewMapXmlSeq are map[string]interface{} values with a "#text" key. +// 3. If values in 'newVal' or 'subkeys' args contain ":", use SetFieldSeparator to an unused symbol, +// perhaps "|". +func (mv Map) UpdateValuesForPath(newVal interface{}, path string, subkeys ...string) (int, error) { + m := map[string]interface{}(mv) + + // extract the subkeys + var subKeyMap map[string]interface{} + if len(subkeys) > 0 { + var err error + subKeyMap, err = getSubKeyMap(subkeys...) 
+ if err != nil { + return 0, err + } + } + + // extract key and value from newVal + var key string + var val interface{} + switch newVal.(type) { + case map[string]interface{}, Map: + switch newVal.(type) { // "fallthrough is not permitted in type switch" (Spec) + case Map: + newVal = newVal.(Map).Old() + } + if len(newVal.(map[string]interface{})) != 1 { + return 0, fmt.Errorf("newVal map can only have len == 1 - %+v", newVal) + } + for key, val = range newVal.(map[string]interface{}) { + } + case string: // split it as a key:value pair + ss := strings.Split(newVal.(string), fieldSep) + n := len(ss) + if n < 2 || n > 3 { + return 0, fmt.Errorf("unknown newVal spec - %+v", newVal) + } + key = ss[0] + if n == 2 { + val = interface{}(ss[1]) + } else if n == 3 { + switch ss[2] { + case "bool", "boolean": + nv, err := strconv.ParseBool(ss[1]) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to bool - %+v", newVal) + } + val = interface{}(nv) + case "num", "numeric", "float", "int": + nv, err := strconv.ParseFloat(ss[1], 64) + if err != nil { + return 0, fmt.Errorf("can't convert newVal to float64 - %+v", newVal) + } + val = interface{}(nv) + default: + return 0, fmt.Errorf("unknown type for newVal value - %+v", newVal) + } + } + default: + return 0, fmt.Errorf("invalid newVal type - %+v", newVal) + } + + // parse path + keys := strings.Split(path, ".") + + var count int + updateValuesForKeyPath(key, val, m, keys, subKeyMap, &count) + + return count, nil +} + +// navigate the path +func updateValuesForKeyPath(key string, value interface{}, m interface{}, keys []string, subkeys map[string]interface{}, cnt *int) { + // ----- at end node: looking at possible node to get 'key' ---- + if len(keys) == 1 { + updateValue(key, value, m, keys[0], subkeys, cnt) + return + } + + // ----- here we are navigating the path thru the penultimate node -------- + // key of interest is keys[0] - the next in the path + switch keys[0] { + case "*": // wildcard - scan all values + switch m.(type) { + case map[string]interface{}: + for _, v := range m.(map[string]interface{}) { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: + for _, v := range m.([]interface{}) { + switch v.(type) { + // flatten out a list of maps - keys are processed + case map[string]interface{}: + for _, vv := range v.(map[string]interface{}) { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + default: + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + } + } + default: // key - must be map[string]interface{} + switch m.(type) { + case map[string]interface{}: + if v, ok := m.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, v, keys[1:], subkeys, cnt) + } + case []interface{}: // may be buried in list + for _, v := range m.([]interface{}) { + switch v.(type) { + case map[string]interface{}: + if vv, ok := v.(map[string]interface{})[keys[0]]; ok { + updateValuesForKeyPath(key, value, vv, keys[1:], subkeys, cnt) + } + } + } + } + } +} + +// change value if key and subkeys are present +func updateValue(key string, value interface{}, m interface{}, keys0 string, subkeys map[string]interface{}, cnt *int) { + // there are two possible options for the value of 'keys0': map[string]interface, []interface{} + // and 'key' is a key in the map or is a key in a map in a list. 
+ switch m.(type) { + case map[string]interface{}: // gotta have the last key + if keys0 == "*" { + for k := range m.(map[string]interface{}) { + updateValue(key, value, m, k, subkeys, cnt) + } + return + } + endVal, _ := m.(map[string]interface{})[keys0] + + // if newV key is the end of path, replace the value for path-end + // may be []interface{} - means replace just an entry w/ subkeys + // otherwise replace the keys0 value if subkeys are there + // NOTE: this will replace the subkeys, also + if key == keys0 { + switch endVal.(type) { + case map[string]interface{}: + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + case []interface{}: + // without subkeys can't select list member to modify + // so key:value spec is it ... + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + break + } + nv := make([]interface{}, 0) + var valmodified bool + for _, v := range endVal.([]interface{}) { + // check entry subkeys + if hasSubKeys(v, subkeys) { + // replace v with value + nv = append(nv, value) + valmodified = true + (*cnt)++ + continue + } + nv = append(nv, v) + } + if valmodified { + (m.(map[string]interface{}))[keys0] = interface{}(nv) + } + default: // anything else is a strict replacement + if hasSubKeys(m, subkeys) { + (m.(map[string]interface{}))[keys0] = value + (*cnt)++ + } + } + return + } + + // so value is for an element of endVal + // if endVal is a map then 'key' must be there w/ subkeys + // if endVal is a list then 'key' must be in a list member w/ subkeys + switch endVal.(type) { + case map[string]interface{}: + if !hasSubKeys(endVal, subkeys) { + return + } + if _, ok := (endVal.(map[string]interface{}))[key]; ok { + (endVal.(map[string]interface{}))[key] = value + (*cnt)++ + } + case []interface{}: // keys0 points to a list, check subkeys + for _, v := range endVal.([]interface{}) { + // got to be a map so we can replace value for 'key' + vv, vok := v.(map[string]interface{}) + if !vok { + continue + } + if _, ok := vv[key]; !ok { + continue + } + if !hasSubKeys(vv, subkeys) { + continue + } + vv[key] = value + (*cnt)++ + } + } + case []interface{}: // key may be in a list member + // don't need to handle keys0 == "*"; we're looking at everything, anyway. + for _, v := range m.([]interface{}) { + // only map values - we're looking for 'key' + mm, ok := v.(map[string]interface{}) + if !ok { + continue + } + if _, ok := mm[key]; !ok { + continue + } + if !hasSubKeys(mm, subkeys) { + continue + } + mm[key] = value + (*cnt)++ + } + } + + // return +} diff --git a/vendor/github.com/clbanning/mxj/v2/xml.go b/vendor/github.com/clbanning/mxj/v2/xml.go new file mode 100644 index 0000000000..2ea1bc25a8 --- /dev/null +++ b/vendor/github.com/clbanning/mxj/v2/xml.go @@ -0,0 +1,1414 @@ +// Copyright 2012-2016, 2018-2019 Charles Banning. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file + +// xml.go - basically the core of X2j for map[string]interface{} values. +// NewMapXml, NewMapXmlReader, mv.Xml, mv.XmlWriter +// see x2j and j2x for wrappers to provide end-to-end transformation of XML and JSON messages. + +package mxj + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ------------------- NewMapXml & NewMapXmlReader ... ------------------------- + +// If XmlCharsetReader != nil, it will be used to decode the XML, if required. 
+// Note: if CustomDecoder != nil, then XmlCharsetReader is ignored; +// set the CustomDecoder attribute instead. +// import ( +// charset "code.google.com/p/go-charset/charset" +// github.com/clbanning/mxj +// ) +// ... +// mxj.XmlCharsetReader = charset.NewReader +// m, merr := mxj.NewMapXml(xmlValue) +var XmlCharsetReader func(charset string, input io.Reader) (io.Reader, error) + +// NewMapXml - convert a XML doc into a Map +// (This is analogous to unmarshalling a JSON string to map[string]interface{} using json.Unmarshal().) +// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible. +// +// Converting XML to JSON is a simple as: +// ... +// mapVal, merr := mxj.NewMapXml(xmlVal) +// if merr != nil { +// // handle error +// } +// jsonVal, jerr := mapVal.Json() +// if jerr != nil { +// // handle error +// } +// +// NOTES: +// 1. Declarations, directives, process instructions and comments are NOT parsed. +// 2. The 'xmlVal' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 3. If CoerceKeysToLower() has been called, then all key values will be lower case. +// 4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +// 5. If DisableTrimWhiteSpace(b bool) has been called, then all values will be trimmed or not. 'true' by default. +func NewMapXml(xmlVal []byte, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + return xmlToMap(xmlVal, r) +} + +// Get next XML doc from an io.Reader as a Map value. Returns Map value. +// NOTES: +// 1. Declarations, directives, process instructions and comments are NOT parsed. +// 2. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 3. If CoerceKeysToLower() has been called, then all key values will be lower case. +// 4. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXmlReader(xmlReader io.Reader, cast ...bool) (Map, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + + // We need to put an *os.File reader in a ByteReader or the xml.NewDecoder + // will wrap it in a bufio.Reader and seek on the file beyond where the + // xml.Decoder parses! + if _, ok := xmlReader.(io.ByteReader); !ok { + xmlReader = myByteReader(xmlReader) // see code at EOF + } + + // build the map + return xmlReaderToMap(xmlReader, r) +} + +// Get next XML doc from an io.Reader as a Map value. Returns Map value and slice with the raw XML. +// NOTES: +// 1. Declarations, directives, process instructions and comments are NOT parsed. +// 2. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte +// using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact. +// See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large +// data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body +// you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call. +// 3. The 'raw' return value may be larger than the XML text value. +// 4. The 'xmlReader' will be parsed looking for an xml.StartElement, so BOM and other +// extraneous xml.CharData will be ignored unless io.EOF is reached first. +// 5. 
If CoerceKeysToLower() has been called, then all key values will be lower case. +// 6. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case. +func NewMapXmlReaderRaw(xmlReader io.Reader, cast ...bool) (Map, []byte, error) { + var r bool + if len(cast) == 1 { + r = cast[0] + } + // create TeeReader so we can retrieve raw XML + buf := make([]byte, 0) + wb := bytes.NewBuffer(buf) + trdr := myTeeReader(xmlReader, wb) // see code at EOF + + m, err := xmlReaderToMap(trdr, r) + + // retrieve the raw XML that was decoded + b := wb.Bytes() + + if err != nil { + return nil, b, err + } + + return m, b, nil +} + +// xmlReaderToMap() - parse a XML io.Reader to a map[string]interface{} value +func xmlReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) { + // parse the Reader + p := xml.NewDecoder(rdr) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlToMapParser("", nil, p, r) +} + +// xmlToMap - convert a XML doc into map[string]interface{} value +func xmlToMap(doc []byte, r bool) (map[string]interface{}, error) { + b := bytes.NewReader(doc) + p := xml.NewDecoder(b) + if CustomDecoder != nil { + useCustomDecoder(p) + } else { + p.CharsetReader = XmlCharsetReader + } + return xmlToMapParser("", nil, p, r) +} + +// ===================================== where the work happens ============================= + +// PrependAttrWithHyphen. Prepend attribute tags with a hyphen. +// Default is 'true'. (Not applicable to NewMapXmlSeq(), mv.XmlSeq(), etc.) +// Note: +// If 'false', unmarshaling and marshaling is not symmetric. Attributes will be +// marshal'd as attr and may be part of a list. +func PrependAttrWithHyphen(v bool) { + if v { + attrPrefix = "-" + lenAttrPrefix = len(attrPrefix) + return + } + attrPrefix = "" + lenAttrPrefix = len(attrPrefix) +} + +// Include sequence id with inner tags. - per Sean Murphy, murphysean84@gmail.com. +var includeTagSeqNum bool + +// IncludeTagSeqNum - include a "_seq":N key:value pair with each inner tag, denoting +// its position when parsed. This is of limited usefulness, since list values cannot +// be tagged with "_seq" without changing their depth in the Map. +// So THIS SHOULD BE USED WITH CAUTION - see the test cases. Here's a sample of what +// you get. +/* + + + + + hello + + + parses as: + + { + Obj:{ + "-c":"la", + "-h":"da", + "-x":"dee", + "intObj":[ + { + "-id"="3", + "_seq":"0" // if mxj.Cast is passed, then: "_seq":0 + }, + { + "-id"="2", + "_seq":"2" + }], + "intObj1":{ + "-id":"1", + "_seq":"1" + }, + "StrObj":{ + "#text":"hello", // simple element value gets "#text" tag + "_seq":"3" + } + } + } +*/ +func IncludeTagSeqNum(b ...bool) { + if len(b) == 0 { + includeTagSeqNum = !includeTagSeqNum + } else if len(b) == 1 { + includeTagSeqNum = b[0] + } +} + +// all keys will be "lower case" +var lowerCase bool + +// Coerce all tag values to keys in lower case. This is useful if you've got sources with variable +// tag capitalization, and you want to use m.ValuesForKeys(), etc., with the key or path spec +// in lower case. +// CoerceKeysToLower() will toggle the coercion flag true|false - on|off +// CoerceKeysToLower(true|false) will set the coercion flag on|off +// +// NOTE: only recognized by NewMapXml, NewMapXmlReader, and NewMapXmlReaderRaw functions as well as +// the associated HandleXmlReader and HandleXmlReaderRaw. 
+func CoerceKeysToLower(b ...bool) { + if len(b) == 0 { + lowerCase = !lowerCase + } else if len(b) == 1 { + lowerCase = b[0] + } +} + +// disableTrimWhiteSpace sets if the white space should be removed or not +var disableTrimWhiteSpace bool +var trimRunes = "\t\r\b\n " + +// DisableTrimWhiteSpace set if the white space should be trimmed or not. By default white space is always trimmed. If +// no argument is provided, trim white space will be disabled. +func DisableTrimWhiteSpace(b ...bool) { + if len(b) == 0 { + disableTrimWhiteSpace = true + } else { + disableTrimWhiteSpace = b[0] + } + + if disableTrimWhiteSpace { + trimRunes = "\t\r\b\n" + } else { + trimRunes = "\t\r\b\n " + } +} + +// 25jun16: Allow user to specify the "prefix" character for XML attribute key labels. +// We do this by replacing '`' constant with attrPrefix var, replacing useHyphen with attrPrefix = "", +// and adding a SetAttrPrefix(s string) function. + +var attrPrefix string = `-` // the default +var lenAttrPrefix int = 1 // the default + +// SetAttrPrefix changes the default, "-", to the specified value, s. +// SetAttrPrefix("") is the same as PrependAttrWithHyphen(false). +// (Not applicable for NewMapXmlSeq(), mv.XmlSeq(), etc.) +func SetAttrPrefix(s string) { + attrPrefix = s + lenAttrPrefix = len(attrPrefix) +} + +// 18jan17: Allows user to specify if the map keys should be in snake case instead +// of the default hyphenated notation. +var snakeCaseKeys bool + +// CoerceKeysToSnakeCase changes the default, false, to the specified value, b. +// Note: the attribute prefix will be a hyphen, '-', or what ever string value has +// been specified using SetAttrPrefix. +func CoerceKeysToSnakeCase(b ...bool) { + if len(b) == 0 { + snakeCaseKeys = !snakeCaseKeys + } else if len(b) == 1 { + snakeCaseKeys = b[0] + } +} + +// 10jan19: use of pull request #57 should be conditional - legacy code assumes +// numeric values are float64. +var castToInt bool + +// CastValuesToInt tries to coerce numeric valus to int64 or uint64 instead of the +// default float64. Repeated calls with no argument will toggle this on/off, or this +// handling will be set with the value of 'b'. +func CastValuesToInt(b ...bool) { + if len(b) == 0 { + castToInt = !castToInt + } else if len(b) == 1 { + castToInt = b[0] + } +} + +// 05feb17: support processing XMPP streams (issue #36) +var handleXMPPStreamTag bool + +// HandleXMPPStreamTag causes decoder to parse XMPP elements. +// If called with no argument, XMPP stream element handling is toggled on/off. +// (See xmppStream_test.go for example.) +// If called with NewMapXml, NewMapXmlReader, New MapXmlReaderRaw the "stream" +// element will be returned as: +// map["stream"]interface{}{map[-]interface{}}. +// If called with NewMapSeq, NewMapSeqReader, NewMapSeqReaderRaw the "stream" +// element will be returned as: +// map["stream:stream"]interface{}{map["#attr"]interface{}{map[string]interface{}}} +// where the "#attr" values have "#text" and "#seq" keys. (See NewMapXmlSeq.) +func HandleXMPPStreamTag(b ...bool) { + if len(b) == 0 { + handleXMPPStreamTag = !handleXMPPStreamTag + } else if len(b) == 1 { + handleXMPPStreamTag = b[0] + } +} + +// 21jan18 - decode all values as map["#text":value] (issue #56) +var decodeSimpleValuesAsMap bool + +// DecodeSimpleValuesAsMap forces all values to be decoded as map["#text":]. +// If called with no argument, the decoding is toggled on/off. +// +// By default the NewMapXml functions decode simple values without attributes as +// map[:]. 
This function causes simple values without attributes to be +// decoded the same as simple values with attributes - map[:map["#text":]]. +func DecodeSimpleValuesAsMap(b ...bool) { + if len(b) == 0 { + decodeSimpleValuesAsMap = !decodeSimpleValuesAsMap + } else if len(b) == 1 { + decodeSimpleValuesAsMap = b[0] + } +} + +// xmlToMapParser (2015.11.12) - load a 'clean' XML doc into a map[string]interface{} directly. +// A refactoring of xmlToTreeParser(), markDuplicate() and treeToMap() - here, all-in-one. +// We've removed the intermediate *node tree with the allocation and subsequent rescanning. +func xmlToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) { + if lowerCase { + skey = strings.ToLower(skey) + } + if snakeCaseKeys { + skey = strings.Replace(skey, "-", "_", -1) + } + + // NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'. + // Unless 'skey' is a simple element w/o attributes, in which case the xml.CharData value is the value. + var n, na map[string]interface{} + var seq int // for includeTagSeqNum + + // Allocate maps and load attributes, if any. + // NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through + // to get StartElement then recurse with skey==xml.StartElement.Name.Local + // where we begin allocating map[string]interface{} values 'n' and 'na'. + if skey != "" { + n = make(map[string]interface{}) // old n + na = make(map[string]interface{}) // old n.nodes + if len(a) > 0 { + for _, v := range a { + if snakeCaseKeys { + v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1) + } + var key string + key = attrPrefix + v.Name.Local + if lowerCase { + key = strings.ToLower(key) + } + if xmlEscapeCharsDecoder { // per issue#84 + v.Value = escapeChars(v.Value) + } + na[key] = cast(v.Value, r, key) + } + } + } + // Return XMPP message. + if handleXMPPStreamTag && skey == "stream" { + n[skey] = na + return n, nil + } + + for { + t, err := p.Token() + if err != nil { + if err != io.EOF { + return nil, errors.New("xml.Decoder.Token() - " + err.Error()) + } + return nil, err + } + switch t.(type) { + case xml.StartElement: + tt := t.(xml.StartElement) + + // First call to xmlToMapParser() doesn't pass xml.StartElement - the map key. + // So when the loop is first entered, the first token is the root tag along + // with any attributes, which we process here. + // + // Subsequent calls to xmlToMapParser() will pass in tag+attributes for + // processing before getting the next token which is the element value, + // which is done above. + if skey == "" { + return xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + } + + // If not initializing the map, parse the element. + // len(nn) == 1, necessarily - it is just an 'n'. + nn, err := xmlToMapParser(tt.Name.Local, tt.Attr, p, r) + if err != nil { + return nil, err + } + + // The nn map[string]interface{} value is a na[nn_key] value. + // We need to see if nn_key already exists - means we're parsing a list. + // This may require converting na[nn_key] value into []interface{} type. + // First, extract the key:val for the map - it's a singleton. + // Note: + // * if CoerceKeysToLower() called, then key will be lower case. + // * if CoerceKeysToSnakeCase() called, then key will be converted to snake case. + var key string + var val interface{} + for key, val = range nn { + break + } + + // IncludeTagSeqNum requests that the element be augmented with a "_seq" sub-element. + // In theory, we don't need this if len(na) == 1. 
But, we don't know what might + // come next - we're only parsing forward. So if you ask for 'includeTagSeqNum' you + // get it on every element. (Personally, I never liked this, but I added it on request + // and did get a $50 Amazon gift card in return - now we support it for backwards compatibility!) + if includeTagSeqNum { + switch val.(type) { + case []interface{}: + // noop - There's no clean way to handle this w/o changing message structure. + case map[string]interface{}: + val.(map[string]interface{})["_seq"] = seq // will overwrite an "_seq" XML tag + seq++ + case interface{}: // a non-nil simple element: string, float64, bool + v := map[string]interface{}{"#text": val} + v["_seq"] = seq + seq++ + val = v + } + } + + // 'na' holding sub-elements of n. + // See if 'key' already exists. + // If 'key' exists, then this is a list, if not just add key:val to na. + if v, ok := na[key]; ok { + var a []interface{} + switch v.(type) { + case []interface{}: + a = v.([]interface{}) + default: // anything else - note: v.(type) != nil + a = []interface{}{v} + } + a = append(a, val) + na[key] = a + } else { + na[key] = val // save it as a singleton + } + case xml.EndElement: + // len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case. + if len(n) == 0 { + // If len(na)==0 we have an empty element == ""; + // it has no xml.Attr nor xml.CharData. + // Note: in original node-tree parser, val defaulted to ""; + // so we always had the default if len(node.nodes) == 0. + if len(na) > 0 { + n[skey] = na + } else { + n[skey] = "" // empty element + } + } else if len(n) == 1 && len(na) > 0 { + // it's a simple element w/ no attributes w/ subelements + for _, v := range n { + na["#text"] = v + } + n[skey] = na + } + return n, nil + case xml.CharData: + // clean up possible noise + tt := strings.Trim(string(t.(xml.CharData)), trimRunes) + if xmlEscapeCharsDecoder { // issue#84 + tt = escapeChars(tt) + } + if len(tt) > 0 { + if len(na) > 0 || decodeSimpleValuesAsMap { + na["#text"] = cast(tt, r, "#text") + } else if skey != "" { + n[skey] = cast(tt, r, skey) + } else { + // per Adrian (http://www.adrianlungu.com/) catch stray text + // in decoder stream - + // https://github.com/clbanning/mxj/pull/14#issuecomment-182816374 + // NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get + // a p.Token() decoding error when the BOM is UTF-16 or UTF-32. + continue + } + } + default: + // noop + } + } +} + +var castNanInf bool + +// Cast "Nan", "Inf", "-Inf" XML values to 'float64'. +// By default, these values will be decoded as 'string'. 
+func CastNanInf(b ...bool) { + if len(b) == 0 { + castNanInf = !castNanInf + } else if len(b) == 1 { + castNanInf = b[0] + } +} + +// cast - try to cast string values to bool or float64 +// 't' is the tag key that can be checked for 'not-casting' +func cast(s string, r bool, t string) interface{} { + if checkTagToSkip != nil && t != "" && checkTagToSkip(t) { + // call the check-function here with 't[0]' + // if 'true' return s + return s + } + + if r { + // handle nan and inf + if !castNanInf { + switch strings.ToLower(s) { + case "nan", "inf", "-inf": + return s + } + } + + // handle numeric strings ahead of boolean + if castToInt { + if f, err := strconv.ParseInt(s, 10, 64); err == nil { + return f + } + if f, err := strconv.ParseUint(s, 10, 64); err == nil { + return f + } + } + + if castToFloat { + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + // ParseBool treats "1"==true & "0"==false, we've already scanned those + // values as float64. See if value has 't' or 'f' as initial screen to + // minimize calls to ParseBool; also, see if len(s) < 6. + if castToBool { + if len(s) > 0 && len(s) < 6 { + switch s[:1] { + case "t", "T", "f", "F": + if b, err := strconv.ParseBool(s); err == nil { + return b + } + } + } + } + } + return s +} + +// pull request, #59 +var castToFloat = true + +// CastValuesToFloat can be used to skip casting to float64 when +// "cast" argument is 'true' in NewMapXml, etc. +// Default is true. +func CastValuesToFloat(b ...bool) { + if len(b) == 0 { + castToFloat = !castToFloat + } else if len(b) == 1 { + castToFloat = b[0] + } +} + +var castToBool = true + +// CastValuesToBool can be used to skip casting to bool when +// "cast" argument is 'true' in NewMapXml, etc. +// Default is true. +func CastValuesToBool(b ...bool) { + if len(b) == 0 { + castToBool = !castToBool + } else if len(b) == 1 { + castToBool = b[0] + } +} + +// checkTagToSkip - switch to address Issue #58 + +var checkTagToSkip func(string) bool + +// SetCheckTagToSkipFunc registers function to test whether the value +// for a tag should be cast to bool or float64 when "cast" argument is 'true'. +// (Dot tag path notation is not supported.) +// NOTE: key may be "#text" if it's a simple element with attributes +// or "decodeSimpleValuesAsMap == true". +// NOTE: does not apply to NewMapXmlSeq... functions. +func SetCheckTagToSkipFunc(fn func(string) bool) { + checkTagToSkip = fn +} + +// ------------------ END: NewMapXml & NewMapXmlReader ------------------------- + +// ------------------ mv.Xml & mv.XmlWriter - from j2x ------------------------ + +const ( + DefaultRootTag = "doc" +) + +var useGoXmlEmptyElemSyntax bool + +// XmlGoEmptyElemSyntax() - rather than . +// Go's encoding/xml package marshals empty XML elements as . By default this package +// encodes empty elements as . If you're marshaling Map values that include structures +// (which are passed to xml.Marshal for encoding), this will let you conform to the standard package. +func XmlGoEmptyElemSyntax() { + useGoXmlEmptyElemSyntax = true +} + +// XmlDefaultEmptyElemSyntax() - rather than . +// Return XML encoding for empty elements to the default package setting. +// Reverses effect of XmlGoEmptyElemSyntax(). +func XmlDefaultEmptyElemSyntax() { + useGoXmlEmptyElemSyntax = false +} + +// ------- issue #88 ---------- +// xmlCheckIsValid set switch to force decoding the encoded XML to +// see if it is valid XML. +var xmlCheckIsValid bool + +// XmlCheckIsValid forces the encoded XML to be checked for validity. 
+func XmlCheckIsValid(b ...bool) { + if len(b) == 1 { + xmlCheckIsValid = b[0] + return + } + xmlCheckIsValid = !xmlCheckIsValid +} + +// Encode a Map as XML. The companion of NewMapXml(). +// The following rules apply. +// - The key label "#text" is treated as the value for a simple element with attributes. +// - Map keys that begin with a hyphen, '-', are interpreted as attributes. +// It is an error if the attribute doesn't have a []byte, string, number, or boolean value. +// - Map value type encoding: +// > string, bool, float64, int, int32, int64, float32: per "%v" formating +// > []bool, []uint8: by casting to string +// > structures, etc.: handed to xml.Marshal() - if there is an error, the element +// value is "UNKNOWN" +// - Elements with only attribute values or are null are terminated using "/>". +// - If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, possible. +// Thus, `{ "key":"value" }` encodes as "value". +// - To encode empty elements in a syntax consistent with encoding/xml call UseGoXmlEmptyElementSyntax(). +// The attributes tag=value pairs are alphabetized by "tag". Also, when encoding map[string]interface{} values - +// complex elements, etc. - the key:value pairs are alphabetized by key so the resulting tags will appear sorted. +func (mv Map) Xml(rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + var err error + b := new(bytes.Buffer) + p := new(pretty) // just a stub + + if len(m) == 1 && len(rootTag) == 0 { + for key, value := range m { + // if it an array, see if all values are map[string]interface{} + // we force a new root tag if we'll end up with no key:value in the list + // so: key:[string_val, bool:true] --> string_valtrue + switch value.(type) { + case []interface{}: + for _, v := range value.([]interface{}) { + switch v.(type) { + case map[string]interface{}: // noop + default: // anything else + err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p) + goto done + } + } + } + err = marshalMapToXmlIndent(false, b, key, value, p) + } + } else if len(rootTag) == 1 { + err = marshalMapToXmlIndent(false, b, rootTag[0], m, p) + } else { + err = marshalMapToXmlIndent(false, b, DefaultRootTag, m, p) + } +done: + if xmlCheckIsValid { + d := xml.NewDecoder(bytes.NewReader(b.Bytes())) + for { + _, err = d.Token() + if err == io.EOF { + err = nil + break + } else if err != nil { + return nil, err + } + } + } + return b.Bytes(), err +} + +// The following implementation is provided only for symmetry with NewMapXmlReader[Raw] +// The names will also provide a key for the number of return arguments. + +// Writes the Map as XML on the Writer. +// See Xml() for encoding rules. +func (mv Map) XmlWriter(xmlWriter io.Writer, rootTag ...string) error { + x, err := mv.Xml(rootTag...) + if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// Writes the Map as XML on the Writer. []byte is the raw XML that was written. +// See Xml() for encoding rules. +/* +func (mv Map) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) { + x, err := mv.Xml(rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} +*/ + +// Writes the Map as pretty XML on the Writer. +// See Xml() for encoding rules. +func (mv Map) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error { + x, err := mv.XmlIndent(prefix, indent, rootTag...) 
+ if err != nil { + return err + } + + _, err = xmlWriter.Write(x) + return err +} + +// Writes the Map as pretty XML on the Writer. []byte is the raw XML that was written. +// See Xml() for encoding rules. +/* +func (mv Map) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) { + x, err := mv.XmlIndent(prefix, indent, rootTag...) + if err != nil { + return x, err + } + + _, err = xmlWriter.Write(x) + return x, err +} +*/ + +// -------------------- END: mv.Xml & mv.XmlWriter ------------------------------- + +// -------------- Handle XML stream by processing Map value -------------------- + +// Default poll delay to keep Handler from spinning on an open stream +// like sitting on os.Stdin waiting for imput. +var xhandlerPollInterval = time.Millisecond + +// Bulk process XML using handlers that process a Map value. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument to a go routine in handler and return 'true'. +func HandleXmlReader(xmlReader io.Reader, mapHandler func(Map) bool, errHandler func(error) bool) error { + var n int + for { + m, merr := NewMapXmlReader(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// Bulk process XML using handlers that process a Map value and the raw XML. +// 'rdr' is an io.Reader for XML (stream) +// 'mapHandler' is the Map and raw XML - []byte - processor. Return of 'false' stops io.Reader processing. +// 'errHandler' is the error and raw XML processor. Return of 'false' stops io.Reader processing and returns the error. +// Note: mapHandler() and errHandler() calls are blocking, so reading and processing of messages is serialized. +// This means that you can stop reading the file on error or after processing a particular message. +// To have reading and handling run concurrently, pass argument(s) to a go routine in handler and return 'true'. +// See NewMapXmlReaderRaw for comment on performance associated with retrieving raw XML from a Reader. 
+func HandleXmlReaderRaw(xmlReader io.Reader, mapHandler func(Map, []byte) bool, errHandler func(error, []byte) bool) error { + var n int + for { + m, raw, merr := NewMapXmlReaderRaw(xmlReader) + n++ + + // handle error condition with errhandler + if merr != nil && merr != io.EOF { + merr = fmt.Errorf("[xmlReader: %d] %s", n, merr.Error()) + if ok := errHandler(merr, raw); !ok { + // caused reader termination + return merr + } + continue + } + + // pass to maphandler + if len(m) != 0 { + if ok := mapHandler(m, raw); !ok { + break + } + } else if merr != io.EOF { + time.Sleep(xhandlerPollInterval) + } + + if merr == io.EOF { + break + } + } + return nil +} + +// ----------------- END: Handle XML stream by processing Map value -------------- + +// -------- a hack of io.TeeReader ... need one that's an io.ByteReader for xml.NewDecoder() ---------- + +// This is a clone of io.TeeReader with the additional method t.ReadByte(). +// Thus, this TeeReader is also an io.ByteReader. +// This is necessary because xml.NewDecoder uses a ByteReader not a Reader. It appears to have been written +// with bufio.Reader or bytes.Reader in mind ... not a generic io.Reader, which doesn't have to have ReadByte().. +// If NewDecoder is passed a Reader that does not satisfy ByteReader() it wraps the Reader with +// bufio.NewReader and uses ReadByte rather than Read that runs the TeeReader pipe logic. + +type teeReader struct { + r io.Reader + w io.Writer + b []byte +} + +func myTeeReader(r io.Reader, w io.Writer) io.Reader { + b := make([]byte, 1) + return &teeReader{r, w, b} +} + +// need for io.Reader - but we don't use it ... +func (t *teeReader) Read(p []byte) (int, error) { + return 0, nil +} + +func (t *teeReader) ReadByte() (byte, error) { + n, err := t.r.Read(t.b) + if n > 0 { + if _, err := t.w.Write(t.b[:1]); err != nil { + return t.b[0], err + } + } + return t.b[0], err +} + +// For use with NewMapXmlReader & NewMapXmlSeqReader. +type byteReader struct { + r io.Reader + b []byte +} + +func myByteReader(r io.Reader) io.Reader { + b := make([]byte, 1) + return &byteReader{r, b} +} + +// Need for io.Reader interface ... +// Needed if reading a malformed http.Request.Body - issue #38. +func (b *byteReader) Read(p []byte) (int, error) { + return b.r.Read(p) +} + +func (b *byteReader) ReadByte() (byte, error) { + _, err := b.r.Read(b.b) + if len(b.b) > 0 { + return b.b[0], nil + } + var c byte + return c, err +} + +// ----------------------- END: io.TeeReader hack ----------------------------------- + +// ---------------------- XmlIndent - from j2x package ---------------------------- + +// Encode a map[string]interface{} as a pretty XML string. +// See Xml for encoding rules. 
+func (mv Map) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) { + m := map[string]interface{}(mv) + + var err error + b := new(bytes.Buffer) + p := new(pretty) + p.indent = indent + p.padding = prefix + + if len(m) == 1 && len(rootTag) == 0 { + // this can extract the key for the single map element + // use it if it isn't a key for a list + for key, value := range m { + if _, ok := value.([]interface{}); ok { + err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p) + } else { + err = marshalMapToXmlIndent(true, b, key, value, p) + } + } + } else if len(rootTag) == 1 { + err = marshalMapToXmlIndent(true, b, rootTag[0], m, p) + } else { + err = marshalMapToXmlIndent(true, b, DefaultRootTag, m, p) + } + if xmlCheckIsValid { + d := xml.NewDecoder(bytes.NewReader(b.Bytes())) + for { + _, err = d.Token() + if err == io.EOF { + err = nil + break + } else if err != nil { + return nil, err + } + } + } + return b.Bytes(), err +} + +type pretty struct { + indent string + cnt int + padding string + mapDepth int + start int +} + +func (p *pretty) Indent() { + p.padding += p.indent + p.cnt++ +} + +func (p *pretty) Outdent() { + if p.cnt > 0 { + p.padding = p.padding[:len(p.padding)-len(p.indent)] + p.cnt-- + } +} + +// where the work actually happens +// returns an error if an attribute is not atomic +// NOTE: 01may20 - replaces mapToXmlIndent(); uses bytes.Buffer instead for string appends. +func marshalMapToXmlIndent(doIndent bool, b *bytes.Buffer, key string, value interface{}, pp *pretty) error { + var err error + var endTag bool + var isSimple bool + var elen int + p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start} + + // per issue #48, 18apr18 - try and coerce maps to map[string]interface{} + // Don't need for mapToXmlSeqIndent, since maps there are decoded by NewMapXmlSeq(). + if reflect.ValueOf(value).Kind() == reflect.Map { + switch value.(type) { + case map[string]interface{}: + default: + val := make(map[string]interface{}) + vv := reflect.ValueOf(value) + keys := vv.MapKeys() + for _, k := range keys { + val[fmt.Sprint(k)] = vv.MapIndex(k).Interface() + } + value = val + } + } + + // 14jul20. The following block of code has become something of a catch all for odd stuff + // that might be passed in as a result of casting an arbitrary map[] to an mxj.Map + // value and then call m.Xml or m.XmlIndent. See issue #71 (and #73) for such edge cases. 
+ switch value.(type) { + // these types are handled during encoding + case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32, json.Number: + case []map[string]interface{}, []string, []float64, []bool, []int, []int32, []int64, []float32, []json.Number: + case []interface{}: + case nil: + value = "" + default: + // see if value is a struct, if so marshal using encoding/xml package + if reflect.ValueOf(value).Kind() == reflect.Struct { + if v, err := xml.Marshal(value); err != nil { + return err + } else { + value = string(v) + } + } else { + // coerce eveything else into a string value + value = fmt.Sprint(value) + } + } + + // start the XML tag with required indentaton and padding + if doIndent { + switch value.(type) { + case []interface{}, []string: + default: + if _, err = b.WriteString(p.padding); err != nil { + return err + } + } + } + switch value.(type) { + case []interface{}: + default: + if _, err = b.WriteString(`<` + key); err != nil { + return err + } + } + + switch value.(type) { + case map[string]interface{}: + vv := value.(map[string]interface{}) + lenvv := len(vv) + // scan out attributes - attribute keys have prepended attrPrefix + attrlist := make([][2]string, len(vv)) + var n int + var ss string + for k, v := range vv { + if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { + switch v.(type) { + case string: + if xmlEscapeChars { + ss = escapeChars(v.(string)) + } else { + ss = v.(string) + } + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = ss + case float64, bool, int, int32, int64, float32, json.Number: + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = fmt.Sprintf("%v", v) + case []byte: + if xmlEscapeChars { + ss = escapeChars(string(v.([]byte))) + } else { + ss = string(v.([]byte)) + } + attrlist[n][0] = k[lenAttrPrefix:] + attrlist[n][1] = ss + default: + return fmt.Errorf("invalid attribute value for: %s:<%T>", k, v) + } + n++ + } + } + if n > 0 { + attrlist = attrlist[:n] + sort.Sort(attrList(attrlist)) + for _, v := range attrlist { + if _, err = b.WriteString(` ` + v[0] + `="` + v[1] + `"`); err != nil { + return err + } + } + } + // only attributes? + if n == lenvv { + if useGoXmlEmptyElemSyntax { + if _, err = b.WriteString(`"); err != nil { + return err + } + } else { + if _, err = b.WriteString(`/>`); err != nil { + return err + } + } + break + } + + // simple element? Note: '#text" is an invalid XML tag. 
+ isComplex := false + if v, ok := vv["#text"]; ok && n+1 == lenvv { + // just the value and attributes + switch v.(type) { + case string: + if xmlEscapeChars { + v = escapeChars(v.(string)) + } else { + v = v.(string) + } + case []byte: + if xmlEscapeChars { + v = escapeChars(string(v.([]byte))) + } else { + v = string(v.([]byte)) + } + } + if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil { + return err + } + endTag = true + elen = 1 + isSimple = true + break + } else if ok { + // need to handle when there are subelements in addition to the simple element value + // issue #90 + switch v.(type) { + case string: + if xmlEscapeChars { + v = escapeChars(v.(string)) + } else { + v = v.(string) + } + case []byte: + if xmlEscapeChars { + v = escapeChars(string(v.([]byte))) + } else { + v = string(v.([]byte)) + } + } + if _, err = b.WriteString(">" + fmt.Sprintf("%v", v)); err != nil { + return err + } + isComplex = true + } + + // close tag with possible attributes + if !isComplex { + if _, err = b.WriteString(">"); err != nil { + return err + } + } + if doIndent { + // *s += "\n" + if _, err = b.WriteString("\n"); err != nil { + return err + } + } + // something more complex + p.mapDepth++ + // extract the map k:v pairs and sort on key + elemlist := make([][2]interface{}, len(vv)) + n = 0 + for k, v := range vv { + if k == "#text" { + // simple element handled above + continue + } + if lenAttrPrefix > 0 && lenAttrPrefix < len(k) && k[:lenAttrPrefix] == attrPrefix { + continue + } + elemlist[n][0] = k + elemlist[n][1] = v + n++ + } + elemlist = elemlist[:n] + sort.Sort(elemList(elemlist)) + var i int + for _, v := range elemlist { + switch v[1].(type) { + case []interface{}: + default: + if i == 0 && doIndent { + p.Indent() + } + } + i++ + if err := marshalMapToXmlIndent(doIndent, b, v[0].(string), v[1], p); err != nil { + return err + } + switch v[1].(type) { + case []interface{}: // handled in []interface{} case + default: + if doIndent { + p.Outdent() + } + } + i-- + } + p.mapDepth-- + endTag = true + elen = 1 // we do have some content ... + case []interface{}: + // special case - found during implementing Issue #23 + if len(value.([]interface{})) == 0 { + if doIndent { + if _, err = b.WriteString(p.padding + p.indent); err != nil { + return err + } + } + if _, err = b.WriteString("<" + key); err != nil { + return err + } + elen = 0 + endTag = true + break + } + for _, v := range value.([]interface{}) { + if doIndent { + p.Indent() + } + if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil { + return err + } + if doIndent { + p.Outdent() + } + } + return nil + case []string: + // This was added by https://github.com/slotix ... not a type that + // would be encountered if mv generated from NewMapXml, NewMapJson. + // Could be encountered in AnyXml(), so we'll let it stay, though + // it should be merged with case []interface{}, above. 
+		//quick fix for []string type
+		//[]string should be treated exactly as []interface{}
+		if len(value.([]string)) == 0 {
+			if doIndent {
+				if _, err = b.WriteString(p.padding + p.indent); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString("<" + key); err != nil {
+				return err
+			}
+			elen = 0
+			endTag = true
+			break
+		}
+		for _, v := range value.([]string) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := marshalMapToXmlIndent(doIndent, b, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			// *s += p.padding
+			if _, err = b.WriteString(p.padding); err != nil {
+				return err
+			}
+		}
+		if _, err = b.WriteString("<" + key); err != nil {
+			return err
+		}
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			v := value.(string)
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		case float64, bool, int, int32, int64, float32, json.Number:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v) // always > 0
+			if _, err = b.WriteString(">" + v); err != nil {
+				return err
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			v := string(value.([]byte))
+			if xmlEscapeChars {
+				v = escapeChars(v)
+			}
+			elen = len(v)
+			if elen > 0 {
+				// *s += ">" + v
+				if _, err = b.WriteString(">" + v); err != nil {
+					return err
+				}
+			}
+		default:
+			if _, err = b.WriteString(">"); err != nil {
+				return err
+			}
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				if _, err = b.WriteString(">UNKNOWN"); err != nil {
+					return err
+				}
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					if _, err = b.Write(v); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag {
+		if doIndent {
+			if !isSimple {
+				if _, err = b.WriteString(p.padding); err != nil {
+					return err
+				}
+			}
+		}
+		if elen > 0 || useGoXmlEmptyElemSyntax {
+			if elen == 0 {
+				if _, err = b.WriteString(">"); err != nil {
+					return err
+				}
+			}
+			if _, err = b.WriteString(`</` + key + `>`); err != nil {
+				return err
+			}
+		} else {
+			if _, err = b.WriteString(`/>`); err != nil {
+				return err
+			}
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			if _, err = b.WriteString("\n"); err != nil {
+				return err
+			}
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// ============================ sort interface implementation =================
+
+type attrList [][2]string
+
+func (a attrList) Len() int {
+	return len(a)
+}
+
+func (a attrList) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+func (a attrList) Less(i, j int) bool {
+	return a[i][0] <= a[j][0]
+}
+
+type elemList [][2]interface{}
+
+func (e elemList) Len() int {
+	return len(e)
+}
+
+func (e elemList) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemList) Less(i, j int) bool {
+	return e[i][0].(string) <= e[j][0].(string)
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/xmlseq.go b/vendor/github.com/clbanning/mxj/v2/xmlseq.go
new file mode 100644
index 0000000000..80632bd3c3
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/xmlseq.go
@@ -0,0 +1,877 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+// xmlseq.go - version of xml.go with sequence # injection on Decoding and sorting on Encoding.
+// Also, handles comments, directives and process instructions.
+
+package mxj
+
+import (
+	"bytes"
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+)
+
+// MapSeq is like Map but contains sequencing indices to allow recovering the original order of
+// the XML elements when the map[string]interface{} is marshaled. Element attributes are
+// stored as a map["#attr"]map[<attr_key>]map[string]interface{}{"#text":"<value>", "#seq":<attr_index>}
+// value instead of denoting the keys with a prefix character. Also, comments, directives and
+// process instructions are preserved.
+type MapSeq map[string]interface{}
+
+// NoRoot is returned by NewXmlSeq, etc., when a comment, directive or procinstr element is parsed
+// in the XML data stream and the element is not contained in an XML object with a root element.
+var NoRoot = errors.New("no root key")
+var NO_ROOT = NoRoot // maintain backwards compatibility
+
+// ------------------- NewMapXmlSeq & NewMapXmlSeqReader ... -------------------------
+
+// NewMapXmlSeq converts a XML doc into a MapSeq value with elements id'd with decoding sequence key represented
+// as map["#seq"]<int value>.
+// If the optional argument 'cast' is 'true', then values will be converted to boolean or float64 if possible.
+// NOTE: "#seq" key/value pairs are removed on encoding with msv.Xml() / msv.XmlIndent().
+//	• attributes are a map - map["#attr"]map["attr_key"]map[string]interface{}{"#text":<aval>, "#seq":<num>}
+//	• all simple elements are decoded as map["#text"]interface{} with a "#seq" k:v pair, as well.
+//	• lists always decode as map["list_tag"][]map[string]interface{} where the array elements are maps that
+//	  include a "#seq" k:v pair based on sequence they are decoded. Thus, XML like:
+//	      <doc>
+//	         <ltag>value 1</ltag>
+//	         <newtag>value 2</newtag>
+//	         <ltag>value 3</ltag>
+//	      </doc>
+//	  is decoded as:
+//	    doc :
+//	      ltag :[[]interface{}]
+//	        [item: 0]
+//	          #seq :[int] 0
+//	          #text :[string] value 1
+//	        [item: 1]
+//	          #seq :[int] 2
+//	          #text :[string] value 3
+//	      newtag :
+//	        #seq :[int] 1
+//	        #text :[string] value 2
+//	  It will encode in proper sequence even though the MapSeq representation merges all "ltag" elements in an array.
+//	• comments - "<!--comment-->" - are decoded as map["#comment"]map["#text"]"cmnt_text" with a "#seq" k:v pair.
+//	• directives - "<!directive>" - are decoded as map["#directive"]map["#text"]"directive_text" with a "#seq" k:v pair.
+//	• process instructions - "<?instr?>" - are decoded as map["#procinst"]interface{} where the #procinst value
+//	  is of map[string]interface{} type with the following keys: #target, #inst, and #seq.
+//	• comments, directives, and procinsts that are NOT part of a document with a root key will be returned as
+//	  map[string]interface{} and the error value 'NoRoot'.
+//	• note: name space notation is preserved -
+//	  1. Keys parsed from a <name space prefix>:<local name> tag preserve the
+//	     "<name space prefix>:<local name>" notation rather than stripping it as with NewMapXml().
+//	  2. Attribute keys for name space prefix declarations preserve "xmlns:<prefix>" notation.
+//
+// ERRORS:
+//	1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	   "#directive" or "#procinst" key.
+func NewMapXmlSeq(xmlVal []byte, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	return xmlSeqToMap(xmlVal, r)
+}
+
+// NewMapXmlSeqReader returns next XML doc from an io.Reader as a MapSeq value.
+// NOTES:
+//	1. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	   extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	2. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	   re-encode the message in its original structure.
+//	3. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+// ERRORS:
+//	1. If a NoRoot error, "no root key," is returned, check the initial map key for a "#comment",
+//	   "#directive" or "#procinst" key.
+func NewMapXmlSeqReader(xmlReader io.Reader, cast ...bool) (MapSeq, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+
+	// We need to put an *os.File reader in a ByteReader or the xml.NewDecoder
+	// will wrap it in a bufio.Reader and seek on the file beyond where the
+	// xml.Decoder parses!
+	if _, ok := xmlReader.(io.ByteReader); !ok {
+		xmlReader = myByteReader(xmlReader) // see code at EOF
+	}
+
+	// build the map
+	return xmlSeqReaderToMap(xmlReader, r)
+}
+
+// NewMapXmlSeqReaderRaw returns the next XML doc from an io.Reader as a MapSeq value.
+// Returns MapSeq value, slice with the raw XML, and any error.
+// NOTES:
+//	1. Due to the implementation of xml.Decoder, the raw XML off the reader is buffered to []byte
+//	   using a ByteReader. If the io.Reader is an os.File, there may be significant performance impact.
+//	   See the examples - getmetrics1.go through getmetrics4.go - for comparative use cases on a large
+//	   data set. If the io.Reader is wrapping a []byte value in-memory, however, such as http.Request.Body
+//	   you CAN use it to efficiently unmarshal a XML doc and retrieve the raw XML in a single call.
+//	2. The 'raw' return value may be larger than the XML text value.
+//	3. The 'xmlReader' will be parsed looking for an xml.StartElement, xml.Comment, etc., so BOM and other
+//	   extraneous xml.CharData will be ignored unless io.EOF is reached first.
+//	4. CoerceKeysToLower() is NOT recognized, since the intent here is to eventually call m.XmlSeq() to
+//	   re-encode the message in its original structure.
+//	5. If CoerceKeysToSnakeCase() has been called, then all key values will be converted to snake case.
+//
+// ERRORS:
+//	1. If a NoRoot error, "no root key," is returned, check if the initial map key is a "#comment",
+//	   "#directive" or "#procinst" key.
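+//
+// Illustrative usage sketch (editor's addition, not upstream documentation;
+// 'resp' is a hypothetical *http.Response whose body carries an XML doc):
+//
+//	m, raw, err := mxj.NewMapXmlSeqReaderRaw(resp.Body)
+//	if err != nil && err != mxj.NoRoot {
+//		// handle the decode error; 'raw' still holds the bytes consumed
+//	}
+//	_ = m // MapSeq with "#seq" keys; re-encode in original order with m.Xml()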
+func NewMapXmlSeqReaderRaw(xmlReader io.Reader, cast ...bool) (MapSeq, []byte, error) {
+	var r bool
+	if len(cast) == 1 {
+		r = cast[0]
+	}
+	// create TeeReader so we can retrieve raw XML
+	buf := make([]byte, 0)
+	wb := bytes.NewBuffer(buf)
+	trdr := myTeeReader(xmlReader, wb)
+
+	m, err := xmlSeqReaderToMap(trdr, r)
+
+	// retrieve the raw XML that was decoded
+	b := wb.Bytes()
+
+	// err may be NoRoot
+	return m, b, err
+}
+
+// xmlSeqReaderToMap() - parse a XML io.Reader to a map[string]interface{} value
+func xmlSeqReaderToMap(rdr io.Reader, r bool) (map[string]interface{}, error) {
+	// parse the Reader
+	p := xml.NewDecoder(rdr)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// xmlSeqToMap - convert a XML doc into map[string]interface{} value
+func xmlSeqToMap(doc []byte, r bool) (map[string]interface{}, error) {
+	b := bytes.NewReader(doc)
+	p := xml.NewDecoder(b)
+	if CustomDecoder != nil {
+		useCustomDecoder(p)
+	} else {
+		p.CharsetReader = XmlCharsetReader
+	}
+	return xmlSeqToMapParser("", nil, p, r)
+}
+
+// ===================================== where the work happens =============================
+
+// xmlSeqToMapParser - load a 'clean' XML doc into a map[string]interface{} directly.
+// Add #seq tag value for each element decoded - to be used for Encoding later.
+func xmlSeqToMapParser(skey string, a []xml.Attr, p *xml.Decoder, r bool) (map[string]interface{}, error) {
+	if snakeCaseKeys {
+		skey = strings.Replace(skey, "-", "_", -1)
+	}
+
+	// NOTE: all attributes and sub-elements parsed into 'na', 'na' is returned as value for 'skey' in 'n'.
+	var n, na map[string]interface{}
+	var seq int // for including seq num when decoding
+
+	// Allocate maps and load attributes, if any.
+	// NOTE: on entry from NewMapXml(), etc., skey=="", and we fall through
+	// to get StartElement then recurse with skey==xml.StartElement.Name.Local
+	// where we begin allocating map[string]interface{} values 'n' and 'na'.
+	if skey != "" {
+		// 'n' only needs one slot - save call to runtime·hashGrow()
+		// 'na' we don't know
+		n = make(map[string]interface{}, 1)
+		na = make(map[string]interface{})
+		if len(a) > 0 {
+			// xml.Attr is decoded into: map["#attr"]map[<attr_label>]interface{}
+			// where interface{} is map[string]interface{}{"#text":<attr_val>, "#seq":<attr_seq>}
+			aa := make(map[string]interface{}, len(a))
+			for i, v := range a {
+				if snakeCaseKeys {
+					v.Name.Local = strings.Replace(v.Name.Local, "-", "_", -1)
+				}
+				if xmlEscapeCharsDecoder { // per issue#84
+					v.Value = escapeChars(v.Value)
+				}
+				if len(v.Name.Space) > 0 {
+					aa[v.Name.Space+`:`+v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				} else {
+					aa[v.Name.Local] = map[string]interface{}{"#text": cast(v.Value, r, ""), "#seq": i}
+				}
+			}
+			na["#attr"] = aa
+		}
+	}
+
+	// Return XMPP message.
+	if handleXMPPStreamTag && skey == "stream:stream" {
+		n[skey] = na
+		return n, nil
+	}
+
+	for {
+		t, err := p.RawToken()
+		if err != nil {
+			if err != io.EOF {
+				return nil, errors.New("xml.Decoder.Token() - " + err.Error())
+			}
+			return nil, err
+		}
+		switch t.(type) {
+		case xml.StartElement:
+			tt := t.(xml.StartElement)
+
+			// First call to xmlSeqToMapParser() doesn't pass xml.StartElement - the map key.
+			// So when the loop is first entered, the first token is the root tag along
+			// with any attributes, which we process here.
+			//
+			// Subsequent calls to xmlSeqToMapParser() will pass in tag+attributes for
+			// processing before getting the next token which is the element value,
+			// which is done above.
+			if skey == "" {
+				if len(tt.Name.Space) > 0 {
+					return xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+				} else {
+					return xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+				}
+			}
+
+			// If not initializing the map, parse the element.
+			// len(nn) == 1, necessarily - it is just an 'n'.
+			var nn map[string]interface{}
+			if len(tt.Name.Space) > 0 {
+				nn, err = xmlSeqToMapParser(tt.Name.Space+`:`+tt.Name.Local, tt.Attr, p, r)
+			} else {
+				nn, err = xmlSeqToMapParser(tt.Name.Local, tt.Attr, p, r)
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			// The nn map[string]interface{} value is a na[nn_key] value.
+			// We need to see if nn_key already exists - means we're parsing a list.
+			// This may require converting na[nn_key] value into []interface{} type.
+			// First, extract the key:val for the map - it's a singleton.
+			var key string
+			var val interface{}
+			for key, val = range nn {
+				break
+			}
+
+			// add "#seq" k:v pair -
+			// Sequence number included even in list elements - this should allow us
+			// to properly resequence even something goofy like:
+			//     <list>item 1</list>
+			//     <item>item 2</item>
+			//     <list>item 3</list>
+			// where all the "list" subelements are decoded into an array.
+			switch val.(type) {
+			case map[string]interface{}:
+				val.(map[string]interface{})["#seq"] = seq
+				seq++
+			case interface{}: // a non-nil simple element: string, float64, bool
+				v := map[string]interface{}{"#text": val, "#seq": seq}
+				seq++
+				val = v
+			}
+
+			// 'na' holding sub-elements of n.
+			// See if 'key' already exists.
+			// If 'key' exists, then this is a list, if not just add key:val to na.
+			if v, ok := na[key]; ok {
+				var a []interface{}
+				switch v.(type) {
+				case []interface{}:
+					a = v.([]interface{})
+				default: // anything else - note: v.(type) != nil
+					a = []interface{}{v}
+				}
+				a = append(a, val)
+				na[key] = a
+			} else {
+				na[key] = val // save it as a singleton
+			}
+		case xml.EndElement:
+			if skey != "" {
+				tt := t.(xml.EndElement)
+				if snakeCaseKeys {
+					tt.Name.Local = strings.Replace(tt.Name.Local, "-", "_", -1)
+				}
+				var name string
+				if len(tt.Name.Space) > 0 {
+					name = tt.Name.Space + `:` + tt.Name.Local
+				} else {
+					name = tt.Name.Local
+				}
+				if skey != name {
+					return nil, fmt.Errorf("element %s not properly terminated, got %s at #%d",
+						skey, name, p.InputOffset())
+				}
+			}
+			// len(n) > 0 if this is a simple element w/o xml.Attrs - see xml.CharData case.
+			if len(n) == 0 {
+				// If len(na)==0 we have an empty element == "<tag></tag>";
+				// it has no xml.Attr nor xml.CharData.
+				// Empty element content will be map["etag"]map["#text"]""
+				// after #seq injection - map["etag"]map["#seq"]seq - after return.
+				if len(na) > 0 {
+					n[skey] = na
+				} else {
+					n[skey] = "" // empty element
+				}
+			}
+			return n, nil
+		case xml.CharData:
+			// clean up possible noise
+			tt := strings.Trim(string(t.(xml.CharData)), trimRunes)
+			if xmlEscapeCharsDecoder { // issue#84
+				tt = escapeChars(tt)
+			}
+			if skey == "" {
+				// per Adrian (http://www.adrianlungu.com/) catch stray text
+				// in decoder stream -
+				// https://github.com/clbanning/mxj/pull/14#issuecomment-182816374
+				// NOTE: CharSetReader must be set to non-UTF-8 CharSet or you'll get
+				// a p.Token() decoding error when the BOM is UTF-16 or UTF-32.
+				continue
+			}
+			if len(tt) > 0 {
+				// every simple element is a #text and has #seq associated with it
+				na["#text"] = cast(tt, r, "")
+				na["#seq"] = seq
+				seq++
+			}
+		case xml.Comment:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#comment": string(t.(xml.Comment))}
+				return n, NoRoot
+			}
+			cm := make(map[string]interface{}, 2)
+			cm["#text"] = string(t.(xml.Comment))
+			cm["#seq"] = seq
+			seq++
+			na["#comment"] = cm
+		case xml.Directive:
+			if n == nil { // no root 'key'
+				n = map[string]interface{}{"#directive": string(t.(xml.Directive))}
+				return n, NoRoot
+			}
+			dm := make(map[string]interface{}, 2)
+			dm["#text"] = string(t.(xml.Directive))
+			dm["#seq"] = seq
+			seq++
+			na["#directive"] = dm
+		case xml.ProcInst:
+			if n == nil {
+				na = map[string]interface{}{"#target": t.(xml.ProcInst).Target, "#inst": string(t.(xml.ProcInst).Inst)}
+				n = map[string]interface{}{"#procinst": na}
+				return n, NoRoot
+			}
+			pm := make(map[string]interface{}, 3)
+			pm["#target"] = t.(xml.ProcInst).Target
+			pm["#inst"] = string(t.(xml.ProcInst).Inst)
+			pm["#seq"] = seq
+			seq++
+			na["#procinst"] = pm
+		default:
+			// noop - shouldn't ever get here, now, since we handle all token types
+		}
+	}
+}
+
+// ------------------ END: NewMapXml & NewMapXmlReader -------------------------
+
+// --------------------- mv.XmlSeq & mv.XmlSeqWriter -------------------------
+
+// Xml encodes a MapSeq as XML with elements sorted on #seq. The companion of NewMapXmlSeq().
+// The following rules apply.
+//	- The "#seq" key value is used to sequence the subelements or attributes only.
+//	- The "#attr" map key identifies the map of attribute map[string]interface{} values with "#text" key.
+//	- The "#comment" map key identifies a comment in the value "#text" map entry - <!--comment-->.
+//	- The "#directive" map key identifies a directive in the value "#text" map entry - <!directive>.
+//	- The "#procinst" map key identifies a process instruction in the value "#target" and "#inst"
+//	  map entries - <?target inst?>.
+//	- Value type encoding:
+//	    > string, bool, float64, int, int32, int64, float32: per "%v" formatting
+//	    > []bool, []uint8: by casting to string
+//	    > structures, etc.: handed to xml.Marshal() - if there is an error, the element
+//	      value is "UNKNOWN"
+//	- Elements with only attribute values or are null are terminated using "/>" unless XmlGoEmptyElemSyntax() called.
+//	- If len(mv) == 1 and no rootTag is provided, then the map key is used as the root tag, if possible.
+//	  Thus, `{ "key":"value" }` encodes as "<key>value</key>".
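+//
+// Illustrative sketch (editor's addition, not upstream documentation):
+//
+//	ms, err := mxj.NewMapXmlSeq([]byte(`<doc><b>2</b><a>1</a></doc>`))
+//	if err != nil {
+//		// handle error
+//	}
+//	x, _ := ms.Xml() // re-emits <b> before <a>, honoring the "#seq" order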
+func (mv MapSeq) Xml(rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+	var err error
+	s := new(string)
+	p := new(pretty) // just a stub
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		for key, value := range m {
+			// if it's an array, see if all values are map[string]interface{}
+			// we force a new root tag if we'll end up with no key:value in the list
+			// so: key:[string_val, bool:true] --> <doc><key>string_val</key><key>true</key></doc>
+			switch value.(type) {
+			case []interface{}:
+				for _, v := range value.([]interface{}) {
+					switch v.(type) {
+					case map[string]interface{}: // noop
+					default: // anything else
+						err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+						goto done
+					}
+				}
+			}
+			err = mapToXmlSeqIndent(false, s, key, value, p)
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(false, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(false, s, DefaultRootTag, m, p)
+	}
+done:
+	if xmlCheckIsValid {
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// The following implementation is provided only for symmetry with NewMapXmlReader[Raw].
+// The names will also provide a key for the number of return arguments.
+
+// XmlWriter writes the MapSeq value as XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlWriter(xmlWriter io.Writer, rootTag ...string) error {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlWriterRaw writes the MapSeq value as XML on the Writer. []byte is the raw XML that was written.
+// See MapSeq.Xml() for encoding rules.
+/*
+func (mv MapSeq) XmlWriterRaw(xmlWriter io.Writer, rootTag ...string) ([]byte, error) {
+	x, err := mv.Xml(rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// XmlIndentWriter writes the MapSeq value as pretty XML on the Writer.
+// See MapSeq.Xml() for encoding rules.
+func (mv MapSeq) XmlIndentWriter(xmlWriter io.Writer, prefix, indent string, rootTag ...string) error {
+	x, err := mv.XmlIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return err
+}
+
+// XmlIndentWriterRaw writes the MapSeq as pretty XML on the Writer. []byte is the raw XML that was written.
+// See MapSeq.Xml() for encoding rules.
+/*
+func (mv MapSeq) XmlIndentWriterRaw(xmlWriter io.Writer, prefix, indent string, rootTag ...string) ([]byte, error) {
+	x, err := mv.XmlSeqIndent(prefix, indent, rootTag...)
+	if err != nil {
+		return x, err
+	}
+
+	_, err = xmlWriter.Write(x)
+	return x, err
+}
+*/
+
+// -------------------- END: mv.Xml & mv.XmlWriter -------------------------------
+
+// ---------------------- XmlSeqIndent ----------------------------
+
+// XmlIndent encodes a map[string]interface{} as a pretty XML string.
+// See MapSeq.Xml() for encoding rules.
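+//
+// Illustrative sketch (editor's addition, not upstream documentation; 'ms'
+// is a MapSeq value, e.g. from NewMapXmlSeq):
+//
+//	pretty, err := ms.XmlIndent("", "  ") // no prefix, two-space indent
+//	if err == nil {
+//		fmt.Println(string(pretty))
+//	}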
+func (mv MapSeq) XmlIndent(prefix, indent string, rootTag ...string) ([]byte, error) {
+	m := map[string]interface{}(mv)
+
+	var err error
+	s := new(string)
+	p := new(pretty)
+	p.indent = indent
+	p.padding = prefix
+
+	if len(m) == 1 && len(rootTag) == 0 {
+		// this can extract the key for the single map element
+		// use it if it isn't a key for a list
+		for key, value := range m {
+			if _, ok := value.([]interface{}); ok {
+				err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+			} else {
+				err = mapToXmlSeqIndent(true, s, key, value, p)
+			}
+		}
+	} else if len(rootTag) == 1 {
+		err = mapToXmlSeqIndent(true, s, rootTag[0], m, p)
+	} else {
+		err = mapToXmlSeqIndent(true, s, DefaultRootTag, m, p)
+	}
+	if xmlCheckIsValid {
+		if _, err = NewMapXml([]byte(*s)); err != nil {
+			return nil, err
+		}
+		d := xml.NewDecoder(bytes.NewReader([]byte(*s)))
+		for {
+			_, err = d.Token()
+			if err == io.EOF {
+				err = nil
+				break
+			} else if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return []byte(*s), err
+}
+
+// where the work actually happens
+// returns an error if an attribute is not atomic
+func mapToXmlSeqIndent(doIndent bool, s *string, key string, value interface{}, pp *pretty) error {
+	var endTag bool
+	var isSimple bool
+	var noEndTag bool
+	var elen int
+	var ss string
+	p := &pretty{pp.indent, pp.cnt, pp.padding, pp.mapDepth, pp.start}
+
+	switch value.(type) {
+	case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+		if doIndent {
+			*s += p.padding
+		}
+		if key != "#comment" && key != "#directive" && key != "#procinst" {
+			*s += `<` + key
+		}
+	}
+	switch value.(type) {
+	case map[string]interface{}:
+		val := value.(map[string]interface{})
+
+		if key == "#comment" {
+			*s += `<!--` + val["#text"].(string) + `-->`
+			noEndTag = true
+			break
+		}
+
+		if key == "#directive" {
+			*s += `<!` + val["#text"].(string) + `>`
+			noEndTag = true
+			break
+		}
+
+		if key == "#procinst" {
+			*s += `<?` + val["#target"].(string) + ` ` + val["#inst"].(string) + `?>`
+			noEndTag = true
+			break
+		}
+
+		haveAttrs := false
+		// process attributes first
+		if v, ok := val["#attr"].(map[string]interface{}); ok {
+			// First, unroll the map[string]interface{} into a []keyval array.
+			// Then sequence it.
+			kv := make([]keyval, len(v))
+			n := 0
+			for ak, av := range v {
+				kv[n] = keyval{ak, av}
+				n++
+			}
+			sort.Sort(elemListSeq(kv))
+			// Now encode the attributes in original decoding sequence, using keyval array.
+			for _, a := range kv {
+				vv := a.v.(map[string]interface{})
+				switch vv["#text"].(type) {
+				case string:
+					if xmlEscapeChars {
+						ss = escapeChars(vv["#text"].(string))
+					} else {
+						ss = vv["#text"].(string)
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				case float64, bool, int, int32, int64, float32:
+					*s += ` ` + a.k + `="` + fmt.Sprintf("%v", vv["#text"]) + `"`
+				case []byte:
+					if xmlEscapeChars {
+						ss = escapeChars(string(vv["#text"].([]byte)))
+					} else {
+						ss = string(vv["#text"].([]byte))
+					}
+					*s += ` ` + a.k + `="` + ss + `"`
+				default:
+					return fmt.Errorf("invalid attribute value for: %s", a.k)
+				}
+			}
+			haveAttrs = true
+		}
+
+		// simple element?
+		// every map value has, at least, "#seq" and, perhaps, "#text" and/or "#attr"
+		_, seqOK := val["#seq"] // have key
+		if v, ok := val["#text"]; ok && ((len(val) == 3 && haveAttrs) || (len(val) == 2 && !haveAttrs)) && seqOK {
+			if stmp, ok := v.(string); ok && stmp != "" {
+				if xmlEscapeChars {
+					stmp = escapeChars(stmp)
+				}
+				*s += ">" + stmp
+				endTag = true
+				elen = 1
+			}
+			isSimple = true
+			break
+		} else if !ok && ((len(val) == 2 && haveAttrs) || (len(val) == 1 && !haveAttrs)) && seqOK {
+			// here no #text but have #seq or #seq+#attr
+			endTag = false
+			break
+		}
+
+		// we now need to sequence everything except attributes
+		// 'kv' will hold everything that needs to be written
+		kv := make([]keyval, 0)
+		for k, v := range val {
+			if k == "#attr" { // already processed
+				continue
+			}
+			if k == "#seq" { // ignore - just for sorting
+				continue
+			}
+			switch v.(type) {
+			case []interface{}:
+				// unwind the array as separate entries
+				for _, vv := range v.([]interface{}) {
+					kv = append(kv, keyval{k, vv})
+				}
+			default:
+				kv = append(kv, keyval{k, v})
+			}
+		}
+
+		// close tag with possible attributes
+		*s += ">"
+		if doIndent {
+			*s += "\n"
+		}
+		// something more complex
+		p.mapDepth++
+		sort.Sort(elemListSeq(kv))
+		i := 0
+		for _, v := range kv {
+			switch v.v.(type) {
+			case []interface{}:
+			default:
+				if i == 0 && doIndent {
+					p.Indent()
+				}
+			}
+			i++
+			if err := mapToXmlSeqIndent(doIndent, s, v.k, v.v, p); err != nil {
+				return err
+			}
+			switch v.v.(type) {
+			case []interface{}: // handled in []interface{} case
+			default:
+				if doIndent {
+					p.Outdent()
+				}
+			}
+			i--
+		}
+		p.mapDepth--
+		endTag = true
+		elen = 1 // we do have some content other than attrs
+	case []interface{}:
+		for _, v := range value.([]interface{}) {
+			if doIndent {
+				p.Indent()
+			}
+			if err := mapToXmlSeqIndent(doIndent, s, key, v, p); err != nil {
+				return err
+			}
+			if doIndent {
+				p.Outdent()
+			}
+		}
+		return nil
+	case nil:
+		// terminate the tag
+		if doIndent {
+			*s += p.padding
+		}
+		*s += "<" + key
+		endTag, isSimple = true, true
+		break
+	default: // handle anything - even goofy stuff
+		elen = 0
+		switch value.(type) {
+		case string:
+			if xmlEscapeChars {
+				ss = escapeChars(value.(string))
+			} else {
+				ss = value.(string)
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		case float64, bool, int, int32, int64, float32:
+			v := fmt.Sprintf("%v", value)
+			elen = len(v)
+			if elen > 0 {
+				*s += ">" + v
+			}
+		case []byte: // NOTE: byte is just an alias for uint8
+			// similar to how xml.Marshal handles []byte structure members
+			if xmlEscapeChars {
+				ss = escapeChars(string(value.([]byte)))
+			} else {
+				ss = string(value.([]byte))
+			}
+			elen = len(ss)
+			if elen > 0 {
+				*s += ">" + ss
+			}
+		default:
+			var v []byte
+			var err error
+			if doIndent {
+				v, err = xml.MarshalIndent(value, p.padding, p.indent)
+			} else {
+				v, err = xml.Marshal(value)
+			}
+			if err != nil {
+				*s += ">UNKNOWN"
+			} else {
+				elen = len(v)
+				if elen > 0 {
+					*s += string(v)
+				}
+			}
+		}
+		isSimple = true
+		endTag = true
+	}
+	if endTag && !noEndTag {
+		if doIndent {
+			if !isSimple {
+				*s += p.padding
+			}
+		}
+		switch value.(type) {
+		case map[string]interface{}, []byte, string, float64, bool, int, int32, int64, float32:
+			if elen > 0 || useGoXmlEmptyElemSyntax {
+				if elen == 0 {
+					*s += ">"
+				}
+				*s += `</` + key + `>`
+			} else {
+				*s += `/>`
+			}
+		}
+	} else if !noEndTag {
+		if useGoXmlEmptyElemSyntax {
+			*s += `</` + key + `>`
+			// *s += ">"
+		} else {
+			*s += "/>"
+		}
+	}
+	if doIndent {
+		if p.cnt > p.start {
+			*s += "\n"
+		}
+		p.Outdent()
+	}
+
+	return nil
+}
+
+// the element sort implementation
+
+type keyval struct {
+	k string
+	v interface{}
+}
+type elemListSeq []keyval
+
+func (e elemListSeq) Len() int {
+	return len(e)
+}
+
+func (e elemListSeq) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e elemListSeq) Less(i, j int) bool {
+	var iseq, jseq int
+	var fiseq, fjseq float64
+	var ok bool
+	if iseq, ok = e[i].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fiseq, ok = e[i].v.(map[string]interface{})["#seq"].(float64); ok {
+			iseq = int(fiseq)
+		} else {
+			iseq = 9999999
+		}
+	}
+
+	if jseq, ok = e[j].v.(map[string]interface{})["#seq"].(int); !ok {
+		if fjseq, ok = e[j].v.(map[string]interface{})["#seq"].(float64); ok {
+			jseq = int(fjseq)
+		} else {
+			jseq = 9999999
+		}
+	}
+
+	return iseq <= jseq
+}
+
+// =============== https://groups.google.com/forum/#!topic/golang-nuts/lHPOHD-8qio
+
+// BeautifyXml (re)formats an XML doc similar to Map.XmlIndent().
+// It preserves comments, directives and process instructions.
+func BeautifyXml(b []byte, prefix, indent string) ([]byte, error) {
+	x, err := NewMapXmlSeq(b)
+	if err != nil {
+		return nil, err
+	}
+	return x.XmlIndent(prefix, indent)
+}
diff --git a/vendor/github.com/clbanning/mxj/v2/xmlseq2.go b/vendor/github.com/clbanning/mxj/v2/xmlseq2.go
new file mode 100644
index 0000000000..467fd07697
--- /dev/null
+++ b/vendor/github.com/clbanning/mxj/v2/xmlseq2.go
@@ -0,0 +1,18 @@
+// Copyright 2012-2016, 2019 Charles Banning. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file
+
+package mxj
+
+// ---------------- expose Map methods to MapSeq type ---------------------------
+
+// StringIndent pretty prints a MapSeq.
+func (msv MapSeq) StringIndent(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), true, true, offset...)
+}
+
+// StringIndentNoTypeInfo pretty prints a MapSeq without the value type information - just key:value entries.
+func (msv MapSeq) StringIndentNoTypeInfo(offset ...int) string {
+	return writeMap(map[string]interface{}(msv), false, true, offset...)
+}
+
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index 9ee97fc911..0da3efe4c2 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -26,10 +26,10 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
+	"context"
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"runtime"
@@ -48,6 +48,7 @@ type options struct {
 	prioritizedFiles       []string
 	missedPrioritizedFiles *[]string
 	compression            Compression
+	ctx                    context.Context
 }
 
 type Option func(o *options) error
@@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option {
 	}
 }
 
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+	return func(o *options) error {
+		o.ctx = ctx
+		return nil
+	}
+}
+
 // Blob is an eStargz blob.
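+// Editor's aside - an illustrative sketch of the WithContext option above
+// (not part of the vendored source; 'sr' is a hypothetical *io.SectionReader
+// over a tar blob):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	blob, err := estargz.Build(sr, estargz.WithContext(ctx))
+//	// canceling ctx while Build runs triggers cleanup of its temporary files
+//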
type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 4b655c1453..921e59ec6e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -31,7 +31,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -579,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -933,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 1de13a4705..37448cae08 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,8 +31,8 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" + "path/filepath" "reflect" "sort" "strings" @@ -287,11 +287,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -1314,6 +1314,18 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { ), wantFailOnLossLess: true, }, + { + 
name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, } for _, tt := range tests { @@ -1731,6 +1743,60 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { }) } +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + func tarOf(s ...tarEntry) []tarEntry { return s } type tarEntry interface { diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go index 384ff7fd7f..3bc74463ec 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -159,7 +159,8 @@ type TOCEntry struct { // NumLink is the number of entry names pointing to this entry. // Zero means one name references this entry. - NumLink int + // This field is calculated during runtime and not recorded in TOC JSON. + NumLink int `json:"-"` // Xattrs are the extended attribute for the entry. Xattrs map[string][]byte `json:"xattrs,omitempty"` diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go index a272b7ab23..fdcfba81ea 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/jwks.go @@ -2,6 +2,9 @@ package oidc import ( "context" + "crypto" + "crypto/ecdsa" + "crypto/rsa" "errors" "fmt" "io/ioutil" @@ -12,6 +15,35 @@ import ( jose "gopkg.in/square/go-jose.v2" ) +// StaticKeySet is a verifier that validates JWT against a static set of public keys. +type StaticKeySet struct { + // PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and + // *ecdsa.PublicKey. + PublicKeys []crypto.PublicKey +} + +// VerifySignature compares the signature against a static set of public keys. 
+func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { + jws, err := jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("parsing jwt: %v", err) + } + for _, pub := range s.PublicKeys { + switch pub.(type) { + case *rsa.PublicKey: + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("invalid public key type provided: %T", pub) + } + payload, err := jws.Verify(pub) + if err != nil { + continue + } + return payload, nil + } + return nil, fmt.Errorf("no public keys able to verify jwt") +} + // NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP // GETs to fetch JSON web token sets hosted at a remote URL. This is automatically // used by NewProvider using the URLs returned by OpenID Connect discovery, but is @@ -81,15 +113,23 @@ func (i *inflight) result() ([]jose.JSONWebKey, error) { return i.keys, i.err } +// paresdJWTKey is a context key that allows common setups to avoid parsing the +// JWT twice. It holds a *jose.JSONWebSignature value. +var parsedJWTKey contextKey + // VerifySignature validates a payload against a signature from the jwks_uri. // // Users MUST NOT call this method directly and should use an IDTokenVerifier // instead. This method skips critical validations such as 'alg' values and is // only exported to implement the KeySet interface. func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) { - jws, err := jose.ParseSigned(jwt) - if err != nil { - return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature) + if !ok { + var err error + jws, err = jose.ParseSigned(jwt) + if err != nil { + return nil, fmt.Errorf("oidc: malformed jwt: %v", err) + } } return r.verify(ctx, jws) } diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go index 3e1d80e08b..ae73eb0280 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go @@ -134,6 +134,48 @@ var supportedAlgorithms = map[string]bool{ PS512: true, } +// ProviderConfig allows creating providers when discovery isn't supported. It's +// generally easier to use NewProvider directly. +type ProviderConfig struct { + // IssuerURL is the identity of the provider, and the string it uses to sign + // ID tokens with. For example "https://accounts.google.com". This value MUST + // match ID tokens exactly. + IssuerURL string + // AuthURL is the endpoint used by the provider to support the OAuth 2.0 + // authorization endpoint. + AuthURL string + // TokenURL is the endpoint used by the provider to support the OAuth 2.0 + // token endpoint. + TokenURL string + // UserInfoURL is the endpoint used by the provider to support the OpenID + // Connect UserInfo flow. + // + // https://openid.net/specs/openid-connect-core-1_0.html#UserInfo + UserInfoURL string + // JWKSURL is the endpoint used by the provider to advertise public keys to + // verify issued ID tokens. This endpoint is polled as new keys are made + // available. + JWKSURL string + + // Algorithms, if provided, indicate a list of JWT algorithms allowed to sign + // ID tokens. If not provided, this defaults to the algorithms advertised by + // the JWK endpoint, then the set of algorithms supported by this package. + Algorithms []string +} + +// NewProvider initializes a provider from a set of endpoints, rather than +// through discovery. 
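+//
+// Illustrative sketch (editor's addition, not upstream documentation; the
+// endpoint URLs are placeholders):
+//
+//	cfg := oidc.ProviderConfig{
+//		IssuerURL: "https://issuer.example.com",
+//		AuthURL:   "https://issuer.example.com/auth",
+//		TokenURL:  "https://issuer.example.com/token",
+//		JWKSURL:   "https://issuer.example.com/keys",
+//	}
+//	provider := cfg.NewProvider(ctx)
+//	verifier := provider.Verifier(&oidc.Config{ClientID: "my-client"})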
+func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider { + return &Provider{ + issuer: p.IssuerURL, + authURL: p.AuthURL, + tokenURL: p.TokenURL, + userInfoURL: p.UserInfoURL, + algorithms: p.Algorithms, + remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL), + } +} + // NewProvider uses the OpenID Connect discovery mechanism to construct a Provider. // // The issuer is the URL identifier for the service. For example: "https://accounts.google.com" diff --git a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go index dc6b56dfe3..464b61e6c0 100644 --- a/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go +++ b/vendor/github.com/coreos/go-oidc/v3/oidc/verify.go @@ -21,6 +21,18 @@ const ( issuerGoogleAccountsNoScheme = "accounts.google.com" ) +// TokenExpiredError indicates that Verify failed because the token was expired. This +// error does NOT indicate that the token is not also invalid for other reasons. Other +// checks might have failed if the expiration check had not failed. +type TokenExpiredError struct { + // Expiry is the time when the token expired. + Expiry time.Time +} + +func (e *TokenExpiredError) Error() string { + return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry) +} + // KeySet is a set of publc JSON Web Keys that can be used to validate the signature // of JSON web tokens. This is expected to be backed by a remote key set through // provider metadata discovery or an in-memory set of keys delivered out-of-band. @@ -55,15 +67,10 @@ type IDTokenVerifier struct { // keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs") // verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) // -// Since KeySet is an interface, this constructor can also be used to supply custom -// public key sources. For example, if a user wanted to supply public keys out-of-band -// and hold them statically in-memory: +// Or a static key set (e.g. for testing): // -// // Custom KeySet implementation. -// keySet := newStatisKeySet(publicKeys...) -// -// // Verifier uses the custom KeySet implementation. -// verifier := oidc.NewVerifier("https://auth.example.com", keySet, config) +// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}} +// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config) // func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier { return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL} @@ -103,9 +110,6 @@ type Config struct { } // Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs. -// -// The returned IDTokenVerifier is tied to the Provider's context and its behavior is -// undefined once the Provider's context is canceled. func (p *Provider) Verifier(config *Config) *IDTokenVerifier { if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 { // Make a copy so we don't modify the config values. @@ -268,13 +272,15 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok nowTime := now() if t.Expiry.Before(nowTime) { - return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", t.Expiry) + return nil, &TokenExpiredError{Expiry: t.Expiry} } // If nbf claim is provided in token, ensure that it is indeed in the past. 
if token.NotBefore != nil { nbfTime := time.Time(*token.NotBefore) - leeway := 1 * time.Minute + // Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew. + // https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153 + leeway := 5 * time.Minute if nowTime.Add(leeway).Before(nbfTime) { return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime) @@ -302,6 +308,7 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok t.sigAlgorithm = sig.Header.Algorithm + ctx = context.WithValue(ctx, parsedJWTKey, jws) gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken) if err != nil { return nil, fmt.Errorf("failed to verify signature: %v", err) diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index f7409d546a..3a5299474b 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,9 @@ # Change history of go-restful (v2 only) +## v2.16.0 - 2022-07-11 + +- Backported CORS filter. #489 (#493) #503 + ## v2.15.0 - 2020-11-10 - Add OPTIONS in Webservice diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go index 1efeef072d..9d18dfb7b4 100644 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ b/vendor/github.com/emicklei/go-restful/cors_filter.go @@ -18,9 +18,22 @@ import ( // http://enable-cors.org/server.html // http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. + ExposeHeaders []string // list of Header names + + // AllowedHeaders is alist of Header names. Checking is case-insensitive. + // The list may contain the special wildcard string ".*" ; all is allowed + AllowedHeaders []string + + // AllowedDomains is a list of allowed values for Http Origin. + // The list may contain the special wildcard string ".*" ; all is allowed + // If empty all are allowed. + AllowedDomains []string + + // AllowedDomainFunc is optional and is a function that will do the check + // when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*". + AllowedDomainFunc func(origin string) bool + + // AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive. 
AllowedMethods []string MaxAge int // number of seconds before requiring new Options request CookiesAllowed bool @@ -119,36 +132,24 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { if len(origin) == 0 { return false } + lowerOrigin := strings.ToLower(origin) if len(c.AllowedDomains) == 0 { + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(lowerOrigin) + } return true } - allowed := false + // exact match on each allowed domain for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break + if domain == ".*" || strings.ToLower(domain) == lowerOrigin { + return true } } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(origin) } - - return allowed + return false } func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { @@ -184,19 +185,9 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str if strings.ToLower(each) == strings.ToLower(header) { return true } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err + if each == "*" { + return true } - regexps = append(regexps, r) } - return regexps, nil + return false } diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go index e8793304b1..febe2cc170 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/parameter.go @@ -20,6 +20,9 @@ const ( // FormParameterKind = indicator of Request parameter type "form" FormParameterKind + // MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data" + MultiPartFormParameterKind + // CollectionFormatCSV comma separated values `foo,bar` CollectionFormatCSV = CollectionFormat("csv") @@ -94,6 +97,11 @@ func (p *Parameter) beForm() *Parameter { return p } +func (p *Parameter) beMultiPartForm() *Parameter { + p.data.Kind = MultiPartFormParameterKind + return p +} + // Required sets the required field and returns the receiver func (p *Parameter) Required(required bool) *Parameter { p.data.Required = required diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go index a20730febf..05d768117c 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/request.go @@ -17,6 +17,7 @@ type Request struct { pathParameters map[string]string attributes map[string]interface{} // for storing request-scoped values selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees + selectedRoute *Route } func NewRequest(httpRequest *http.Request) *Request { @@ -114,5 +115,10 @@ func (r Request) Attribute(name string) interface{} { // SelectedRoutePath root path + route path that matched the request, e.g. 
/meetings/{id}/attendees func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath + return r.selectedRoute.Path +} + +// SelectedRoute return the Route that selected by the container +func (r Request) SelectedRoute() RouteReader { + return routeAccessor{route: r.selectedRoute} } diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go index e2f78f00f7..8f0b56aa2d 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -175,7 +175,7 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType } // WriteError writes the http status and the error string on the response. err can be nil. -// Return an error if writing was not succesful. +// Return an error if writing was not successful. func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { r.err = err if err == nil { diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index 598aa57a76..6ac2612931 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -69,7 +69,7 @@ func (r *Route) postBuild() { func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { wrappedRequest := NewRequest(httpRequest) wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoutePath = r.Path + wrappedRequest.selectedRoute = r wrappedResponse := NewResponse(httpWriter) wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) wrappedResponse.routeProduces = r.Produces diff --git a/vendor/github.com/emicklei/go-restful/route_reader.go b/vendor/github.com/emicklei/go-restful/route_reader.go new file mode 100644 index 0000000000..c9f4ee75f3 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/route_reader.go @@ -0,0 +1,66 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. 
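+// Editor's aside - an illustrative sketch (not part of the vendored source):
+// a route filter can inspect the matched route via Request.SelectedRoute(),
+// e.g.
+//
+//	func logRoute(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
+//		log.Printf("op=%s path=%s", req.SelectedRoute().Operation(), req.SelectedRoute().Path())
+//		chain.ProcessFilter(req, resp)
+//	}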
+ +type RouteReader interface { + Method() string + Consumes() []string + Path() string + Doc() string + Notes() string + Operation() string + ParameterDocs() []*Parameter + // Returns a copy + Metadata() map[string]interface{} + Deprecated() bool +} + +type routeAccessor struct { + route *Route +} + +func (r routeAccessor) Method() string { + return r.route.Method +} +func (r routeAccessor) Consumes() []string { + return r.route.Consumes[:] +} +func (r routeAccessor) Path() string { + return r.route.Path +} +func (r routeAccessor) Doc() string { + return r.route.Doc +} +func (r routeAccessor) Notes() string { + return r.route.Notes +} +func (r routeAccessor) Operation() string { + return r.route.Operation +} +func (r routeAccessor) ParameterDocs() []*Parameter { + return r.route.ParameterDocs[:] +} + +// Returns a copy +func (r routeAccessor) Metadata() map[string]interface{} { + return copyMap(r.route.Metadata) +} +func (r routeAccessor) Deprecated() bool { + return r.route.Deprecated +} + +// https://stackoverflow.com/questions/23057785/how-to-copy-a-map +func copyMap(m map[string]interface{}) map[string]interface{} { + cp := make(map[string]interface{}) + for k, v := range m { + vm, ok := v.(map[string]interface{}) + if ok { + cp[k] = copyMap(vm) + } else { + cp[k] = v + } + } + return cp +} diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index 2c164a2a2c..2eac41497b 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -165,6 +165,18 @@ func FormParameter(name, description string) *Parameter { return p } +// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) MultiPartFormParameter(name, description string) *Parameter { + return MultiPartFormParameter(name, description) +} + +func MultiPartFormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beMultiPartForm() + return p +} + // Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. func (w *WebService) Route(builder *RouteBuilder) *WebService { w.routesLock.Lock() diff --git a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml b/vendor/github.com/form3tech-oss/jwt-go/.travis.yml deleted file mode 100644 index 3c7fb7e1ae..0000000000 --- a/vendor/github.com/form3tech-oss/jwt-go/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.12 - - 1.13 - - 1.14 - - 1.15 - - tip diff --git a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f793cb..0000000000 --- a/vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. 
This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. 
- -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. - -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml index 779b1c0a35..3cc10c4e90 100644 --- a/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml +++ b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml @@ -9,11 +9,16 @@ builds: - amd64 - 386 - arm64 + - s390x ignore: - goos: darwin goarch: 386 - goos: windows goarch: arm64 + - goos: darwin + goarch: s390x + - goos: windows + goarch: s390x ldflags: - -s -w -X main.version=v{{.Version}} diff --git a/vendor/github.com/fullstorydev/grpcurl/.travis.yml b/vendor/github.com/fullstorydev/grpcurl/.travis.yml deleted file mode 100644 index 80c32ca245..0000000000 --- a/vendor/github.com/fullstorydev/grpcurl/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go -sudo: false - -matrix: - include: - - go: 1.11.x - env: GO111MODULE=on - - go: 1.12.x - env: GO111MODULE=off - - go: 1.12.x - env: GO111MODULE=on - - go: 1.13.x - env: - - GO111MODULE=on - - VET=1 - - go: 1.14.x - env: GO111MODULE=on - - go: 1.15.x - env: GO111MODULE=on - - go: tip - env: GO111MODULE=on - -script: - - if [[ "$VET" = 1 ]]; then make ci; else make deps test; fi diff --git a/vendor/github.com/fullstorydev/grpcurl/Dockerfile b/vendor/github.com/fullstorydev/grpcurl/Dockerfile index a2c22bcbf5..1fe3fc19af 100644 --- a/vendor/github.com/fullstorydev/grpcurl/Dockerfile +++ b/vendor/github.com/fullstorydev/grpcurl/Dockerfile @@ -11,8 +11,6 @@ COPY cmd /tmp/fullstorydev/grpcurl/cmd # and build a completely static binary (so we can use # scratch as basis for the final image) ENV CGO_ENABLED=0 -ENV GOOS=linux -ENV GOARCH=amd64 ENV GO111MODULE=on RUN go build -o /grpcurl \ -ldflags "-w -extldflags \"-static\" -X \"main.version=$(cat VERSION)\"" \ diff --git a/vendor/github.com/fullstorydev/grpcurl/Makefile b/vendor/github.com/fullstorydev/grpcurl/Makefile index 42ac3cc939..67c9bf441c 100644 --- a/vendor/github.com/fullstorydev/grpcurl/Makefile +++ b/vendor/github.com/fullstorydev/grpcurl/Makefile @@ -22,7 +22,7 @@ install: .PHONY: release release: - @GO111MODULE=on go install github.com/goreleaser/goreleaser + @go install github.com/goreleaser/goreleaser@v0.134.0 goreleaser --rm-dist .PHONY: docker @@ -46,29 +46,31 @@ vet: # CI is just getting latest master for dependencies like grpc. .PHONY: staticcheck staticcheck: - @GO111MODULE=on go install honnef.co/go/tools/cmd/staticcheck + @go install honnef.co/go/tools/cmd/staticcheck@v0.0.1-2020.1.4 staticcheck ./... .PHONY: ineffassign ineffassign: - @GO111MODULE=on go install github.com/gordonklaus/ineffassign + @go install github.com/gordonklaus/ineffassign@7953dde2c7bf ineffassign . 
.PHONY: predeclared
predeclared:
-	@GO111MODULE=on go install github.com/nishanths/predeclared
+	@go install github.com/nishanths/predeclared@86fad755b4d3
	predeclared .

# Intentionally omitted from CI, but target here for ad-hoc reports.
.PHONY: golint
golint:
-	@GO111MODULE=on go install golang.org/x/lint/golint
+	# TODO: pin version
+	@go install golang.org/x/lint/golint@latest
	golint -min_confidence 0.9 -set_exit_status ./...

# Intentionally omitted from CI, but target here for ad-hoc reports.
.PHONY: errcheck
errcheck:
-	@GO111MODULE=on go install github.com/kisielk/errcheck
+	# TODO: pin version
+	@go install github.com/kisielk/errcheck@latest
	errcheck ./...

.PHONY: test
diff --git a/vendor/github.com/fullstorydev/grpcurl/README.md b/vendor/github.com/fullstorydev/grpcurl/README.md
index e932142df7..cbb7e4f8bf 100644
--- a/vendor/github.com/fullstorydev/grpcurl/README.md
+++ b/vendor/github.com/fullstorydev/grpcurl/README.md
@@ -1,5 +1,5 @@
 # gRPCurl
-[![Build Status](https://travis-ci.com/fullstorydev/grpcurl.svg?branch=master)](https://travis-ci.com/github/fullstorydev/grpcurl/branches)
+[![Build Status](https://circleci.com/gh/fullstorydev/grpcurl/tree/master.svg?style=svg)](https://circleci.com/gh/fullstorydev/grpcurl/tree/master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/fullstorydev/grpcurl)](https://goreportcard.com/report/github.com/fullstorydev/grpcurl)
 
 `grpcurl` is a command-line tool that lets you interact with gRPC servers. It's
@@ -65,6 +65,10 @@ docker pull fullstorydev/grpcurl:latest
 # Run the tool
 docker run fullstorydev/grpcurl api.grpc.me:443 list
 ```
+Note that there are some pitfalls when using docker:
+- If you need to interact with a server listening on the host's loopback network, you must specify the host as `host.docker.internal` instead of `localhost` (for Mac or Windows) _OR_ have the container use the host network with `--network="host"` (Linux only).
+- If you need to provide proto source files or descriptor sets, you must mount the folder containing the files as a volume (`-v $(pwd):/protos`) and adjust the import paths to container paths accordingly.
+- If you want to provide the request message via stdin, using the `-d @` option, you need to use the `-i` flag on the docker command.
 
 ### Other Packages
 
@@ -79,8 +83,7 @@ https://repology.org/project/grpcurl/information
 If you already have the [Go SDK](https://golang.org/doc/install) installed, you
 can use the `go` tool to install `grpcurl`:
 ```shell
-go get github.com/fullstorydev/grpcurl/...
-go install github.com/fullstorydev/grpcurl/cmd/grpcurl +go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest ``` This installs the command into the `bin` sub-folder of wherever your `$GOPATH` diff --git a/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go index b0e69a9345..6707837b16 100644 --- a/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go +++ b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go @@ -408,12 +408,22 @@ func main() { } var creds credentials.TransportCredentials if !*plaintext { - var err error - creds, err = grpcurl.ClientTransportCredentials(*insecure, *cacert, *cert, *key) + tlsConf, err := grpcurl.ClientTLSConfig(*insecure, *cacert, *cert, *key) if err != nil { - fail(err, "Failed to configure transport credentials") + fail(err, "Failed to create TLS config") } + sslKeylogFile := os.Getenv("SSLKEYLOGFILE") + if sslKeylogFile != "" { + w, err := os.OpenFile(sslKeylogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600) + if err != nil { + fail(err, "Could not open SSLKEYLOGFILE %s", sslKeylogFile) + } + tlsConf.KeyLogWriter = w + } + + creds = credentials.NewTLS(tlsConf) + // can use either -servername or -authority; but not both if *serverName != "" && *authority != "" { if *serverName == *authority { @@ -428,9 +438,7 @@ func main() { } if overrideName != "" { - if err := creds.OverrideServerName(overrideName); err != nil { - fail(err, "Failed to override server name as %q", overrideName) - } + opts = append(opts, grpc.WithAuthority(overrideName)) } } else if *authority != "" { opts = append(opts, grpc.WithAuthority(*authority)) diff --git a/vendor/github.com/fullstorydev/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go index e6279885cc..f36fc9eec7 100644 --- a/vendor/github.com/fullstorydev/grpcurl/grpcurl.go +++ b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go @@ -27,6 +27,7 @@ import ( "github.com/jhump/protoreflect/dynamic" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" protov2 "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/descriptorpb" @@ -508,11 +509,25 @@ func makeTemplate(md *desc.MessageDescriptor, path []*desc.MessageDescriptor) pr return dm } -// ClientTransportCredentials builds transport credentials for a gRPC client using the +// ClientTransportCredentials is a helper function that constructs a TLS config with +// the given properties (see ClientTLSConfig) and then constructs and returns gRPC +// transport credentials using that config. +// +// Deprecated: Use grpcurl.ClientTLSConfig and credentials.NewTLS instead. +func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (credentials.TransportCredentials, error) { + tlsConf, err := ClientTLSConfig(insecureSkipVerify, cacertFile, clientCertFile, clientKeyFile) + if err != nil { + return nil, err + } + + return credentials.NewTLS(tlsConf), nil +} + +// ClientTLSConfig builds transport-layer config for a gRPC client using the // given properties. If cacertFile is blank, only standard trusted certs are used to // verify the server certs. If clientCertFile is blank, the client will not use a client // certificate. If clientCertFile is not blank then clientKeyFile must not be blank. 
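The Deprecated note above splits credential setup into two steps: build a *tls.Config with ClientTLSConfig, then wrap it with credentials.NewTLS. A minimal sketch of that replacement pattern follows (hedged: the CA path and server address are placeholder values, not taken from this diff):

```go
package main

import (
	"log"

	"github.com/fullstorydev/grpcurl"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Build the *tls.Config first; "ca.pem" is a placeholder CA bundle and
	// the empty cert/key arguments mean no client certificate is used.
	tlsConf, err := grpcurl.ClientTLSConfig(false, "ca.pem", "", "")
	if err != nil {
		log.Fatal(err)
	}
	// Then wrap it as gRPC transport credentials, per the Deprecated note.
	creds := credentials.NewTLS(tlsConf)

	cc, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
}
```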
-func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (credentials.TransportCredentials, error) { +func ClientTLSConfig(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (*tls.Config, error) { var tlsConf tls.Config if clientCertFile != "" { @@ -542,7 +557,7 @@ func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertF tlsConf.RootCAs = certPool } - return credentials.NewTLS(&tlsConf), nil + return &tlsConf, nil } // ServerTransportCredentials builds transport credentials for a gRPC server using the @@ -618,11 +633,11 @@ func BlockingDial(ctx context.Context, network, address string, creds credential dialer := func(ctx context.Context, address string) (net.Conn, error) { // NB: We *could* handle the TLS handshake ourselves, in the custom // dialer (instead of customizing both the dialer and the credentials). - // But that requires using WithInsecure dial option (so that the gRPC - // library doesn't *also* try to do a handshake). And that would mean - // that the library would send the wrong ":scheme" metaheader to - // servers: it would send "http" instead of "https" because it is - // unaware that TLS is actually in use. + // But that requires using insecure.NewCredentials() dial transport + // option (so that the gRPC library doesn't *also* try to do a + // handshake). And that would mean that the library would send the + // wrong ":scheme" metaheader to servers: it would send "http" instead + // of "https" because it is unaware that TLS is actually in use. conn, err := (&net.Dialer{}).DialContext(ctx, network, address) if err != nil { writeResult(err) @@ -643,7 +658,7 @@ func BlockingDial(ctx context.Context, network, address string, creds credential opts = append(opts, grpc.WithBlock(), grpc.WithContextDialer(dialer)) if creds == nil { - opts = append(opts, grpc.WithInsecure()) + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { opts = append(opts, grpc.WithTransportCredentials(creds)) } diff --git a/vendor/github.com/fullstorydev/grpcurl/invoke.go b/vendor/github.com/fullstorydev/grpcurl/invoke.go index 0db362c5c9..b5bae4b1da 100644 --- a/vendor/github.com/fullstorydev/grpcurl/invoke.go +++ b/vendor/github.com/fullstorydev/grpcurl/invoke.go @@ -227,15 +227,19 @@ func invokeClientStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.Met return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err) } - if respHeaders, err := str.Header(); err == nil { - handler.OnReceiveHeaders(respHeaders) + if str != nil { + if respHeaders, err := str.Header(); err == nil { + handler.OnReceiveHeaders(respHeaders) + } } if stat.Code() == codes.OK { handler.OnReceiveResponse(resp) } - handler.OnReceiveTrailers(stat, str.Trailer()) + if str != nil { + handler.OnReceiveTrailers(stat, str.Trailer()) + } return nil } @@ -334,8 +338,10 @@ func invokeBidi(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescr }() } - if respHeaders, err := str.Header(); err == nil { - handler.OnReceiveHeaders(respHeaders) + if str != nil { + if respHeaders, err := str.Header(); err == nil { + handler.OnReceiveHeaders(respHeaders) + } } // Download each response message @@ -362,7 +368,9 @@ func invokeBidi(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescr return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err) } - handler.OnReceiveTrailers(stat, str.Trailer()) + if str != nil { + handler.OnReceiveTrailers(stat, 
str.Trailer()) + } return nil } diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f0a..ab59311813 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000000..7accdb0c40 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). 
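The package doc above names New and NewJSON as funcr's entry points. For orientation, a hedged usage sketch (the main wrapper and the output routing are assumptions; only the APIs declared in this file are relied on):

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Route funcr's rendered key=value output to stdout; any sink works.
	logger := funcr.New(
		func(prefix, args string) { fmt.Println(prefix, args) },
		funcr.Options{Verbosity: 1}, // emit V(0) and V(1) messages
	)
	logger = logger.WithName("demo").WithValues("component", "example")
	logger.V(1).Info("hello", "answer", 42)
	// Prints something like:
	// demo "level"=1 "msg"="hello" "component"="example" "answer"=42
}
```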
+package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. 
+ RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. 
+func (f Formatter) render(builtins, args []interface{}) string {
+	// Empirically bytes.Buffer is faster than strings.Builder for this.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('{')
+	}
+	vals := builtins
+	if hook := f.opts.RenderBuiltinsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
+	continuing := len(builtins) > 0
+	if len(f.valuesStr) > 0 {
+		if continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(',')
+			} else {
+				buf.WriteByte(' ')
+			}
+		}
+		continuing = true
+		buf.WriteString(f.valuesStr)
+	}
+	vals = args
+	if hook := f.opts.RenderArgsHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+	f.flatten(buf, vals, continuing, true) // escape user-provided keys
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('}')
+	}
+	return buf.String()
+}
+
+// flatten renders a list of key-value pairs into a buffer. If continuing is
+// true, it assumes that the buffer has previous values and will emit a
+// separator (which depends on the output format) before the first pair it
+// writes. If escapeKeys is true, the keys are assumed to have
+// non-JSON-compatible characters in them and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
+	// This logic overlaps with sanitize() but saves one type-cast per key,
+	// which can be measurable.
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			k = f.nonStringKey(kvList[i])
+			kvList[i] = k
+		}
+		v := kvList[i+1]
+
+		if i > 0 || continuing {
+			if f.outputFormat == outputJSON {
+				buf.WriteByte(',')
+			} else {
+				// In theory the format could be something we don't understand. In
+				// practice, we control it, so it won't be.
+				buf.WriteByte(' ')
+			}
+		}
+
+		if escapeKeys {
+			buf.WriteString(prettyString(k))
+		} else {
+			// this is faster
+			buf.WriteByte('"')
+			buf.WriteString(k)
+			buf.WriteByte('"')
+		}
+		if f.outputFormat == outputJSON {
+			buf.WriteByte(':')
+		} else {
+			buf.WriteByte('=')
+		}
+		buf.WriteString(f.pretty(v))
+	}
+	return kvList
+}
+
+func (f Formatter) pretty(value interface{}) string {
+	return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+	flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteByte('"')
+			buf.WriteString(name)
+			buf.WriteByte('"')
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(',')
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(':')
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c05482a203..c3b56b3d2c 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -115,6 +115,15 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // +// Logger instances are meant to be passed around by value. 
Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// // Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/otel/exporters/otlp/LICENSE rename to vendor/github.com/go-logr/stdr/LICENSE diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 0000000000..5158667890 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 0000000000..93a8aab51b --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. 
+ std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. + Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. 
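The Underlier doc above is clearest as a short sketch (hedged: the wrapper program is an assumption; it relies only on the stdr API in this file plus logr's Logger.GetSink accessor):

```go
package main

import (
	"log"

	"github.com/go-logr/stdr"
)

func main() {
	logger := stdr.New(nil) // nil selects the package's default stderr logger

	// Callers only hold a logr.Logger, so reaching the stdlib logger requires
	// knowing the concrete implementation, exactly as the comment describes.
	if u, ok := logger.GetSink().(stdr.Underlier); ok {
		if std, ok := u.GetUnderlying().(*log.Logger); ok {
			std.SetPrefix("demo ") // tune the underlying log.Logger in place
		}
	}
	logger.Info("prefix changed")
}
```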
+func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml index 8cad298791..e24a6c14e6 100644 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ b/vendor/github.com/go-openapi/analysis/.golangci.yml @@ -51,3 +51,6 @@ linters: - forbidigo - cyclop - varnamelen + - exhaustruct + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml index 449a43c2bc..4e1fc0c7d4 100644 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ b/vendor/github.com/go-openapi/errors/.golangci.yml @@ -44,3 +44,5 @@ linters: - cyclop - errname - varnamelen + - exhaustruct + - maintidx diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index 854d6eec1e..77f1f92c5e 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -99,6 +99,7 @@ func (m MethodNotAllowedError) MarshalJSON() ([]byte, error) { } func errorAsJSON(err Error) []byte { + //nolint:errchkjson b, _ := json.Marshal(struct { Code int32 `json:"code"` Message string `json:"message"` @@ -146,7 +147,7 @@ func ServeError(rw http.ResponseWriter, r *http.Request, err error) { ServeError(rw, r, nil) } case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) + rw.Header().Add("Allow", strings.Join(e.Allowed, ",")) rw.WriteHeader(asHTTPCode(int(e.Code()))) if r == nil || r.Method != http.MethodHead { _, _ = rw.Write(errorAsJSON(e)) diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go index 963d427407..af01190ce6 100644 --- a/vendor/github.com/go-openapi/errors/doc.go +++ b/vendor/github.com/go-openapi/errors/doc.go @@ -13,7 +13,6 @@ // limitations under the License. /* - Package errors provides an Error interface and several concrete types implementing this interface to manage API errors and JSON-schema validation errors. @@ -23,6 +22,5 @@ it defines. It is used throughout the various go-openapi toolkit libraries (https://github.com/go-openapi). 
- */ package errors diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go index c26ad484eb..963472d1f3 100644 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ b/vendor/github.com/go-openapi/errors/middleware.go @@ -28,7 +28,6 @@ type APIVerificationFailed struct { MissingRegistration []string `json:"missingRegistration,omitempty"` } -// func (v *APIVerificationFailed) Error() string { buf := bytes.NewBuffer(nil) diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go index 0b7e382465..0d1691149d 100644 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ b/vendor/github.com/go-openapi/runtime/client_response.go @@ -15,10 +15,9 @@ package runtime import ( + "encoding/json" "fmt" "io" - - "encoding/json" ) // A ClientResponse represents a client response @@ -61,13 +60,18 @@ type APIError struct { Code int } -func (a *APIError) Error() string { - resp, _ := json.Marshal(a.Response) - return fmt.Sprintf("%s (status %d): %s", a.OperationName, a.Code, resp) +func (o *APIError) Error() string { + var resp []byte + if err, ok := o.Response.(error); ok { + resp = []byte("'" + err.Error() + "'") + } else { + resp, _ = json.Marshal(o.Response) + } + return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) } -func (a *APIError) String() string { - return a.Error() +func (o *APIError) String() string { + return o.Error() } // IsSuccess returns true when this elapse o k response returns a 2xx status code diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml deleted file mode 100644 index 2281a07b05..0000000000 --- a/vendor/github.com/go-openapi/spec/.travis.yml +++ /dev/null @@ -1,31 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -arch: - - amd64 -jobs: - include: - # only run fast tests on ppc64le - - go: 1.x - arch: ppc64le - script: - - gotestsum -f short-verbose -- ./... - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master - -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go index d6c4839712..e8b6009945 100644 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ b/vendor/github.com/go-openapi/spec/normalizer.go @@ -40,7 +40,7 @@ const fileScheme = "file" // // The base path argument is assumed to be canonicalized (e.g. 
using normalizeBase()). func normalizeURI(refPath, base string) string { - refURL, err := url.Parse(refPath) + refURL, err := parseURL(refPath) if err != nil { specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) refURL, refPath = repairURI(refPath) @@ -58,7 +58,7 @@ func normalizeURI(refPath, base string) string { return refURL.String() } - baseURL, _ := url.Parse(base) + baseURL, _ := parseURL(base) if path.IsAbs(refURL.Path) { baseURL.Path = refURL.Path } else if refURL.Path != "" { @@ -84,7 +84,6 @@ func normalizeURI(refPath, base string) string { // There is a special case for schemas that are anchored with an "id": // in that case, the rebasing is performed // against the id only if this is an anchor for the initial root document. // All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. -// func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) @@ -94,7 +93,7 @@ func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { } if id != "" { - idBaseURL, err := url.Parse(id) + idBaseURL, err := parseURL(id) if err == nil { // if the schema id is not usable as a URI, ignore it if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchaged (do not want $ref: "") // $ref relative to the ID of the schema in the root document @@ -103,7 +102,7 @@ func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { } } - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) + originalRelativeBaseURL, _ := parseURL(originalRelativeBase) r, _ := rebase(ref, originalRelativeBaseURL, false) @@ -168,7 +167,7 @@ func normalizeRef(ref *Ref, relativeBase string) *Ref { // // See also: https://en.wikipedia.org/wiki/File_URI_scheme func normalizeBase(in string) string { - u, err := url.Parse(in) + u, err := parseURL(in) if err != nil { specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) u, in = repairURI(in) diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go index c8a0645347..2df0723154 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows // Copyright 2015 go-swagger maintainers @@ -34,7 +35,7 @@ func absPath(in string) string { } func repairURI(in string) (*url.URL, string) { - u, _ := url.Parse("") + u, _ := parseURL("") debugLog("repaired URI: original: %q, repaired: %q", in, "") return u, "" } diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go index fe2d1ecd43..a66c532dbc 100644 --- a/vendor/github.com/go-openapi/spec/normalizer_windows.go +++ b/vendor/github.com/go-openapi/spec/normalizer_windows.go @@ -60,13 +60,13 @@ func repairURI(in string) (*url.URL, string) { const prefix = fileScheme + "://" if !strings.HasPrefix(in, prefix) { // giving up: resolve to empty path - u, _ := url.Parse("") + u, _ := parseURL("") return u, "" } // attempt the repair, stripping the scheme should be sufficient - u, _ := url.Parse(strings.TrimPrefix(in, prefix)) + u, _ := parseURL(strings.TrimPrefix(in, prefix)) debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) return u, u.String() diff --git a/vendor/github.com/go-openapi/spec/schema.go 
b/vendor/github.com/go-openapi/spec/schema.go index a8d0f737a7..4e9be8576b 100644 --- a/vendor/github.com/go-openapi/spec/schema.go +++ b/vendor/github.com/go-openapi/spec/schema.go @@ -17,7 +17,6 @@ package spec import ( "encoding/json" "fmt" - "net/url" "strings" "github.com/go-openapi/jsonpointer" @@ -145,7 +144,7 @@ func (r *SchemaURL) fromMap(v map[string]interface{}) error { } if vv, ok := v["$schema"]; ok { if str, ok := vv.(string); ok { - u, err := url.Parse(str) + u, err := parseURL(str) if err != nil { return err } diff --git a/vendor/github.com/go-openapi/spec/url_go18.go b/vendor/github.com/go-openapi/spec/url_go18.go new file mode 100644 index 0000000000..60b7851536 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go18.go @@ -0,0 +1,8 @@ +//go:build !go1.19 +// +build !go1.19 + +package spec + +import "net/url" + +var parseURL = url.Parse diff --git a/vendor/github.com/go-openapi/spec/url_go19.go b/vendor/github.com/go-openapi/spec/url_go19.go new file mode 100644 index 0000000000..392e3e6395 --- /dev/null +++ b/vendor/github.com/go-openapi/spec/url_go19.go @@ -0,0 +1,14 @@ +//go:build go1.19 +// +build go1.19 + +package spec + +import "net/url" + +func parseURL(s string) (*url.URL, error) { + u, err := url.Parse(s) + if err == nil { + u.OmitHost = false + } + return u, err +} diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml index da12d5e3b7..d36b25665c 100644 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ b/vendor/github.com/go-openapi/strfmt/.golangci.yml @@ -37,6 +37,7 @@ linters: - paralleltest - varnamelen - ireturn + - exhaustruct #- thelper issues: diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 2a4a71f3a8..bf503e4000 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -48,3 +48,7 @@ linters: - goimports - tenv - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c5014..55094cb74c 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 9a60409725..00038c3773 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 193702f2ce..f78ab684a0 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index ec96914405..f09ee609f3 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,126 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := 
yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +215,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
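The switch from yaml.v2's MapSlice to walking a yaml.v3 *yaml.Node (yamlNode, yamlMapping, yamlScalar above) keeps document key order while gaining typed scalars. A small usage sketch of the exported entry points, illustrative rather than part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/swag"
)

func main() {
	// Keys are deliberately out of lexical order; the yaml.Node walk
	// preserves them exactly as written in the document.
	doc, err := swag.BytesToYAMLDoc([]byte("zebra: 1\nalpha: two\n"))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := swag.YAMLToJSON(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(raw)) // {"zebra":1,"alpha":"two"}
}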
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +390,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index 3e0d8c770d..bd14c2a269 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -92,7 +92,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { res := new(Result) s := d.SpecValidator - for method, pathItem := range s.analyzer.Operations() { + for method, pathItem := range s.expandedAnalyzer().Operations() { for path, op := range pathItem { // parameters for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index f4b7a2dfe9..c8bffd78e5 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -68,7 +68,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() 
*Result { res := new(Result) s := ex.SpecValidator - for method, pathItem := range s.analyzer.Operations() { + for method, pathItem := range s.expandedAnalyzer().Operations() { for path, op := range pathItem { // parameters for _, param := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index 5d901dda71..48ebfab58e 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -210,7 +210,7 @@ type paramHelper struct { } func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, res *Result, s *SpecValidator) (params []spec.Parameter) { - operation, ok := s.analyzer.OperationFor(method, path) + operation, ok := s.expandedAnalyzer().OperationFor(method, path) if ok { // expand parameters first if necessary resolvedParams := []spec.Parameter{} @@ -224,7 +224,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re // remove params with invalid expansion from Slice operation.Parameters = resolvedParams - for _, ppr := range s.analyzer.SafeParamsFor(method, path, + for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, func(p spec.Parameter, err error) bool { // since params have already been expanded, there are few causes for error res.AddErrors(someParametersBrokenMsg(path, method, operationID)) diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index cdf5627a2c..dff01f00be 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -624,7 +624,7 @@ func (s *SpecValidator) validateParameters() *Result { // - path param must be required res := new(Result) rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) - for method, pi := range s.analyzer.Operations() { + for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) for path, op := range pi { pathToAdd := pathHelp.stripParametersInPath(path) @@ -793,3 +793,12 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio func (s *SpecValidator) SetContinueOnErrors(c bool) { s.Options.ContinueOnErrors = c } + +// expandedAnalyzer returns expanded.Analyzer when it is available. +// otherwise just analyzer. 
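The s.analyzer to s.expandedAnalyzer() substitutions across default_validator.go, example_validator.go, helpers.go, and spec.go mean default and example values are now checked against the $ref-expanded spec whenever expansion succeeded. A hedged sketch of the usual entry point this feeds (the loads and strfmt calls are assumed from the sibling go-openapi packages, which are not shown in this hunk):

package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/validate"
)

func main() {
	doc, err := loads.Spec("./swagger.yaml") // any Swagger 2.0 document
	if err != nil {
		log.Fatal(err)
	}
	// validate.Spec expands the document internally; with this change,
	// parameter defaults and examples resolve via the expanded analyzer.
	if err := validate.Spec(doc, strfmt.Default); err != nil {
		log.Fatalf("invalid spec: %v", err)
	}
	log.Println("spec is valid")
}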
+func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { + if s.expanded != nil && s.expanded.Analyzer != nil { + return s.expanded.Analyzer + } + return s.analyzer +} diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 6712e95aa4..8b730b6d30 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.10.1-green.svg) +![Project status](https://img.shields.io/badge/version-10.11.0-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -130,7 +130,7 @@ Baked-in Validations | contains | Contains | | containsany | Contains Any | | containsrune | Contains Rune | -| endsnotwith | Ends With | +| endsnotwith | Ends Not With | | endswith | Ends With | | excludes | Excludes | | excludesall | Excludes All | @@ -153,6 +153,7 @@ Baked-in Validations | bcp47_language_tag | Language tag (BCP 47) | | btc_addr | Bitcoin Address | | btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | +| credit_card | Credit Card Number | | datetime | Datetime | | e164 | e164 formatted phone number | | email | E-mail String @@ -189,6 +190,16 @@ Baked-in Validations | uuid5 | Universally Unique Identifier UUID v5 | | uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 | | uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 | +| md4 | MD4 hash | +| md5 | MD5 hash | +| sha256 | SHA256 hash | +| sha384 | SHA384 hash | +| sha512 | SHA512 hash | +| ripemd128 | RIPEMD-128 hash | +| ripemd160 | RIPEMD-160 hash | +| tiger128 | TIGER128 hash | +| tiger160 | TIGER160 hash | +| tiger192 | TIGER192 hash | | semver | Semantic Versioning 2.0.0 | | ulid | Universally Unique Lexicographically Sortable Identifier ULID | @@ -219,6 +230,8 @@ Baked-in Validations | required_with_all | Required With All | | required_without | Required Without | | required_without_all | Required Without All | +| excluded_if | Excluded If | +| excluded_unless | Excluded Unless | | excluded_with | Excluded With | | excluded_with_all | Excluded With All | | excluded_without | Excluded Without | diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index 7868b66fa7..f2f0939cf2 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -75,6 +75,8 @@ var ( "required_with_all": requiredWithAll, "required_without": requiredWithout, "required_without_all": requiredWithoutAll, + "excluded_if": excludedIf, + "excluded_unless": excludedUnless, "excluded_with": excludedWith, "excluded_with_all": excludedWithAll, "excluded_without": excludedWithout, @@ -149,6 +151,16 @@ var ( "uuid4_rfc4122": isUUID4RFC4122,
"uuid5_rfc4122": isUUID5RFC4122, "ulid": isULID, + "md4": isMD4, + "md5": isMD5, + "sha256": isSHA256, + "sha384": isSHA384, + "sha512": isSHA512, + "ripemd128": isRIPEMD128, + "ripemd160": isRIPEMD160, + "tiger128": isTIGER128, + "tiger160": isTIGER160, + "tiger192": isTIGER192, "ascii": isASCII, "printascii": isPrintableASCII, "multibyte": hasMultiByteCharacter, @@ -201,11 +213,14 @@ var ( "bic": isIsoBicFormat, "semver": isSemverFormat, "dns_rfc1035_label": isDnsRFC1035LabelFormat, + "credit_card": isCreditCard, } ) -var oneofValsCache = map[string][]string{} -var oneofValsCacheRWLock = sync.RWMutex{} +var ( + oneofValsCache = map[string][]string{} + oneofValsCacheRWLock = sync.RWMutex{} +) func parseOneOfParam2(s string) []string { oneofValsCacheRWLock.RLock() @@ -261,7 +276,6 @@ func isOneOf(fl FieldLevel) bool { // isUnique is the validation function for validating if each array|slice|map value is unique func isUnique(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() v := reflect.ValueOf(struct{}{}) @@ -311,7 +325,6 @@ func isUnique(fl FieldLevel) bool { // isMAC is the validation function for validating if the field's value is a valid MAC address. func isMAC(fl FieldLevel) bool { - _, err := net.ParseMAC(fl.Field().String()) return err == nil @@ -319,7 +332,6 @@ func isMAC(fl FieldLevel) bool { // isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. func isCIDRv4(fl FieldLevel) bool { - ip, _, err := net.ParseCIDR(fl.Field().String()) return err == nil && ip.To4() != nil @@ -327,7 +339,6 @@ func isCIDRv4(fl FieldLevel) bool { // isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. func isCIDRv6(fl FieldLevel) bool { - ip, _, err := net.ParseCIDR(fl.Field().String()) return err == nil && ip.To4() == nil @@ -335,7 +346,6 @@ func isCIDRv6(fl FieldLevel) bool { // isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. func isCIDR(fl FieldLevel) bool { - _, _, err := net.ParseCIDR(fl.Field().String()) return err == nil @@ -343,7 +353,6 @@ func isCIDR(fl FieldLevel) bool { // isIPv4 is the validation function for validating if a value is a valid v4 IP address. func isIPv4(fl FieldLevel) bool { - ip := net.ParseIP(fl.Field().String()) return ip != nil && ip.To4() != nil @@ -351,7 +360,6 @@ func isIPv4(fl FieldLevel) bool { // isIPv6 is the validation function for validating if the field's value is a valid v6 IP address. func isIPv6(fl FieldLevel) bool { - ip := net.ParseIP(fl.Field().String()) return ip != nil && ip.To4() == nil @@ -359,7 +367,6 @@ func isIPv6(fl FieldLevel) bool { // isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. func isIP(fl FieldLevel) bool { - ip := net.ParseIP(fl.Field().String()) return ip != nil @@ -367,7 +374,6 @@ func isIP(fl FieldLevel) bool { // isSSN is the validation function for validating if the field's value is a valid SSN. func isSSN(fl FieldLevel) bool { - field := fl.Field() if field.Len() != 11 { @@ -425,7 +431,6 @@ func isLatitude(fl FieldLevel) bool { // isDataURI is the validation function for validating if the field's value is a valid data URI. func isDataURI(fl FieldLevel) bool { - uri := strings.SplitN(fl.Field().String(), ",", 2) if len(uri) != 2 { @@ -441,7 +446,6 @@ func isDataURI(fl FieldLevel) bool { // hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. 
func hasMultiByteCharacter(fl FieldLevel) bool { - field := fl.Field() if field.Len() == 0 { @@ -506,6 +510,56 @@ func isULID(fl FieldLevel) bool { return uLIDRegex.MatchString(fl.Field().String()) } +// isMD4 is the validation function for validating if the field's value is a valid MD4. +func isMD4(fl FieldLevel) bool { + return md4Regex.MatchString(fl.Field().String()) +} + +// isMD5 is the validation function for validating if the field's value is a valid MD5. +func isMD5(fl FieldLevel) bool { + return md5Regex.MatchString(fl.Field().String()) +} + +// isSHA256 is the validation function for validating if the field's value is a valid SHA256. +func isSHA256(fl FieldLevel) bool { + return sha256Regex.MatchString(fl.Field().String()) +} + +// isSHA384 is the validation function for validating if the field's value is a valid SHA384. +func isSHA384(fl FieldLevel) bool { + return sha384Regex.MatchString(fl.Field().String()) +} + +// isSHA512 is the validation function for validating if the field's value is a valid SHA512. +func isSHA512(fl FieldLevel) bool { + return sha512Regex.MatchString(fl.Field().String()) +} + +// isRIPEMD128 is the validation function for validating if the field's value is a valid RIPEMD128. +func isRIPEMD128(fl FieldLevel) bool { + return ripemd128Regex.MatchString(fl.Field().String()) +} + +// isRIPEMD160 is the validation function for validating if the field's value is a valid RIPEMD160. +func isRIPEMD160(fl FieldLevel) bool { + return ripemd160Regex.MatchString(fl.Field().String()) +} + +// isTIGER128 is the validation function for validating if the field's value is a valid TIGER128. +func isTIGER128(fl FieldLevel) bool { + return tiger128Regex.MatchString(fl.Field().String()) +} + +// isTIGER160 is the validation function for validating if the field's value is a valid TIGER160. +func isTIGER160(fl FieldLevel) bool { + return tiger160Regex.MatchString(fl.Field().String()) +} + +// isTIGER192 is the validation function for validating if the field's value is a valid TIGER192. +func isTIGER192(fl FieldLevel) bool { + return tiger192Regex.MatchString(fl.Field().String()) +} + // isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. func isISBN(fl FieldLevel) bool { return isISBN10(fl) || isISBN13(fl) @@ -513,7 +567,6 @@ func isISBN(fl FieldLevel) bool { // isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. func isISBN13(fl FieldLevel) bool { - s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) if !iSBN13Regex.MatchString(s) { @@ -534,7 +587,6 @@ func isISBN13(fl FieldLevel) bool { // isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. func isISBN10(fl FieldLevel) bool { - s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) if !iSBN10Regex.MatchString(s) { @@ -722,7 +774,6 @@ func excludes(fl FieldLevel) bool { // containsRune is the validation function for validating that the field's value contains the rune specified within the param. func containsRune(fl FieldLevel) bool { - r, _ := utf8.DecodeRuneInString(fl.Param()) return strings.ContainsRune(fl.Field().String(), r) @@ -785,7 +836,6 @@ func fieldExcludes(fl FieldLevel) bool { // isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
func isNeField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -816,12 +866,7 @@ func isNeField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return true - } - - if fieldType == timeType { + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { t := currentField.Interface().(time.Time) fieldTime := field.Interface().(time.Time) @@ -829,6 +874,10 @@ func isNeField(fl FieldLevel) bool { return !fieldTime.Equal(t) } + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } } // default reflect.String: @@ -842,7 +891,6 @@ func isNe(fl FieldLevel) bool { // isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. func isLteCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -869,18 +917,18 @@ func isLteCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) + topTime := topField.Convert(timeType).Interface().(time.Time) return fieldTime.Before(topTime) || fieldTime.Equal(topTime) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } } // default reflect.String: @@ -890,7 +938,6 @@ func isLteCrossStructField(fl FieldLevel) bool { // isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. // NOTE: This is exposed for use within your own custom functions and not intended to be called directly. func isLtCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -917,18 +964,18 @@ func isLtCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) + topTime := topField.Convert(timeType).Interface().(time.Time) return fieldTime.Before(topTime) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } } // default reflect.String: @@ -937,7 +984,6 @@ func isLtCrossStructField(fl FieldLevel) bool { // isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. func isGteCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -964,18 +1010,18 @@ func isGteCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. 
struct and time - if fieldType != topField.Type() { - return false - } - - if fieldType == timeType { + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) + topTime := topField.Convert(timeType).Interface().(time.Time) return fieldTime.After(topTime) || fieldTime.Equal(topTime) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } } // default reflect.String: @@ -984,7 +1030,6 @@ func isGteCrossStructField(fl FieldLevel) bool { // isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. func isGtCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1011,18 +1056,18 @@ func isGtCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - fieldTime := field.Interface().(time.Time) - topTime := topField.Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) + topTime := topField.Convert(timeType).Interface().(time.Time) return fieldTime.After(topTime) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } } // default reflect.String: @@ -1031,7 +1076,6 @@ func isGtCrossStructField(fl FieldLevel) bool { // isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. func isNeCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1061,18 +1105,18 @@ func isNeCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != topField.Type() { - return true - } + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - t := field.Interface().(time.Time) - fieldTime := topField.Interface().(time.Time) + t := field.Convert(timeType).Interface().(time.Time) + fieldTime := topField.Convert(timeType).Interface().(time.Time) return !fieldTime.Equal(t) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } } // default reflect.String: @@ -1081,7 +1125,6 @@ func isNeCrossStructField(fl FieldLevel) bool { // isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. func isEqCrossStructField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1111,18 +1154,18 @@ func isEqCrossStructField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. 
struct and time - if fieldType != topField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - t := field.Interface().(time.Time) - fieldTime := topField.Interface().(time.Time) + t := field.Convert(timeType).Interface().(time.Time) + fieldTime := topField.Convert(timeType).Interface().(time.Time) return fieldTime.Equal(t) } + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } } // default reflect.String: @@ -1131,7 +1174,6 @@ func isEqCrossStructField(fl FieldLevel) bool { // isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. func isEqField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1161,19 +1203,18 @@ func isEqField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) + t := currentField.Convert(timeType).Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) return fieldTime.Equal(t) } + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } } // default reflect.String: @@ -1182,7 +1223,6 @@ func isEqField(fl FieldLevel) bool { // isEq is the validation function for validating if the current field's value is equal to the param's value. func isEq(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -1234,7 +1274,7 @@ func isPostcodeByIso3166Alpha2(fl FieldLevel) bool { return reg.MatchString(field.String()) } -// isPostcodeByIso3166Alpha2 validates by field which represents for a value of country code in iso 3166 alpha 2 +// isPostcodeByIso3166Alpha2Field validates by field which represents for a value of country code in iso 3166 alpha 2 // example: `postcode_iso3166_alpha2_field=CountryCode` func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool { field := fl.Field() @@ -1273,11 +1313,9 @@ func isBase64URL(fl FieldLevel) bool { // isURI is the validation function for validating if the current field's value is a valid URI. func isURI(fl FieldLevel) bool { - field := fl.Field() switch field.Kind() { - case reflect.String: s := field.String() @@ -1302,11 +1340,9 @@ func isURI(fl FieldLevel) bool { // isURL is the validation function for validating if the current field's value is a valid URL. func isURL(fl FieldLevel) bool { - field := fl.Field() switch field.Kind() { - case reflect.String: var i int @@ -1339,7 +1375,6 @@ func isUrnRFC2141(fl FieldLevel) bool { field := fl.Field() switch field.Kind() { - case reflect.String: str := field.String() @@ -1542,6 +1577,22 @@ func requiredIf(fl FieldLevel) bool { return hasValue(fl) } +// excludedIf is the validation function +// The field under validation must not be present or is empty only if all the other specified fields are equal to the value following with the specified field. 
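excludedIf above (and excludedUnless just below) back the new excluded_if/excluded_unless tags registered in baked_in.go. A hedged sketch of how the tags are written on a struct; the type and field names are invented, and the documented semantics are those stated in doc.go later in this patch:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Order is a hypothetical shape exercising both new tags.
type Order struct {
	Payment string `validate:"required,oneof=card invoice"`
	// Documented intent: PONumber must be empty if Payment == "card".
	PONumber string `validate:"excluded_if=Payment card"`
	// Documented intent: CardNumber must be empty unless Payment == "card".
	CardNumber string `validate:"excluded_unless=Payment card"`
}

func main() {
	v := validator.New()
	err := v.Struct(Order{Payment: "invoice", PONumber: "PO-1234"})
	fmt.Println(err) // inspect FieldErrors tagged excluded_if / excluded_unless
}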
+func excludedIf(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for excluded_if %s", fl.FieldName())) + } + + for i := 0; i < len(params); i += 2 { + if !requireCheckFieldValue(fl, params[i], params[i+1], false) { + return false + } + } + return true +} + // requiredUnless is the validation function // The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field. func requiredUnless(fl FieldLevel) bool { @@ -1558,6 +1609,21 @@ func requiredUnless(fl FieldLevel) bool { return hasValue(fl) } +// excludedUnless is the validation function +// The field under validation must not be present or is empty unless all the other specified fields are equal to the value following with the specified field. +func excludedUnless(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for excluded_unless %s", fl.FieldName())) + } + for i := 0; i < len(params); i += 2 { + if !requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return !hasValue(fl) +} + // excludedWith is the validation function // The field under validation must not be present or is empty if any of the other specified fields are present. func excludedWith(fl FieldLevel) bool { @@ -1650,7 +1716,6 @@ func requiredWithoutAll(fl FieldLevel) bool { // isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. func isGteField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1677,18 +1742,18 @@ func isGteField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) + t := currentField.Convert(timeType).Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) return fieldTime.After(t) || fieldTime.Equal(t) } + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } } // default reflect.String @@ -1697,7 +1762,6 @@ func isGteField(fl FieldLevel) bool { // isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. func isGtField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1724,18 +1788,18 @@ func isGtField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) + t := currentField.Convert(timeType).Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) return fieldTime.After(t) } + + // Not Same underlying type i.e. 
struct and time + if fieldType != currentField.Type() { + return false + } } // default reflect.String @@ -1744,7 +1808,6 @@ func isGtField(fl FieldLevel) bool { // isGte is the validation function for validating if the current field's value is greater than or equal to the param's value. func isGte(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -1777,10 +1840,10 @@ func isGte(fl FieldLevel) bool { case reflect.Struct: - if field.Type() == timeType { + if field.Type().ConvertibleTo(timeType) { now := time.Now().UTC() - t := field.Interface().(time.Time) + t := field.Convert(timeType).Interface().(time.Time) return t.After(now) || t.Equal(now) } @@ -1791,7 +1854,6 @@ func isGte(fl FieldLevel) bool { // isGt is the validation function for validating if the current field's value is greater than the param's value. func isGt(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -1823,9 +1885,9 @@ func isGt(fl FieldLevel) bool { return field.Float() > p case reflect.Struct: - if field.Type() == timeType { + if field.Type().ConvertibleTo(timeType) { - return field.Interface().(time.Time).After(time.Now().UTC()) + return field.Convert(timeType).Interface().(time.Time).After(time.Now().UTC()) } } @@ -1834,7 +1896,6 @@ func isGt(fl FieldLevel) bool { // hasLengthOf is the validation function for validating if the current field's value is equal to the param's value. func hasLengthOf(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -1876,7 +1937,6 @@ func hasMinOf(fl FieldLevel) bool { // isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. func isLteField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1903,18 +1963,18 @@ func isLteField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } - - if fieldType == timeType { + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) + t := currentField.Convert(timeType).Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) return fieldTime.Before(t) || fieldTime.Equal(t) } + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } } // default reflect.String @@ -1923,7 +1983,6 @@ func isLteField(fl FieldLevel) bool { // isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. func isLtField(fl FieldLevel) bool { - field := fl.Field() kind := field.Kind() @@ -1950,18 +2009,18 @@ func isLtField(fl FieldLevel) bool { fieldType := field.Type() - // Not Same underlying type i.e. struct and time - if fieldType != currentField.Type() { - return false - } + if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) { - if fieldType == timeType { - - t := currentField.Interface().(time.Time) - fieldTime := field.Interface().(time.Time) + t := currentField.Convert(timeType).Interface().(time.Time) + fieldTime := field.Convert(timeType).Interface().(time.Time) return fieldTime.Before(t) } + + // Not Same underlying type i.e. 
struct and time + if fieldType != currentField.Type() { + return false + } } // default reflect.String @@ -1970,7 +2029,6 @@ func isLtField(fl FieldLevel) bool { // isLte is the validation function for validating if the current field's value is less than or equal to the param's value. func isLte(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -2003,10 +2061,10 @@ func isLte(fl FieldLevel) bool { case reflect.Struct: - if field.Type() == timeType { + if field.Type().ConvertibleTo(timeType) { now := time.Now().UTC() - t := field.Interface().(time.Time) + t := field.Convert(timeType).Interface().(time.Time) return t.Before(now) || t.Equal(now) } @@ -2017,7 +2075,6 @@ func isLte(fl FieldLevel) bool { // isLt is the validation function for validating if the current field's value is less than the param's value. func isLt(fl FieldLevel) bool { - field := fl.Field() param := fl.Param() @@ -2050,9 +2107,9 @@ func isLt(fl FieldLevel) bool { case reflect.Struct: - if field.Type() == timeType { + if field.Type().ConvertibleTo(timeType) { - return field.Interface().(time.Time).Before(time.Now().UTC()) + return field.Convert(timeType).Interface().(time.Time).Before(time.Now().UTC()) } } @@ -2066,7 +2123,6 @@ func hasMaxOf(fl FieldLevel) bool { // isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. func isTCP4AddrResolvable(fl FieldLevel) bool { - if !isIP4Addr(fl) { return false } @@ -2077,7 +2133,6 @@ func isTCP4AddrResolvable(fl FieldLevel) bool { // isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. func isTCP6AddrResolvable(fl FieldLevel) bool { - if !isIP6Addr(fl) { return false } @@ -2089,7 +2144,6 @@ func isTCP6AddrResolvable(fl FieldLevel) bool { // isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. func isTCPAddrResolvable(fl FieldLevel) bool { - if !isIP4Addr(fl) && !isIP6Addr(fl) { return false } @@ -2101,7 +2155,6 @@ func isTCPAddrResolvable(fl FieldLevel) bool { // isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. func isUDP4AddrResolvable(fl FieldLevel) bool { - if !isIP4Addr(fl) { return false } @@ -2113,7 +2166,6 @@ func isUDP4AddrResolvable(fl FieldLevel) bool { // isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. func isUDP6AddrResolvable(fl FieldLevel) bool { - if !isIP6Addr(fl) { return false } @@ -2125,7 +2177,6 @@ func isUDP6AddrResolvable(fl FieldLevel) bool { // isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. func isUDPAddrResolvable(fl FieldLevel) bool { - if !isIP4Addr(fl) && !isIP6Addr(fl) { return false } @@ -2137,7 +2188,6 @@ func isUDPAddrResolvable(fl FieldLevel) bool { // isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. func isIP4AddrResolvable(fl FieldLevel) bool { - if !isIPv4(fl) { return false } @@ -2149,7 +2199,6 @@ func isIP4AddrResolvable(fl FieldLevel) bool { // isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. 
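The recurring rewrite above, from fieldType == timeType to fieldType.ConvertibleTo(timeType) with an explicit Convert before the type assertion, lets the gt/gte/lt/lte validators and their *field variants compare named types whose underlying type is time.Time. A sketch under that assumption (the Timestamp and Window types are invented):

package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

// Timestamp has time.Time as its underlying type; the former == timeType
// check rejected it, while ConvertibleTo(timeType) accepts it.
type Timestamp time.Time

type Window struct {
	Start Timestamp
	End   Timestamp `validate:"gtfield=Start"`
}

func main() {
	v := validator.New()
	now := time.Now()
	w := Window{Start: Timestamp(now), End: Timestamp(now.Add(time.Hour))}
	fmt.Println(v.Struct(w)) // expected <nil>: End is after Start
}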
func isIP6AddrResolvable(fl FieldLevel) bool { - if !isIPv6(fl) { return false } @@ -2161,7 +2210,6 @@ func isIP6AddrResolvable(fl FieldLevel) bool { // isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. func isIPAddrResolvable(fl FieldLevel) bool { - if !isIP(fl) { return false } @@ -2173,14 +2221,12 @@ func isIPAddrResolvable(fl FieldLevel) bool { // isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. func isUnixAddrResolvable(fl FieldLevel) bool { - _, err := net.ResolveUnixAddr("unix", fl.Field().String()) return err == nil } func isIP4Addr(fl FieldLevel) bool { - val := fl.Field().String() if idx := strings.LastIndex(val, ":"); idx != -1 { @@ -2193,7 +2239,6 @@ func isIP4Addr(fl FieldLevel) bool { } func isIP6Addr(fl FieldLevel) bool { - val := fl.Field().String() if idx := strings.LastIndex(val, ":"); idx != -1 { @@ -2436,3 +2481,41 @@ func isDnsRFC1035LabelFormat(fl FieldLevel) bool { val := fl.Field().String() return dnsRegexRFC1035Label.MatchString(val) } + +// isCreditCard is the validation function for validating if the current field's value is a valid credit card number +func isCreditCard(fl FieldLevel) bool { + val := fl.Field().String() + var creditCard bytes.Buffer + segments := strings.Split(val, " ") + for _, segment := range segments { + if len(segment) < 3 { + return false + } + creditCard.WriteString(segment) + } + + ccDigits := strings.Split(creditCard.String(), "") + size := len(ccDigits) + if size < 12 || size > 19 { + return false + } + + sum := 0 + for i, digit := range ccDigits { + value, err := strconv.Atoi(digit) + if err != nil { + return false + } + if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 { + v := value * 2 + if v >= 10 { + sum += 1 + (v % 10) + } else { + sum += v + } + } else { + sum += value + } + } + return (sum % 10) == 0 +} diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go index 0d18d6ec49..7b84c91fe5 100644 --- a/vendor/github.com/go-playground/validator/v10/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -114,12 +114,13 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} numFields := current.NumField() + rules := v.rules[typ] var ctag *cTag var fld reflect.StructField var tag string var customName string - + for i := 0; i < numFields; i++ { fld = typ.Field(i) @@ -128,7 +129,11 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr continue } - tag = fld.Tag.Get(v.tagName) + if rtag, ok := rules[fld.Name]; ok { + tag = rtag + } else { + tag = fld.Tag.Get(v.tagName) + } if tag == skipValidationTag { continue diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index b284c379d6..7341c67d74 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -349,6 +349,40 @@ Example: // require the field if the Field1 and Field2 is not present: Usage: required_without_all=Field1 Field2 +Excluded If + +The field under validation must not be present or must be empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "".
For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: excluded_if + +Examples: + + // exclude the field if Field1 is equal to the parameter given: + Usage: excluded_if=Field1 foobar + + // exclude the field if Field1 and Field2 are equal to the values respectively: + Usage: excluded_if=Field1 foo Field2 bar + +Excluded Unless + +The field under validation must not be present or must be empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: excluded_unless + +Examples: + + // exclude the field unless Field1 is equal to the parameter given: + Usage: excluded_unless=Field1 foobar + + // exclude the field unless Field1 and Field2 are equal to the values respectively: + Usage: excluded_unless=Field1 foo Field2 bar + Is Default This validates that the value is the default value and is almost the @@ -1283,6 +1317,12 @@ More information on https://semver.org/ Usage: semver +Credit Card + +This validates that a string value contains a valid credit card number using the Luhn algorithm. + + Usage: credit_card + Alias Validators and Tags NOTE: When returning an error, the tag returned in "FieldError" will be diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go index 48e51d571f..9c1c634239 100644 --- a/vendor/github.com/go-playground/validator/v10/regexes.go +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -30,6 +30,16 @@ const ( uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" uLIDRegexString = "^[A-HJKMNP-TV-Z0-9]{26}$" + md4RegexString = "^[0-9a-f]{32}$" + md5RegexString = "^[0-9a-f]{32}$" + sha256RegexString = "^[0-9a-f]{64}$" + sha384RegexString = "^[0-9a-f]{96}$" + sha512RegexString = "^[0-9a-f]{128}$" + ripemd128RegexString = "^[0-9a-f]{32}$" + ripemd160RegexString = "^[0-9a-f]{40}$" + tiger128RegexString = "^[0-9a-f]{32}$" + tiger160RegexString = "^[0-9a-f]{40}$" + tiger192RegexString = "^[0-9a-f]{48}$" aSCIIRegexString = "^[\x00-\x7F]*$" printableASCIIRegexString = "^[\x20-\x7E]*$" multibyteRegexString = "[^\x00-\x7F]" @@ -37,12 +47,12 @@ const ( latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$` - hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 - hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 - fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.') - btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address - btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 - btcAddressLowerRegexStringBech32 =
`^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 + hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 + fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.') + btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address + btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 ethAddressRegexString = `^0x[0-9a-fA-F]{40}$` ethAddressUpperRegexString = `^0x[0-9A-F]{40}$` ethAddressLowerRegexString = `^0x[0-9a-f]{40}$` @@ -84,6 +94,16 @@ var ( uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) uLIDRegex = regexp.MustCompile(uLIDRegexString) + md4Regex = regexp.MustCompile(md4RegexString) + md5Regex = regexp.MustCompile(md5RegexString) + sha256Regex = regexp.MustCompile(sha256RegexString) + sha384Regex = regexp.MustCompile(sha384RegexString) + sha512Regex = regexp.MustCompile(sha512RegexString) + ripemd128Regex = regexp.MustCompile(ripemd128RegexString) + ripemd160Regex = regexp.MustCompile(ripemd160RegexString) + tiger128Regex = regexp.MustCompile(tiger128RegexString) + tiger160Regex = regexp.MustCompile(tiger160RegexString) + tiger192Regex = regexp.MustCompile(tiger192RegexString) aSCIIRegex = regexp.MustCompile(aSCIIRegexString) printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) multibyteRegex = regexp.MustCompile(multibyteRegexString) diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index 56420f4301..36da85514e 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -82,7 +82,7 @@ BEGIN: fld := namespace var ns string - if typ != timeType { + if !typ.ConvertibleTo(timeType) { idx := strings.Index(namespace, namespaceSeparator) diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go index 2a4fad022d..80da095a63 100644 --- a/vendor/github.com/go-playground/validator/v10/validator.go +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -164,7 +164,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr typ = current.Type() - if typ != timeType { + if !typ.ConvertibleTo(timeType) { if ct != nil { @@ -355,6 +355,10 @@ OUTER: v.ct = ct if ct.fn(ctx, v) { + if ct.isBlockEnd { + ct = ct.next + continue OUTER + } // drain rest of the 'or' values, then continue or leave for { @@ -368,6 +372,11 @@ OUTER: if ct.typeof != typeOr { continue OUTER } + + if ct.isBlockEnd { + ct = ct.next + continue OUTER + } } } diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index 973964fc20..9493da491a 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ 
b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -33,6 +33,8 @@ const ( excludedWithoutTag = "excluded_without" excludedWithTag = "excluded_with" excludedWithAllTag = "excluded_with_all" + excludedIfTag = "excluded_if" + excludedUnlessTag = "excluded_unless" skipValidationTag = "-" diveTag = "dive" keysTag = "keys" @@ -84,6 +86,7 @@ type Validate struct { aliases map[string]string validations map[string]internalValidationFuncWrapper transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + rules map[reflect.Type]map[string]string tagCache *tagCache structCache *structCache } @@ -120,7 +123,7 @@ func New() *Validate { switch k { // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag, - excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag: + excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag: _ = v.registerValidation(k, wrapFunc(val), true, true) default: // no need to error check here, baked in will always be valid @@ -152,15 +155,24 @@ func (v *Validate) SetTagName(name string) { func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { errs := make(map[string]interface{}) for field, rule := range rules { - if reflect.ValueOf(rule).Kind() == reflect.Map && reflect.ValueOf(data[field]).Kind() == reflect.Map { - err := v.ValidateMapCtx(ctx, data[field].(map[string]interface{}), rule.(map[string]interface{})) - if len(err) > 0 { - errs[field] = err + if ruleObj, ok := rule.(map[string]interface{}); ok { + if dataObj, ok := data[field].(map[string]interface{}); ok { + err := v.ValidateMapCtx(ctx, dataObj, ruleObj) + if len(err) > 0 { + errs[field] = err + } + } else if dataObjs, ok := data[field].([]map[string]interface{}); ok { + for _, obj := range dataObjs { + err := v.ValidateMapCtx(ctx, obj, ruleObj) + if len(err) > 0 { + errs[field] = err + } + } + } else { + errs[field] = errors.New("The field: '" + field + "' is not a map to dive") } - } else if reflect.ValueOf(rule).Kind() == reflect.Map { - errs[field] = errors.New("The field: '" + field + "' is not a map to dive") - } else { - err := v.VarCtx(ctx, data[field], rule.(string)) + } else if ruleStr, ok := rule.(string); ok { + err := v.VarCtx(ctx, data[field], ruleStr) if err != nil { errs[field] = err } @@ -169,7 +181,7 @@ func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{ return errs } -// ValidateMap validates map data form a map of tags +// ValidateMap validates map data from a map of tags func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { return v.ValidateMapCtx(context.Background(), data, rules) } @@ -180,6 +192,7 @@ func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]int // // validate.RegisterTagNameFunc(func(fld reflect.StructField) string { // name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// // skip if tag key says it should be ignored // if name == "-" { // return "" // } @@ -271,6 +284,34 @@ func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...i } } +// RegisterStructValidationMapRules registers validate map rules. 
+// Be aware that map validation rules supersede those defined on a/the struct if present. +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationMapRules(rules map[string]string, types ...interface{}) { + if v.rules == nil { + v.rules = make(map[reflect.Type]map[string]string) + } + + deepCopyRules := make(map[string]string) + for i, rule := range rules { + deepCopyRules[i] = rule + } + + for _, t := range types { + typ := reflect.TypeOf(t) + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + if typ.Kind() != reflect.Struct { + continue + } + v.rules[typ] = deepCopyRules + } +} + // RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types // // NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation @@ -331,7 +372,7 @@ func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { val = val.Elem() } - if val.Kind() != reflect.Struct || val.Type() == timeType { + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { return &InvalidValidationError{Type: reflect.TypeOf(s)} } @@ -376,7 +417,7 @@ func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn Filt val = val.Elem() } - if val.Kind() != reflect.Struct || val.Type() == timeType { + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { return &InvalidValidationError{Type: reflect.TypeOf(s)} } @@ -424,7 +465,7 @@ func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields . val = val.Elem() } - if val.Kind() != reflect.Struct || val.Type() == timeType { + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { return &InvalidValidationError{Type: reflect.TypeOf(s)} } @@ -514,7 +555,7 @@ func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields .. val = val.Elem() } - if val.Kind() != reflect.Struct || val.Type() == timeType { + if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) { return &InvalidValidationError{Type: reflect.TypeOf(s)} } diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md deleted file mode 100644 index f11ccccaa4..0000000000 --- a/vendor/github.com/go-stack/stack/README.md +++ /dev/null @@ -1,38 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack) -[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack) -[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack) -[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master) - -# stack - -Package stack implements utilities to capture, manipulate, and format call -stacks. It provides a simpler API than package runtime. - -The implementation takes care of the minutia and special cases of interpreting -the program counter (pc) values returned by runtime.Callers. - -## Versioning - -Package stack publishes releases via [semver](http://semver.org/) compatible Git -tags prefixed with a single 'v'. The master branch always contains the latest -release. The develop branch contains unreleased commits. 
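The validator hunks above change three things worth seeing together: struct-type checks now use `ConvertibleTo(timeType)` instead of strict equality, `ValidateMapCtx` dives into nested maps and `[]map[string]interface{}` slices, and `RegisterStructValidationMapRules` attaches rules to struct types without tags. A minimal sketch of the new surface, where the `User` type and the specific field rules are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// User is a hypothetical type used only to illustrate map-based rules.
type User struct {
	Name string
}

func main() {
	v := validator.New()

	// Rules registered this way supersede any `validate` tags on User.
	v.RegisterStructValidationMapRules(map[string]string{"Name": "required"}, User{})
	fmt.Println(v.Struct(User{})) // Name should fail "required"

	// ValidateMap now also dives into []map[string]interface{} values,
	// per the rewritten ValidateMapCtx above.
	data := map[string]interface{}{
		"name": "",
		"contacts": []map[string]interface{}{
			{"email": "not-an-email"},
		},
	}
	rules := map[string]interface{}{
		"name":     "required",
		"contacts": map[string]interface{}{"email": "required,email"},
	}
	fmt.Println(v.ValidateMap(data, rules)) // errors for "name" and "contacts"
}
```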
- -## Formatting - -Package stack's types implement fmt.Formatter, which provides a simple and -flexible way to declaratively configure formatting when used with logging or -error tracking packages. - -```go -func DoTheThing() { - c := stack.Caller(0) - log.Print(c) // "source.go:10" - log.Printf("%+v", c) // "pkg/path/source.go:10" - log.Printf("%n", c) // "DoTheThing" - - s := stack.Trace().TrimRuntime() - log.Print(s) // "[source.go:15 caller.go:42 main.go:14]" -} -``` - -See the docs for all of the supported formatting options. diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go deleted file mode 100644 index ac3b93b14f..0000000000 --- a/vendor/github.com/go-stack/stack/stack.go +++ /dev/null @@ -1,400 +0,0 @@ -// +build go1.7 - -// Package stack implements utilities to capture, manipulate, and format call -// stacks. It provides a simpler API than package runtime. -// -// The implementation takes care of the minutia and special cases of -// interpreting the program counter (pc) values returned by runtime.Callers. -// -// Package stack's types implement fmt.Formatter, which provides a simple and -// flexible way to declaratively configure formatting when used with logging -// or error tracking packages. -package stack - -import ( - "bytes" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "strings" -) - -// Call records a single function invocation from a goroutine stack. -type Call struct { - frame runtime.Frame -} - -// Caller returns a Call from the stack of the current goroutine. The argument -// skip is the number of stack frames to ascend, with 0 identifying the -// calling function. -func Caller(skip int) Call { - // As of Go 1.9 we need room for up to three PC entries. - // - // 0. An entry for the stack frame prior to the target to check for - // special handling needed if that prior entry is runtime.sigpanic. - // 1. A possible second entry to hold metadata about skipped inlined - // functions. If inline functions were not skipped the target frame - // PC will be here. - // 2. A third entry for the target frame PC when the second entry - // is used for skipped inline functions. - var pcs [3]uintptr - n := runtime.Callers(skip+1, pcs[:]) - frames := runtime.CallersFrames(pcs[:n]) - frame, _ := frames.Next() - frame, _ = frames.Next() - - return Call{ - frame: frame, - } -} - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c). -func (c Call) String() string { - return fmt.Sprint(c) -} - -// MarshalText implements encoding.TextMarshaler. It formats the Call the same -// as fmt.Sprintf("%v", c). -func (c Call) MarshalText() ([]byte, error) { - if c.frame == (runtime.Frame{}) { - return nil, ErrNoFunc - } - - buf := bytes.Buffer{} - fmt.Fprint(&buf, c) - return buf.Bytes(), nil -} - -// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely -// cause is a Call with the zero value. -var ErrNoFunc = errors.New("no call stack information") - -// Format implements fmt.Formatter with support for the following verbs. -// -// %s source file -// %d line number -// %n function name -// %k last segment of the package path -// %v equivalent to %s:%d -// -// It accepts the '+' and '#' flags for most of the verbs as follows. 
-// -// %+s path of source file relative to the compile time GOPATH, -// or the module path joined to the path of source file relative -// to module root -// %#s full path of source file -// %+n import path qualified function name -// %+k full package path -// %+v equivalent to %+s:%d -// %#v equivalent to %#s:%d -func (c Call) Format(s fmt.State, verb rune) { - if c.frame == (runtime.Frame{}) { - fmt.Fprintf(s, "%%!%c(NOFUNC)", verb) - return - } - - switch verb { - case 's', 'v': - file := c.frame.File - switch { - case s.Flag('#'): - // done - case s.Flag('+'): - file = pkgFilePath(&c.frame) - default: - const sep = "/" - if i := strings.LastIndex(file, sep); i != -1 { - file = file[i+len(sep):] - } - } - io.WriteString(s, file) - if verb == 'v' { - buf := [7]byte{':'} - s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10)) - } - - case 'd': - buf := [6]byte{} - s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10)) - - case 'k': - name := c.frame.Function - const pathSep = "/" - start, end := 0, len(name) - if i := strings.LastIndex(name, pathSep); i != -1 { - start = i + len(pathSep) - } - const pkgSep = "." - if i := strings.Index(name[start:], pkgSep); i != -1 { - end = start + i - } - if s.Flag('+') { - start = 0 - } - io.WriteString(s, name[start:end]) - - case 'n': - name := c.frame.Function - if !s.Flag('+') { - const pathSep = "/" - if i := strings.LastIndex(name, pathSep); i != -1 { - name = name[i+len(pathSep):] - } - const pkgSep = "." - if i := strings.Index(name, pkgSep); i != -1 { - name = name[i+len(pkgSep):] - } - } - io.WriteString(s, name) - } -} - -// Frame returns the call frame infomation for the Call. -func (c Call) Frame() runtime.Frame { - return c.frame -} - -// PC returns the program counter for this call frame; multiple frames may -// have the same PC value. -// -// Deprecated: Use Call.Frame instead. -func (c Call) PC() uintptr { - return c.frame.PC -} - -// CallStack records a sequence of function invocations from a goroutine -// stack. -type CallStack []Call - -// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs). -func (cs CallStack) String() string { - return fmt.Sprint(cs) -} - -var ( - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - spaceBytes = []byte(" ") -) - -// MarshalText implements encoding.TextMarshaler. It formats the CallStack the -// same as fmt.Sprintf("%v", cs). -func (cs CallStack) MarshalText() ([]byte, error) { - buf := bytes.Buffer{} - buf.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - buf.Write(spaceBytes) - } - fmt.Fprint(&buf, pc) - } - buf.Write(closeBracketBytes) - return buf.Bytes(), nil -} - -// Format implements fmt.Formatter by printing the CallStack as square brackets -// ([, ]) surrounding a space separated list of Calls each formatted with the -// supplied verb and options. -func (cs CallStack) Format(s fmt.State, verb rune) { - s.Write(openBracketBytes) - for i, pc := range cs { - if i > 0 { - s.Write(spaceBytes) - } - pc.Format(s, verb) - } - s.Write(closeBracketBytes) -} - -// Trace returns a CallStack for the current goroutine with element 0 -// identifying the calling function. -func Trace() CallStack { - var pcs [512]uintptr - n := runtime.Callers(1, pcs[:]) - - frames := runtime.CallersFrames(pcs[:n]) - cs := make(CallStack, 0, n) - - // Skip extra frame retrieved just to make sure the runtime.sigpanic - // special case is handled. 
- frame, more := frames.Next() - - for more { - frame, more = frames.Next() - cs = append(cs, Call{frame: frame}) - } - - return cs -} - -// TrimBelow returns a slice of the CallStack with all entries below c -// removed. -func (cs CallStack) TrimBelow(c Call) CallStack { - for len(cs) > 0 && cs[0] != c { - cs = cs[1:] - } - return cs -} - -// TrimAbove returns a slice of the CallStack with all entries above c -// removed. -func (cs CallStack) TrimAbove(c Call) CallStack { - for len(cs) > 0 && cs[len(cs)-1] != c { - cs = cs[:len(cs)-1] - } - return cs -} - -// pkgIndex returns the index that results in file[index:] being the path of -// file relative to the compile time GOPATH, and file[:index] being the -// $GOPATH/src/ portion of file. funcName must be the name of a function in -// file as returned by runtime.Func.Name. -func pkgIndex(file, funcName string) int { - // As of Go 1.6.2 there is no direct way to know the compile time GOPATH - // at runtime, but we can infer the number of path segments in the GOPATH. - // We note that runtime.Func.Name() returns the function name qualified by - // the import path, which does not include the GOPATH. Thus we can trim - // segments from the beginning of the file path until the number of path - // separators remaining is one more than the number of path separators in - // the function name. For example, given: - // - // GOPATH /home/user - // file /home/user/src/pkg/sub/file.go - // fn.Name() pkg/sub.Type.Method - // - // We want to produce: - // - // file[:idx] == /home/user/src/ - // file[idx:] == pkg/sub/file.go - // - // From this we can easily see that fn.Name() has one less path separator - // than our desired result for file[idx:]. We count separators from the - // end of the file path until it finds two more than in the function name - // and then move one character forward to preserve the initial path - // segment without a leading separator. - const sep = "/" - i := len(file) - for n := strings.Count(funcName, sep) + 2; n > 0; n-- { - i = strings.LastIndex(file[:i], sep) - if i == -1 { - i = -len(sep) - break - } - } - // get back to 0 or trim the leading separator - return i + len(sep) -} - -// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH, -// or its module path joined to its path relative to the module root. -// -// As of Go 1.11 there is no direct way to know the compile time GOPATH or -// module paths at runtime, but we can piece together the desired information -// from available information. We note that runtime.Frame.Function contains the -// function name qualified by the package path, which includes the module path -// but not the GOPATH. We can extract the package path from that and append the -// last segments of the file path to arrive at the desired package qualified -// file path. For example, given: -// -// GOPATH /home/user -// import path pkg/sub -// frame.File /home/user/src/pkg/sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/sub/file.go -// -// It appears that we simply need to trim ".Type.Method" from frame.Function and -// append "/" + path.Base(file). -// -// But there are other wrinkles. Although it is idiomatic to do so, the internal -// name of a package is not required to match the last segment of its import -// path. In addition, the introduction of modules in Go 1.11 allows working -// without a GOPATH. 
So we also must make these work right: -// -// GOPATH /home/user -// import path pkg/go-sub -// package name sub -// frame.File /home/user/src/pkg/go-sub/file.go -// frame.Function pkg/sub.Type.Method -// Desired return pkg/go-sub/file.go -// -// Module path pkg/v2 -// import path pkg/v2/go-sub -// package name sub -// frame.File /home/user/cloned-pkg/go-sub/file.go -// frame.Function pkg/v2/sub.Type.Method -// Desired return pkg/v2/go-sub/file.go -// -// We can handle all of these situations by using the package path extracted -// from frame.Function up to, but not including, the last segment as the prefix -// and the last two segments of frame.File as the suffix of the returned path. -// This preserves the existing behavior when working in a GOPATH without modules -// and a semantically equivalent behavior when used in module aware project. -func pkgFilePath(frame *runtime.Frame) string { - pre := pkgPrefix(frame.Function) - post := pathSuffix(frame.File) - if pre == "" { - return post - } - return pre + "/" + post -} - -// pkgPrefix returns the import path of the function's package with the final -// segment removed. -func pkgPrefix(funcName string) string { - const pathSep = "/" - end := strings.LastIndex(funcName, pathSep) - if end == -1 { - return "" - } - return funcName[:end] -} - -// pathSuffix returns the last two segments of path. -func pathSuffix(path string) string { - const pathSep = "/" - lastSep := strings.LastIndex(path, pathSep) - if lastSep == -1 { - return path - } - return path[strings.LastIndex(path[:lastSep], pathSep)+1:] -} - -var runtimePath string - -func init() { - var pcs [3]uintptr - runtime.Callers(0, pcs[:]) - frames := runtime.CallersFrames(pcs[:]) - frame, _ := frames.Next() - file := frame.File - - idx := pkgIndex(frame.File, frame.Function) - - runtimePath = file[:idx] - if runtime.GOOS == "windows" { - runtimePath = strings.ToLower(runtimePath) - } -} - -func inGoroot(c Call) bool { - file := c.frame.File - if len(file) == 0 || file[0] == '?' { - return true - } - if runtime.GOOS == "windows" { - file = strings.ToLower(file) - } - return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go") -} - -// TrimRuntime returns a slice of the CallStack with the topmost entries from -// the go runtime removed. It considers any calls originating from unknown -// files, files under GOROOT, or _testmain.go as part of the runtime. 
-func (cs CallStack) TrimRuntime() CallStack {
-	for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
-		cs = cs[:len(cs)-1]
-	}
-	return cs
-}
diff --git a/vendor/github.com/form3tech-oss/jwt-go/.gitignore b/vendor/github.com/golang-jwt/jwt/.gitignore
similarity index 95%
rename from vendor/github.com/form3tech-oss/jwt-go/.gitignore
rename to vendor/github.com/golang-jwt/jwt/.gitignore
index c0e81a8d92..09573e0169 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/.gitignore
+++ b/vendor/github.com/golang-jwt/jwt/.gitignore
@@ -2,4 +2,3 @@
 bin
 .idea/
 
-
diff --git a/vendor/github.com/form3tech-oss/jwt-go/LICENSE b/vendor/github.com/golang-jwt/jwt/LICENSE
similarity index 96%
rename from vendor/github.com/form3tech-oss/jwt-go/LICENSE
rename to vendor/github.com/golang-jwt/jwt/LICENSE
index df83a9c2f0..35dbc25204 100644
--- a/vendor/github.com/form3tech-oss/jwt-go/LICENSE
+++ b/vendor/github.com/golang-jwt/jwt/LICENSE
@@ -1,4 +1,5 @@
 Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
diff --git a/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
new file mode 100644
index 0000000000..c4efbd2a8c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/MIGRATION_GUIDE.md
@@ -0,0 +1,22 @@
+## Migration Guide (v3.2.1)
+
+Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, whereas new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path.
+
+### go.mod replacement
+
+As a first step, the easiest way is to use `go mod edit` to issue a replacement.
+
+```
+go mod edit -replace github.com/dgrijalva/jwt-go=github.com/golang-jwt/jwt@v3.2.1+incompatible
+go mod tidy
+```
+
+This will still keep the old import path in your code but replace it with the new package and also introduce a new indirect dependency to `github.com/golang-jwt/jwt`. Try to compile your project; it should still work.
+
+### Cleanup
+
+If your code still builds, you can replace all occurrences of `github.com/dgrijalva/jwt-go` with `github.com/golang-jwt/jwt`, either manually or by using tools such as `sed`. Finally, the `replace` directive in the `go.mod` file can be removed.
+
+## Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
\ No newline at end of file diff --git a/vendor/github.com/form3tech-oss/jwt-go/README.md b/vendor/github.com/golang-jwt/jwt/README.md similarity index 63% rename from vendor/github.com/form3tech-oss/jwt-go/README.md rename to vendor/github.com/golang-jwt/jwt/README.md index d7749077fd..9b653e46b0 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/README.md +++ b/vendor/github.com/golang-jwt/jwt/README.md @@ -1,25 +1,34 @@ # jwt-go -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt) -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. +**IMPORT PATH CHANGE:** Starting from [v3.2.1](https://github.com/golang-jwt/jwt/releases/tag/v3.2.1), the import path has changed from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`. After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. -**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. +Future releases will be using the `github.com/golang-jwt/jwt` import path and continue the existing versioning scheme of `v3.x.x+incompatible`. Backwards-compatible patches and fixes will be done on the `v3` release branch, where as new build-breaking features will be developed in a `v4` release, possibly including a SIV-style import path. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. **SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. 
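The security notice above is the part most worth acting on. A minimal sketch of an `alg`-pinning `Keyfunc`, where `hmacSecret` and the package name are illustrative assumptions rather than anything from this diff:

```go
package auth

import (
	"fmt"

	"github.com/golang-jwt/jwt"
)

// hmacSecret is an illustrative assumption; load real key material securely.
var hmacSecret = []byte("example-secret")

func parseToken(tokenString string) (*jwt.Token, error) {
	return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		// Pin the expected algorithm family before handing back key material,
		// so a token whose header claims alg=none or alg=RS256 is rejected.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return hmacSecret, nil
	})
}
```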
+### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + ## What the heck is a JWT? JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. ## What's in the box? 
@@ -27,11 +36,11 @@ This library supports the parsing and verification as well as the generation and ## Examples -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) ## Extensions @@ -41,17 +50,17 @@ Here's an example of an extension that integrates with multiple Google Cloud Pla ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RTF 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. ## Project Status & Versioning This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/golang-jwt/jwt.v3`. It will do the right thing WRT semantic versioning. **BREAKING CHANGES:*** * Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. 
A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. @@ -79,9 +88,9 @@ Asymmetric signing methods, such as RSA, use different keys for signing and veri Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation ### JWT and OAuth @@ -99,6 +108,6 @@ This library uses descriptive error messages whenever possible. If you are not g ## More -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md similarity index 85% rename from vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md rename to vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md index 6370298313..637f2ba616 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/golang-jwt/jwt/VERSION_HISTORY.md @@ -1,5 +1,18 @@ ## `jwt-go` Version History +#### 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). 
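The EdDSA support noted in the 3.2.2 entry above pairs with the `ed25519.go` file added later in this diff. A round-trip sketch, assuming freshly generated keys and a made-up claim set:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/golang-jwt/jwt"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign with the ed25519 private key via the EdDSA method added in 3.2.2.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.MapClaims{"sub": "example"}).SignedString(priv)
	if err != nil {
		panic(err)
	}

	// Verify with the matching public key.
	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return pub, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```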
+ +#### 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 + #### 3.2.0 * Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation @@ -115,4 +128,4 @@ It is likely the only integration change required here will be to change `func(t * First versioned release * API stabilized * Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/form3tech-oss/jwt-go/claims.go b/vendor/github.com/golang-jwt/jwt/claims.go similarity index 87% rename from vendor/github.com/form3tech-oss/jwt-go/claims.go rename to vendor/github.com/golang-jwt/jwt/claims.go index 624890666c..f1dba3cb91 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/claims.go +++ b/vendor/github.com/golang-jwt/jwt/claims.go @@ -16,7 +16,7 @@ type Claims interface { // https://tools.ietf.org/html/rfc7519#section-4.1 // See examples for how to use this with your own claim types type StandardClaims struct { - Audience []string `json:"aud,omitempty"` + Audience string `json:"aud,omitempty"` ExpiresAt int64 `json:"exp,omitempty"` Id string `json:"jti,omitempty"` IssuedAt int64 `json:"iat,omitempty"` @@ -35,18 +35,18 @@ func (c StandardClaims) Valid() error { // The claims below are optional, by default, so if they are set to the // default value in Go, let's not fail the verification for them. - if c.VerifyExpiresAt(now, false) == false { + if !c.VerifyExpiresAt(now, false) { delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) vErr.Inner = fmt.Errorf("token is expired by %v", delta) vErr.Errors |= ValidationErrorExpired } - if c.VerifyIssuedAt(now, false) == false { + if !c.VerifyIssuedAt(now, false) { vErr.Inner = fmt.Errorf("Token used before issued") vErr.Errors |= ValidationErrorIssuedAt } - if c.VerifyNotBefore(now, false) == false { + if !c.VerifyNotBefore(now, false) { vErr.Inner = fmt.Errorf("token is not valid yet") vErr.Errors |= ValidationErrorNotValidYet } @@ -61,7 +61,7 @@ func (c StandardClaims) Valid() error { // Compares the aud claim against cmp. // If required is false, this method will return true if the value matches or is unset func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) + return verifyAud([]string{c.Audience}, cmp, req) } // Compares the exp claim against cmp. 
@@ -94,13 +94,23 @@ func verifyAud(aud []string, cmp string, required bool) bool { if len(aud) == 0 { return !required } + // use a var here to keep constant time compare when looping over a number of claims + result := false + var stringClaims string for _, a := range aud { if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { - return true + result = true } + stringClaims = stringClaims + a } - return false + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result } func verifyExp(exp int64, now int64, required bool) bool { diff --git a/vendor/github.com/form3tech-oss/jwt-go/doc.go b/vendor/github.com/golang-jwt/jwt/doc.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/doc.go rename to vendor/github.com/golang-jwt/jwt/doc.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go b/vendor/github.com/golang-jwt/jwt/ecdsa.go similarity index 84% rename from vendor/github.com/form3tech-oss/jwt-go/ecdsa.go rename to vendor/github.com/golang-jwt/jwt/ecdsa.go index f977381240..15e23435df 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/ecdsa.go +++ b/vendor/github.com/golang-jwt/jwt/ecdsa.go @@ -88,11 +88,11 @@ func (m *SigningMethodECDSA) Verify(signingString, signature string, key interfa hasher.Write([]byte(signingString)) // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { return nil - } else { - return ErrECDSAVerification } + + return ErrECDSAVerification } // Implements the Sign method from SigningMethod @@ -128,18 +128,12 @@ func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string keyBytes += 1 } - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
return EncodeSegment(out), nil } else { diff --git a/vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/ecdsa_utils.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go rename to vendor/github.com/golang-jwt/jwt/ecdsa_utils.go diff --git a/vendor/github.com/golang-jwt/jwt/ed25519.go b/vendor/github.com/golang-jwt/jwt/ed25519.go new file mode 100644 index 0000000000..a2f8ddbe9b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ed25519.go @@ -0,0 +1,81 @@ +package jwt + +import ( + "errors" + + "crypto/ed25519" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// Implements the EdDSA family +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key ed25519.PrivateKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PrivateKey); !ok { + return "", ErrInvalidKeyType + } + + // ed25519.Sign panics if private key not equal to ed25519.PrivateKeySize + // this allows to avoid recover usage + if len(ed25519Key) != ed25519.PrivateKeySize { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + sig := ed25519.Sign(ed25519Key, []byte(signingString)) + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go new file mode 100644 index 0000000000..c6357275ef --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("Key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("Key is not a valid Ed25519 public key") +) + +// Parse PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = 
parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// Parse PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/form3tech-oss/jwt-go/errors.go b/vendor/github.com/golang-jwt/jwt/errors.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/errors.go rename to vendor/github.com/golang-jwt/jwt/errors.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/hmac.go b/vendor/github.com/golang-jwt/jwt/hmac.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/hmac.go rename to vendor/github.com/golang-jwt/jwt/hmac.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go b/vendor/github.com/golang-jwt/jwt/map_claims.go similarity index 96% rename from vendor/github.com/form3tech-oss/jwt-go/map_claims.go rename to vendor/github.com/golang-jwt/jwt/map_claims.go index 14b434cef6..72c79f92e5 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/map_claims.go +++ b/vendor/github.com/golang-jwt/jwt/map_claims.go @@ -10,11 +10,13 @@ import ( // This is the default claims type if you don't supply one type MapClaims map[string]interface{} -// Compares the aud claim against cmp. +// VerifyAudience Compares the aud claim against cmp. // If required is false, this method will return true if the value matches or is unset func (m MapClaims) VerifyAudience(cmp string, req bool) bool { var aud []string switch v := m["aud"].(type) { + case string: + aud = append(aud, v) case []string: aud = v case []interface{}: @@ -25,10 +27,6 @@ func (m MapClaims) VerifyAudience(cmp string, req bool) bool { } aud = append(aud, vs) } - case string: - aud = append(aud, v) - default: - return false } return verifyAud(aud, cmp, req) } @@ -67,7 +65,7 @@ func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { return false } -// Compares the iss claim against cmp.`` +// Compares the iss claim against cmp. 
// If required is false, this method will return true if the value matches or is unset func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { iss, _ := m["iss"].(string) diff --git a/vendor/github.com/form3tech-oss/jwt-go/none.go b/vendor/github.com/golang-jwt/jwt/none.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/none.go rename to vendor/github.com/golang-jwt/jwt/none.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/parser.go b/vendor/github.com/golang-jwt/jwt/parser.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/parser.go rename to vendor/github.com/golang-jwt/jwt/parser.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa.go b/vendor/github.com/golang-jwt/jwt/rsa.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/rsa.go rename to vendor/github.com/golang-jwt/jwt/rsa.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/rsa_pss.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go rename to vendor/github.com/golang-jwt/jwt/rsa_pss.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/rsa_utils.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go rename to vendor/github.com/golang-jwt/jwt/rsa_utils.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/signing_method.go b/vendor/github.com/golang-jwt/jwt/signing_method.go similarity index 100% rename from vendor/github.com/form3tech-oss/jwt-go/signing_method.go rename to vendor/github.com/golang-jwt/jwt/signing_method.go diff --git a/vendor/github.com/form3tech-oss/jwt-go/token.go b/vendor/github.com/golang-jwt/jwt/token.go similarity index 93% rename from vendor/github.com/form3tech-oss/jwt-go/token.go rename to vendor/github.com/golang-jwt/jwt/token.go index d637e0867c..6b30ced120 100644 --- a/vendor/github.com/form3tech-oss/jwt-go/token.go +++ b/vendor/github.com/golang-jwt/jwt/token.go @@ -65,7 +65,7 @@ func (t *Token) SignedString(key interface{}) (string, error) { func (t *Token) SigningString() (string, error) { var err error parts := make([]string, 2) - for i, _ := range parts { + for i := range parts { var jsonValue []byte if i == 0 { if jsonValue, err = json.Marshal(t.Header); err != nil { @@ -95,14 +95,10 @@ func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token // Encode JWT specific base64url encoding with padding stripped func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") + return base64.RawURLEncoding.EncodeToString(seg) } // Decode JWT specific base64url encoding with padding stripped func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) + return base64.RawURLEncoding.DecodeString(seg) } diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md index 01b21646e0..f5d551ca8f 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/README.md +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -36,9 +36,23 @@ The part in the middle is the interesting bit. It's called the Claims and conta This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. 
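Since the paragraph above summarizes what the v4 package does (parse, verify, generate, and sign with HMAC SHA, RSA, RSA-PSS, and ECDSA), a minimal HS256 round trip using `RegisteredClaims` may help; the secret and claim values are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("example-secret") // illustrative assumption

	claims := jwt.RegisteredClaims{
		Subject:   "example",
		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
	if err != nil {
		panic(err)
	}

	// In production also pin the expected alg in the Keyfunc, as sketched earlier.
	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```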
+## Installation Guidelines + +1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v4 +``` + +2. Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v4" +``` + ## Examples -See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: * [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) * [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) @@ -46,14 +60,15 @@ See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) fo ## Extensions -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. +This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`. -A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs). +A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs) or to implement additional standards. -| Extension | Purpose | Repo | -|-----------|----------------------------------------------------------------------------------------------|--------------------------------------------| -| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | -| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | *Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md new file mode 100644 index 0000000000..b08402c342 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. 
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
index 5a8502feb3..4fd6f9e610 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
@@ -1,3 +1,4 @@
+//go:build go1.4
 // +build go1.4
 
 package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go
index 09b4cde5ae..3cb0f3f0e4 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/token.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/token.go
@@ -7,7 +7,6 @@ import (
 	"time"
 )
 
-
 // DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515
 // states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
 // of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go
index 2c647fd2e6..ac8e140eb1 100644
--- a/vendor/github.com/golang-jwt/jwt/v4/types.go
+++ b/vendor/github.com/golang-jwt/jwt/v4/types.go
@@ -53,9 +53,23 @@ func (date NumericDate) MarshalJSON() (b []byte, err error) {
 	if TimePrecision < time.Second {
 		prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
 	}
-	f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second)
-
-	return []byte(strconv.FormatFloat(f, 'f', prec, 64)), nil
+	truncatedDate := date.Truncate(TimePrecision)
+
+	// For very large timestamps, UnixNano would overflow an int64, but this
+	// function requires nanosecond level precision, so we have to use the
+	// following technique to get round the issue:
+	// 1. Take the normal unix timestamp to form the whole number part of the
+	//    output,
+	// 2. Take the result of the Nanosecond function, which returns the offset
+	//    within the second of the particular unix time instance, to form the
+	//    decimal part of the output
+	// 3. Concatenate them to produce the final result
+	seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+	nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+	output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+ + return output, nil } // UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a diff --git a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md index 738c132e3c..03991f6103 100644 --- a/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md +++ b/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md @@ -2,6 +2,28 @@ ## HEAD +### Integration + + * Breaking change to API for `integration.HammerCTLog`: + * Added `ctx` as first argument, and terminate loop if it becomes cancelled + +### JSONClient + + * PostAndParseWithRetry now does backoff-and-retry upon receiving HTTP 429. + +### Cleanup + + * `WithBalancerName` is deprecated and removed, using the recommended way + * `ctfe.PEMCertPool` type has been moved to `x509util.PEMCertPool` to reduce + dependencies (#903). + +### Misc + + * update `google.golang.org/grpc` to v1.46.0 + * `ctclient` tool now uses Cobra for better CLI experience (#901). + * #800: Remove dependency from `ratelimit`. + * #927: Add read-only mode to CTFE config. + ## v1.1.2 ### CTFE @@ -13,7 +35,6 @@ * Trillian from v1.3.11 to v1.4.0 * protobuf to v2 - ## v1.1.1 [Published 2020-10-06](https://github.com/google/certificate-transparency-go/releases/tag/v1.1.1) diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml index 98186ba7d2..e1775c2ec0 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -70,8 +70,8 @@ steps: - '-ec' - | go install \ - google.golang.org/protobuf/proto \ - google.golang.org/protobuf/cmd/protoc-gen-go \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ github.com/golang/mock/mockgen \ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ github.com/fullstorydev/grpcurl/cmd/grpcurl @@ -188,7 +188,7 @@ steps: name: gcr.io/cloud-builders/kubectl args: - apply - - --server-dry-run + - --dry-run=server - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml index 2c893566ee..513bf0d020 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -70,8 +70,8 @@ steps: - '-ec' - | go install \ - google.golang.org/protobuf/proto \ - google.golang.org/protobuf/protoc-gen-go \ + github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ github.com/golang/mock/mockgen \ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ github.com/fullstorydev/grpcurl/cmd/grpcurl diff --git a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml index 7899234d0e..d6694b1fe2 100644 --- a/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml +++ b/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml @@ -70,8 +70,8 @@ steps: - '-ec' - | go install \ - google.golang.org/protobuf/proto \ - google.golang.org/protobuf/protoc-gen-go \ + 
github.com/golang/protobuf/proto \ + github.com/golang/protobuf/protoc-gen-go \ github.com/golang/mock/mockgen \ go.etcd.io/etcd/v3 go.etcd.io/etcd/etcdctl/v3 \ github.com/fullstorydev/grpcurl/cmd/grpcurl diff --git a/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go new file mode 100644 index 0000000000..0f814ad54a --- /dev/null +++ b/vendor/github.com/google/certificate-transparency-go/x509util/pem_cert_pool.go @@ -0,0 +1,120 @@ +// Copyright 2016 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package x509util + +import ( + "crypto/sha256" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + + "github.com/golang/glog" + "github.com/google/certificate-transparency-go/x509" +) + +// String for certificate blocks in BEGIN / END PEM headers +const pemCertificateBlockType string = "CERTIFICATE" + +// PEMCertPool is a wrapper / extension to x509.CertPool. It allows us to access the +// raw certs, which we need to serve get-roots request and has stricter handling on loading +// certs into the pool. CertPool ignores errors if at least one cert loads correctly but +// PEMCertPool requires all certs to load. +type PEMCertPool struct { + // maps from sha-256 to certificate, used for dup detection + fingerprintToCertMap map[[sha256.Size]byte]x509.Certificate + rawCerts []*x509.Certificate + certPool *x509.CertPool +} + +// NewPEMCertPool creates a new, empty, instance of PEMCertPool. +func NewPEMCertPool() *PEMCertPool { + return &PEMCertPool{fingerprintToCertMap: make(map[[sha256.Size]byte]x509.Certificate), certPool: x509.NewCertPool()} +} + +// AddCert adds a certificate to a pool. Uses fingerprint to weed out duplicates. +// cert must not be nil. +func (p *PEMCertPool) AddCert(cert *x509.Certificate) { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + + if !ok { + p.fingerprintToCertMap[fingerprint] = *cert + p.certPool.AddCert(cert) + p.rawCerts = append(p.rawCerts, cert) + } +} + +// Included indicates whether the given cert is included in the pool. +func (p *PEMCertPool) Included(cert *x509.Certificate) bool { + fingerprint := sha256.Sum256(cert.Raw) + _, ok := p.fingerprintToCertMap[fingerprint] + return ok +} + +// AppendCertsFromPEM adds certs to the pool from a byte slice assumed to contain PEM encoded data. +// Skips over non certificate blocks in the data. Returns true if all certificates in the +// data were parsed and added to the pool successfully and at least one certificate was found. 
+func (p *PEMCertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != pemCertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if x509.IsFatal(err) { + glog.Warningf("error parsing PEM certificate: %v", err) + return false + } + + p.AddCert(cert) + ok = true + } + + return +} + +// AppendCertsFromPEMFile adds certs from a file that contains concatenated PEM data. +func (p *PEMCertPool) AppendCertsFromPEMFile(pemFile string) error { + pemData, err := ioutil.ReadFile(pemFile) + if err != nil { + return fmt.Errorf("failed to load PEM certs file: %v", err) + } + + if !p.AppendCertsFromPEM(pemData) { + return errors.New("failed to parse PEM certs file") + } + return nil +} + +// Subjects returns a list of the DER-encoded subjects of all of the certificates in the pool. +func (p *PEMCertPool) Subjects() (res [][]byte) { + return p.certPool.Subjects() +} + +// CertPool returns the underlying CertPool. +func (p *PEMCertPool) CertPool() *x509.CertPool { + return p.certPool +} + +// RawCertificates returns a list of the raw bytes of certificates that are in this pool +func (p *PEMCertPool) RawCertificates() []*x509.Certificate { + return p.rawCerts +} diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go index dc9c56b7f3..b2e3f186cc 100644 --- a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go +++ b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go @@ -17,6 +17,8 @@ package redact import ( "context" + "errors" + "net/url" ) type contextKey string @@ -33,3 +35,55 @@ func FromContext(ctx context.Context) (bool, string) { reason, ok := ctx.Value(redactKey).(string) return ok, reason } + +// Error redacts potentially sensitive query parameter values in the URL from the error's message. +// +// If the error is a *url.Error, this returns a *url.Error with the URL redacted. +// Any other error type, or nil, is returned unchanged. +func Error(err error) error { + // If the error is a url.Error, we can redact the URL. + // Otherwise (including if err is nil), we can't redact. + var uerr *url.Error + if ok := errors.As(err, &uerr); !ok { + return err + } + u, perr := url.Parse(uerr.URL) + if perr != nil { + return err // If the URL can't be parsed, just return the original error. + } + uerr.URL = URL(u).String() // Update the URL to the redacted URL. + return uerr +} + +// The set of query string keys that we expect to send as part of the registry +// protocol. Anything else is potentially dangerous to leak, as it's probably +// from a redirect. These redirects often included tokens or signed URLs. +var paramAllowlist = map[string]struct{}{ + // Token exchange + "scope": {}, + "service": {}, + // Cross-repo mounting + "mount": {}, + "from": {}, + // Layer PUT + "digest": {}, + // Listing tags and catalog + "n": {}, + "last": {}, +} + +// URL redacts potentially sensitive query parameter values from the URL's query string. 
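A behavioral sketch of the function below, written as if from inside the module (internal/redact is not importable by external code, and the URL is made up):

	u, _ := url.Parse("https://blobs.example.com/layer?digest=sha256:abc&X-Amz-Signature=secret")
	fmt.Println(redact.URL(u))
	// The allowlisted "digest" parameter survives; the signature value is replaced:
	// https://blobs.example.com/layer?X-Amz-Signature=REDACTED&digest=sha256%3Aabc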
+func URL(u *url.URL) *url.URL { + qs := u.Query() + for k, v := range qs { + for i := range v { + if _, ok := paramAllowlist[k]; !ok { + // key is not in the Allowlist + v[i] = "REDACTED" + } + } + } + r := *u + r.RawQuery = qs.Encode() + return &r +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index e465aef491..c4a2e693e3 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -15,16 +15,14 @@ package name import ( + _ "crypto/sha256" // Recommended by go-digest. "strings" -) -const ( - // These have the form: sha256: - // TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation. - digestChars = "sh:0123456789abcdef" - digestDelim = "@" + "github.com/opencontainers/go-digest" ) +const digestDelim = "@" + // Digest stores a digest name in a structured form. type Digest struct { Repository @@ -60,10 +58,6 @@ func (d Digest) String() string { return d.original } -func checkDigest(name string) error { - return checkElement("digest", name, digestChars, 7+64, 7+64) -} - // NewDigest returns a new Digest representing the given name. func NewDigest(name string, opts ...Option) (Digest, error) { // Split on "@" @@ -72,10 +66,13 @@ func NewDigest(name string, opts ...Option) (Digest, error) { return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) } base := parts[0] - digest := parts[1] - - // Always check that the digest is valid. - if err := checkDigest(digest); err != nil { + dig := parts[1] + prefix := digest.Canonical.String() + ":" + if !strings.HasPrefix(dig, prefix) { + return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig) + } + hex := strings.TrimPrefix(dig, prefix) + if err := digest.Canonical.Validate(hex); err != nil { return Digest{}, err } @@ -90,7 +87,7 @@ func NewDigest(name string, opts ...Option) (Digest, error) { } return Digest{ Repository: repo, - digest: digest, + digest: dig, original: name, }, nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go b/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go index 83040767d0..31d9c935c1 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/cache/cache.go @@ -156,3 +156,39 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { } return l, err } + +// ImageIndex returns a new ImageIndex which wraps the given ImageIndex's +// children with either Image(child, c) or ImageIndex(child, c) depending on type. 
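A usage sketch for the wrapper defined below (the reference string and cache directory are hypothetical; imports come from this module's pkg/name, pkg/v1, pkg/v1/cache, and pkg/v1/remote packages):

	func fetchCachedIndex(refStr, dir string) (v1.ImageIndex, error) {
		ref, err := name.ParseReference(refStr)
		if err != nil {
			return nil, err
		}
		idx, err := remote.Index(ref)
		if err != nil {
			return nil, err
		}
		// Layers of any image resolved through the returned index are
		// read through (and written to) the filesystem cache.
		return cache.ImageIndex(idx, cache.NewFilesystemCache(dir)), nil
	}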
+func ImageIndex(ii v1.ImageIndex, c Cache) v1.ImageIndex { + return &imageIndex{ + inner: ii, + c: c, + } +} + +type imageIndex struct { + inner v1.ImageIndex + c Cache +} + +func (ii *imageIndex) MediaType() (types.MediaType, error) { return ii.inner.MediaType() } +func (ii *imageIndex) Digest() (v1.Hash, error) { return ii.inner.Digest() } +func (ii *imageIndex) Size() (int64, error) { return ii.inner.Size() } +func (ii *imageIndex) IndexManifest() (*v1.IndexManifest, error) { return ii.inner.IndexManifest() } +func (ii *imageIndex) RawManifest() ([]byte, error) { return ii.inner.RawManifest() } + +func (ii *imageIndex) Image(h v1.Hash) (v1.Image, error) { + i, err := ii.inner.Image(h) + if err != nil { + return nil, err + } + return Image(i, ii.c), nil +} + +func (ii *imageIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + idx, err := ii.inner.ImageIndex(h) + if err != nil { + return nil, err + } + return ImageIndex(idx, ii.c), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index a950b397c1..40b1607789 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -37,6 +37,7 @@ type ConfigFile struct { RootFS RootFS `json:"rootfs"` Config Config `json:"config"` OSVersion string `json:"os.version,omitempty"` + Variant string `json:"variant,omitempty"` } // History is one entry of a list recording how this container image was built. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go index 482cf4a913..6dc7a50eac 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/google/keychain.go @@ -71,11 +71,13 @@ func (gk *googleKeychain) Resolve(target authn.Resource) (authn.Authenticator, e func resolve() authn.Authenticator { auth, envErr := NewEnvAuthenticator() if envErr == nil && auth != authn.Anonymous { + logs.Debug.Println("google.Keychain: using Application Default Credentials") return auth } auth, gErr := NewGcloudAuthenticator() if gErr == nil && auth != authn.Anonymous { + logs.Debug.Println("google.Keychain: using gcloud fallback") return auth } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go index 7c54e5f58b..bc365a6f35 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go @@ -45,33 +45,17 @@ func (l Path) AppendImage(img v1.Image, options ...Option) error { return err } - mt, err := img.MediaType() + desc, err := partial.Descriptor(img) if err != nil { return err } - d, err := img.Digest() - if err != nil { - return err - } - - manifest, err := img.RawManifest() - if err != nil { - return err - } - - desc := v1.Descriptor{ - MediaType: mt, - Size: int64(len(manifest)), - Digest: d, - } - o := makeOptions(options...) 
for _, opt := range o.descOpts { - opt(&desc) + opt(desc) } - return l.AppendDescriptor(desc) + return l.AppendDescriptor(*desc) } // AppendIndex writes a v1.ImageIndex to the Path and updates @@ -81,33 +65,17 @@ func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error { return err } - mt, err := ii.MediaType() + desc, err := partial.Descriptor(ii) if err != nil { return err } - d, err := ii.Digest() - if err != nil { - return err - } - - manifest, err := ii.RawManifest() - if err != nil { - return err - } - - desc := v1.Descriptor{ - MediaType: mt, - Size: int64(len(manifest)), - Digest: d, - } - o := makeOptions(options...) for _, opt := range o.descOpts { - opt(&desc) + opt(desc) } - return l.AppendDescriptor(desc) + return l.AppendDescriptor(*desc) } // AppendDescriptor adds a descriptor to the index.json of the Path. @@ -492,12 +460,15 @@ func (l Path) WriteIndex(ii v1.ImageIndex) error { // // The contents are written in the following format: // At the top level, there is: -// One oci-layout file containing the version of this image-layout. -// One index.json file listing descriptors for the contained images. +// +// One oci-layout file containing the version of this image-layout. +// One index.json file listing descriptors for the contained images. +// // Under blobs/, there is, for each image: -// One file for each layer, named after the layer's SHA. -// One file for each config blob, named after its SHA. -// One file for each manifest blob, named after its SHA. +// +// One file for each layer, named after the layer's SHA. +// One file for each config blob, named after its SHA. +// One file for each manifest blob, named after its SHA. func Write(path string, ii v1.ImageIndex) (Path, error) { lp := Path(path) // Always just write oci-layout file, since it's small. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go index 3a5c615722..b64e9881ee 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -88,9 +88,22 @@ func (cl *configLayer) MediaType() (types.MediaType, error) { var _ v1.Layer = (*configLayer)(nil) +// withConfigLayer allows partial image implementations to provide a layer +// for their config file. +type withConfigLayer interface { + ConfigLayer() (v1.Layer, error) +} + // ConfigLayer implements v1.Layer from the raw config bytes. // This is so that clients (e.g. remote) can access the config as a blob. +// +// Images that want to return a specific layer implementation can implement +// withConfigLayer. 
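A sketch of the hook this enables (myImage is a hypothetical type; v1 is this module's pkg/v1). Because the check below is a structural type assertion after unwrapping, any image implementation that exports a matching method is picked up:

	type myImage struct {
		v1.Image
		cfg v1.Layer
	}

	// ConfigLayer satisfies the unexported interface consulted by partial.ConfigLayer.
	func (m *myImage) ConfigLayer() (v1.Layer, error) { return m.cfg, nil }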
func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { + if wcl, ok := unwrap(i).(withConfigLayer); ok { + return wcl.ConfigLayer() + } + h, err := ConfigName(i) if err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go index 25d86956f1..8a0a6ca7b0 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -24,7 +24,7 @@ func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTrip } scopes := []string{ref.Scope(transport.PushScope)} - tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + tr, err := transport.NewWithContext(context.TODO(), ref.Context().Registry, auth, t, scopes) if err != nil { return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err) } @@ -39,7 +39,7 @@ func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTrip client: &http.Client{Transport: tr}, context: context.Background(), } - loc, _, err := w.initiateUpload("", "") + loc, _, err := w.initiateUpload("", "", "") if loc != "" { // Since we're only initiating the upload to check whether we // can, we should attempt to cancel it, in case initiating diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go index 755e819293..cbe1268e02 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -24,6 +24,7 @@ import ( "net/url" "strings" + "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/internal/verify" "github.com/google/go-containerregistry/pkg/logs" "github.com/google/go-containerregistry/pkg/name" @@ -367,7 +368,7 @@ func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.Read resp, err := f.Client.Do(req.WithContext(ctx)) if err != nil { - return nil, err + return nil, redact.Error(err) } if err := transport.CheckError(resp, http.StatusOK); err != nil { @@ -398,7 +399,7 @@ func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { resp, err := f.Client.Do(req.WithContext(f.context)) if err != nil { - return nil, err + return nil, redact.Error(err) } if err := transport.CheckError(resp, http.StatusOK); err != nil { @@ -418,7 +419,7 @@ func (f *fetcher) blobExists(h v1.Hash) (bool, error) { resp, err := f.Client.Do(req.WithContext(f.context)) if err != nil { - return false, err + return false, redact.Error(err) } defer resp.Body.Close() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go index 728997044c..36d088567d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -93,3 +93,16 @@ func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { return partial.Descriptor(mi.Image) } + +// ConfigLayer retains the original reference so that it can be mounted. +// See partial.ConfigLayer. 
+func (mi *mountableImage) ConfigLayer() (v1.Layer, error) { + l, err := partial.ConfigLayer(mi.Image) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go index 7e41d94c43..002ef8587b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -87,32 +87,32 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { return err } w := writer{ - repo: repo, - client: &http.Client{Transport: tr}, - context: o.context, - updates: o.updates, - lastUpdate: &v1.Update{}, - backoff: o.retryBackoff, - predicate: o.retryPredicate, + repo: repo, + client: &http.Client{Transport: tr}, + context: o.context, + backoff: o.retryBackoff, + predicate: o.retryPredicate, } // Collect the total size of blobs and manifests we're about to write. if o.updates != nil { + w.progress = &progress{updates: o.updates} + w.progress.lastUpdate = &v1.Update{} defer close(o.updates) - defer func() { _ = sendError(o.updates, rerr) }() + defer func() { _ = w.progress.err(rerr) }() for _, b := range blobs { size, err := b.Size() if err != nil { return err } - w.lastUpdate.Total += size + w.progress.total(size) } countManifest := func(t Taggable) error { b, err := t.RawManifest() if err != nil { return err } - w.lastUpdate.Total += int64(len(b)) + w.progress.total(int64(len(b))) return nil } for _, i := range images { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go new file mode 100644 index 0000000000..1f4396350a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go @@ -0,0 +1,69 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "io" + "sync" + "sync/atomic" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type progress struct { + sync.Mutex + updates chan<- v1.Update + lastUpdate *v1.Update +} + +func (p *progress) total(delta int64) { + atomic.AddInt64(&p.lastUpdate.Total, delta) +} + +func (p *progress) complete(delta int64) { + p.Lock() + defer p.Unlock() + p.updates <- v1.Update{ + Total: p.lastUpdate.Total, + Complete: atomic.AddInt64(&p.lastUpdate.Complete, delta), + } +} + +func (p *progress) err(err error) error { + if err != nil && p.updates != nil { + p.updates <- v1.Update{Error: err} + } + return err +} + +type progressReader struct { + rc io.ReadCloser + + count *int64 // number of bytes this reader has read, to support resetting on retry. 
+ progress *progress +} + +func (r *progressReader) Read(b []byte) (int, error) { + n, err := r.rc.Read(b) + if err != nil { + return n, err + } + atomic.AddInt64(r.count, int64(n)) + // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is canceled. + r.progress.complete(int64(n)) + return n, nil +} + +func (r *progressReader) Close() error { return r.rc.Close() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index ce4e707261..0e8f783522 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -233,7 +233,9 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { v := url.Values{} v.Set("scope", strings.Join(bt.scopes, " ")) - v.Set("service", bt.service) + if bt.service != "" { + v.Set("service", bt.service) + } v.Set("client_id", defaultUserAgent) if auth.IdentityToken != "" { v.Set("grant_type", "refresh_token") diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index b94b180ccf..f059f77b6d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -19,26 +19,10 @@ import ( "fmt" "io/ioutil" "net/http" - "net/url" "strings" -) -// The set of query string keys that we expect to send as part of the registry -// protocol. Anything else is potentially dangerous to leak, as it's probably -// from a redirect. These redirects often included tokens or signed URLs. -var paramAllowlist = map[string]struct{}{ - // Token exchange - "scope": {}, - "service": {}, - // Cross-repo mounting - "mount": {}, - "from": {}, - // Layer PUT - "digest": {}, - // Listing tags and catalog - "n": {}, - "last": {}, -} + "github.com/google/go-containerregistry/internal/redact" +) // Error implements error to support the following error specification: // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors @@ -59,7 +43,7 @@ var _ error = (*Error)(nil) func (e *Error) Error() string { prefix := "" if e.Request != nil { - prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redactURL(e.Request.URL)) + prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redact.URL(e.Request.URL)) } return prefix + e.responseErr() } @@ -100,22 +84,6 @@ func (e *Error) Temporary() bool { return true } -// TODO(jonjohnsonjr): Consider moving to internal/redact. -func redactURL(original *url.URL) *url.URL { - qs := original.Query() - for k, v := range qs { - for i := range v { - if _, ok := paramAllowlist[k]; !ok { - // key is not in the Allowlist - v[i] = "REDACTED" - } - } - } - redacted := *original - redacted.RawQuery = qs.Encode() - return &redacted -} - // Diagnostic represents a single error returned by a Docker registry interaction. 
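The progress type above centralizes what were previously ad-hoc channel sends. A consumer-side sketch using the public remote.WithProgress option (ref and img stand in for a parsed name.Reference and a v1.Image):

	updates := make(chan v1.Update, 16)
	go func() {
		for u := range updates {
			// Complete and Total are byte counts; Error, if set, reports a failed push.
			fmt.Printf("pushed %d of %d bytes\n", u.Complete, u.Total)
		}
	}()
	// remote.Write closes the channel when the push finishes.
	if err := remote.Write(ref, img, remote.WithProgress(updates)); err != nil {
		log.Fatal(err)
	}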
type Diagnostic struct {
	Code ErrorCode `json:"code"`
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
index 121904df3a..01fe1fa820 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
@@ -27,15 +27,24 @@ import (
 // setup to authenticate with the remote registry "reg", in the capacity
 // laid out by the specified scopes.
 //
-// TODO(jonjohnsonjr): Deprecate this.
+// Deprecated: Use NewWithContext.
 func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
 	return NewWithContext(context.Background(), reg, auth, t, scopes)
 }
 
 // NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
-// setup to authenticate with the remote registry "reg", in the capacity
+// set up to authenticate with the remote registry "reg", in the capacity
 // laid out by the specified scopes.
+// If the RoundTripper is already a Wrapper, authentication is assumed to have
+// been done prior to this call, so the provided RoundTripper is returned
+// without further action.
 func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
+	// When the provided transport is a Wrapper, this function assumes that the
+	// caller already executed the necessary login and check.
+	switch t.(type) {
+	case *Wrapper:
+		return t, nil
+	}
 	// The handshake:
 	//  1. Use "t" to ping() the registry for the authentication challenge.
 	//
@@ -76,12 +85,7 @@ func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authentic
 	if !ok {
 		return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
 	}
-	service, ok := pr.parameters["service"]
-	if !ok {
-		// If the service parameter is not specified, then default it to the registry
-		// with which we are talking.
- service = reg.String() - } + service := pr.parameters["service"] bt := &bearerTransport{ inner: t, basic: auth, diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index d412f953c3..137349bf21 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -23,7 +23,6 @@ import ( "net/http" "net/url" "strings" - "sync/atomic" "github.com/google/go-containerregistry/internal/redact" "github.com/google/go-containerregistry/internal/retry" @@ -49,20 +48,21 @@ func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { return err } - var lastUpdate *v1.Update + var p *progress if o.updates != nil { - lastUpdate = &v1.Update{} - lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts) + p = &progress{updates: o.updates} + p.lastUpdate = &v1.Update{} + p.lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts) if err != nil { return err } defer close(o.updates) - defer func() { _ = sendError(o.updates, rerr) }() + defer func() { _ = p.err(rerr) }() } - return writeImage(o.context, ref, img, o, lastUpdate) + return writeImage(o.context, ref, img, o, p) } -func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, lastUpdate *v1.Update) error { +func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, progress *progress) error { ls, err := img.Layers() if err != nil { return err @@ -73,13 +73,12 @@ func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *option return err } w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: ctx, - updates: o.updates, - lastUpdate: lastUpdate, - backoff: o.retryBackoff, - predicate: o.retryPredicate, + repo: ref.Context(), + client: &http.Client{Transport: tr}, + context: ctx, + progress: progress, + backoff: o.retryBackoff, + predicate: o.retryPredicate, } // Upload individual blobs and collect any errors. @@ -174,17 +173,9 @@ type writer struct { client *http.Client context context.Context - updates chan<- v1.Update - lastUpdate *v1.Update - backoff Backoff - predicate retry.Predicate -} - -func sendError(ch chan<- v1.Update, err error) error { - if err != nil && ch != nil { - ch <- v1.Update{Error: err} - } - return err + progress *progress + backoff Backoff + predicate retry.Predicate } // url returns a url.Url for the specified path in the context of this remote image reference. @@ -267,13 +258,16 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err // On success, the layer was either mounted (nothing more to do) or a blob // upload was initiated and the body of that blob should be sent to the returned // location. -func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { +func (w *writer) initiateUpload(from, mount, origin string) (location string, mounted bool, err error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) uv := url.Values{} if mount != "" && from != "" { // Quay will fail if we specify a "mount" without a "from". 
- uv["mount"] = []string{mount} - uv["from"] = []string{from} + uv.Set("mount", mount) + uv.Set("from", from) + if origin != "" { + uv.Set("origin", origin) + } } u.RawQuery = uv.Encode() @@ -290,6 +284,11 @@ func (w *writer) initiateUpload(from, mount string) (location string, mounted bo defer resp.Body.Close() if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + if origin != "" && origin != w.repo.RegistryStr() { + // https://github.com/google/go-containerregistry/issues/1404 + logs.Warn.Printf("retrying without mount: %v", err) + return w.initiateUpload("", "", "") + } return "", false, err } @@ -307,46 +306,34 @@ func (w *writer) initiateUpload(from, mount string) (location string, mounted bo } } -type progressReader struct { - rc io.ReadCloser - - count *int64 // number of bytes this reader has read, to support resetting on retry. - updates chan<- v1.Update - lastUpdate *v1.Update -} - -func (r *progressReader) Read(b []byte) (int, error) { - n, err := r.rc.Read(b) - if err != nil { - return n, err - } - atomic.AddInt64(r.count, int64(n)) - // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is cancelled. - r.updates <- v1.Update{ - Total: r.lastUpdate.Total, - Complete: atomic.AddInt64(&r.lastUpdate.Complete, int64(n)), - } - return n, nil -} - -func (r *progressReader) Close() error { return r.rc.Close() } - // streamBlob streams the contents of the blob to the specified location. // On failure, this will return an error. On success, this will return the location // header indicating how to commit the streamed blob. -func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocation string) (commitLocation string, rerr error) { +func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation string) (commitLocation string, rerr error) { reset := func() {} defer func() { if rerr != nil { reset() } }() - if w.updates != nil { + blob, err := layer.Compressed() + if err != nil { + return "", err + } + + getBody := layer.Compressed + if w.progress != nil { var count int64 - blob = &progressReader{rc: blob, updates: w.updates, lastUpdate: w.lastUpdate, count: &count} + blob = &progressReader{rc: blob, progress: w.progress, count: &count} + getBody = func() (io.ReadCloser, error) { + blob, err := layer.Compressed() + if err != nil { + return nil, err + } + return &progressReader{rc: blob, progress: w.progress, count: &count}, nil + } reset = func() { - atomic.AddInt64(&w.lastUpdate.Complete, -count) - w.updates <- *w.lastUpdate + w.progress.complete(-count) } } @@ -354,6 +341,10 @@ func (w *writer) streamBlob(ctx context.Context, blob io.ReadCloser, streamLocat if err != nil { return "", err } + if _, ok := layer.(*stream.Layer); !ok { + // We can't retry streaming layers. + req.GetBody = getBody + } req.Header.Set("Content-Type", "application/octet-stream") resp, err := w.client.Do(req.WithContext(ctx)) @@ -399,19 +390,16 @@ func (w *writer) commitBlob(location, digest string) error { // incrProgress increments and sends a progress update, if WithProgress is used. func (w *writer) incrProgress(written int64) { - if w.updates == nil { + if w.progress == nil { return } - w.updates <- v1.Update{ - Total: w.lastUpdate.Total, - Complete: atomic.AddInt64(&w.lastUpdate.Complete, written), - } + w.progress.complete(written) } // uploadOne performs a complete upload of a single layer. 
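Concretely, for a mountable layer that originates in another registry, the initiation request produced above now looks like (values illustrative):

	POST /v2/<repo>/blobs/uploads/?mount=sha256:abc&from=library/ubuntu&origin=index.docker.io

and if the target registry rejects the cross-registry mount, the helper retries with a plain POST /v2/<repo>/blobs/uploads/ carrying no mount parameters.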
func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { tryUpload := func() error { - var from, mount string + var from, mount, origin string if h, err := l.Digest(); err == nil { // If we know the digest, this isn't a streaming layer. Do an existence // check so we can skip uploading the layer if possible. @@ -432,12 +420,11 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { mount = h.String() } if ml, ok := l.(*MountableLayer); ok { - if w.repo.RegistryStr() == ml.Reference.Context().RegistryStr() { - from = ml.Reference.Context().RepositoryStr() - } + from = ml.Reference.Context().RepositoryStr() + origin = ml.Reference.Context().RegistryStr() } - location, mounted, err := w.initiateUpload(from, mount) + location, mounted, err := w.initiateUpload(from, mount, origin) if err != nil { return err } else if mounted { @@ -465,11 +452,7 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { ctx = redact.NewContext(ctx, "omitting binary blobs from logs") } - blob, err := l.Compressed() - if err != nil { - return err - } - location, err = w.streamBlob(ctx, blob, location) + location, err = w.streamBlob(ctx, l, location) if err != nil { return err } @@ -531,7 +514,7 @@ func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.Image if err != nil { return err } - if err := writeImage(ctx, ref, img, o, w.lastUpdate); err != nil { + if err := writeImage(ctx, ref, img, o, w.progress); err != nil { return err } default: @@ -674,19 +657,21 @@ func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr e repo: ref.Context(), client: &http.Client{Transport: tr}, context: o.context, - updates: o.updates, backoff: o.retryBackoff, predicate: o.retryPredicate, } if o.updates != nil { - w.lastUpdate = &v1.Update{} - w.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts) + w.progress = &progress{updates: o.updates} + w.progress.lastUpdate = &v1.Update{} + + defer close(o.updates) + defer func() { w.progress.err(rerr) }() + + w.progress.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts) if err != nil { return err } - defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() } return w.writeIndex(o.context, ref, ii, options...) @@ -815,14 +800,16 @@ func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr e repo: repo, client: &http.Client{Transport: tr}, context: o.context, - updates: o.updates, backoff: o.retryBackoff, predicate: o.retryPredicate, } if o.updates != nil { + w.progress = &progress{updates: o.updates} + w.progress.lastUpdate = &v1.Update{} + defer close(o.updates) - defer func() { sendError(o.updates, rerr) }() + defer func() { w.progress.err(rerr) }() // TODO: support streaming layers which update the total count as they write. if _, ok := layer.(*stream.Layer); ok { @@ -832,7 +819,7 @@ func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr e if err != nil { return err } - w.lastUpdate = &v1.Update{Total: size} + w.progress.total(size) } return w.uploadOne(o.context, layer) } diff --git a/vendor/github.com/google/go-github/v42/github/repos_merging.go b/vendor/github.com/google/go-github/v42/github/repos_merging.go deleted file mode 100644 index 7edda3efff..0000000000 --- a/vendor/github.com/google/go-github/v42/github/repos_merging.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2014 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" -) - -// RepositoryMergeRequest represents a request to merge a branch in a -// repository. -type RepositoryMergeRequest struct { - Base *string `json:"base,omitempty"` - Head *string `json:"head,omitempty"` - CommitMessage *string `json:"commit_message,omitempty"` -} - -// Merge a branch in the specified repository. -// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#merge-a-branch -func (s *RepositoriesService) Merge(ctx context.Context, owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/merges", owner, repo) - req, err := s.client.NewRequest("POST", u, request) - if err != nil { - return nil, nil, err - } - - commit := new(RepositoryCommit) - resp, err := s.client.Do(ctx, req, commit) - if err != nil { - return nil, resp, err - } - - return commit, resp, nil -} diff --git a/vendor/github.com/google/go-github/v42/AUTHORS b/vendor/github.com/google/go-github/v45/AUTHORS similarity index 100% rename from vendor/github.com/google/go-github/v42/AUTHORS rename to vendor/github.com/google/go-github/v45/AUTHORS diff --git a/vendor/github.com/google/go-github/v42/LICENSE b/vendor/github.com/google/go-github/v45/LICENSE similarity index 100% rename from vendor/github.com/google/go-github/v42/LICENSE rename to vendor/github.com/google/go-github/v45/LICENSE diff --git a/vendor/github.com/google/go-github/v42/github/actions.go b/vendor/github.com/google/go-github/v45/github/actions.go similarity index 77% rename from vendor/github.com/google/go-github/v42/github/actions.go rename to vendor/github.com/google/go-github/v45/github/actions.go index ce15d95fae..8d552f2d0d 100644 --- a/vendor/github.com/google/go-github/v42/github/actions.go +++ b/vendor/github.com/google/go-github/v45/github/actions.go @@ -8,5 +8,5 @@ package github // ActionsService handles communication with the actions related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/ +// GitHub API docs: https://docs.github.com/en/rest/actions/ type ActionsService service diff --git a/vendor/github.com/google/go-github/v42/github/actions_artifacts.go b/vendor/github.com/google/go-github/v45/github/actions_artifacts.go similarity index 70% rename from vendor/github.com/google/go-github/v42/github/actions_artifacts.go rename to vendor/github.com/google/go-github/v45/github/actions_artifacts.go index 4aa7dc4404..3b9c83c490 100644 --- a/vendor/github.com/google/go-github/v42/github/actions_artifacts.go +++ b/vendor/github.com/google/go-github/v45/github/actions_artifacts.go @@ -16,7 +16,7 @@ import ( // data between jobs in a workflow and provide storage for data // once a workflow is complete. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#artifacts +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts type Artifact struct { ID *int64 `json:"id,omitempty"` NodeID *string `json:"node_id,omitempty"` @@ -30,7 +30,7 @@ type Artifact struct { // ArtifactList represents a list of GitHub artifacts. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#artifacts +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#artifacts type ArtifactList struct { TotalCount *int64 `json:"total_count,omitempty"` Artifacts []*Artifact `json:"artifacts,omitempty"` @@ -38,7 +38,7 @@ type ArtifactList struct { // ListArtifacts lists all artifacts that belong to a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-artifacts-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#list-artifacts-for-a-repository func (s *ActionsService) ListArtifacts(ctx context.Context, owner, repo string, opts *ListOptions) (*ArtifactList, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/artifacts", owner, repo) u, err := addOptions(u, opts) @@ -62,7 +62,7 @@ func (s *ActionsService) ListArtifacts(ctx context.Context, owner, repo string, // ListWorkflowRunArtifacts lists all artifacts that belong to a workflow run. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-workflow-run-artifacts +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#list-workflow-run-artifacts func (s *ActionsService) ListWorkflowRunArtifacts(ctx context.Context, owner, repo string, runID int64, opts *ListOptions) (*ArtifactList, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/artifacts", owner, repo, runID) u, err := addOptions(u, opts) @@ -86,7 +86,7 @@ func (s *ActionsService) ListWorkflowRunArtifacts(ctx context.Context, owner, re // GetArtifact gets a specific artifact for a workflow run. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-an-artifact +// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#get-an-artifact func (s *ActionsService) GetArtifact(ctx context.Context, owner, repo string, artifactID int64) (*Artifact, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v", owner, repo, artifactID) @@ -106,52 +106,27 @@ func (s *ActionsService) GetArtifact(ctx context.Context, owner, repo string, ar // DownloadArtifact gets a redirect URL to download an archive for a repository. 
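A usage sketch for the redirect-style download below (client is an authenticated *github.Client; the owner, repo, and artifact ID are placeholders):

	// followRedirects=true lets the client chase a 301 before returning
	// the final 302 Location as a *url.URL.
	archiveURL, _, err := client.Actions.DownloadArtifact(ctx, "my-org", "my-repo", 12345, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("artifact archive:", archiveURL)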
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#download-an-artifact
+// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#download-an-artifact
 func (s *ActionsService) DownloadArtifact(ctx context.Context, owner, repo string, artifactID int64, followRedirects bool) (*url.URL, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v/zip", owner, repo, artifactID)
 
-	resp, err := s.getDownloadArtifactFromURL(ctx, u, followRedirects)
+	resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
 	if err != nil {
 		return nil, nil, err
 	}
+	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusFound {
 		return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
 	}
+
 	parsedURL, err := url.Parse(resp.Header.Get("Location"))
 	return parsedURL, newResponse(resp), nil
 }
 
-func (s *ActionsService) getDownloadArtifactFromURL(ctx context.Context, u string, followRedirects bool) (*http.Response, error) {
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var resp *http.Response
-	// Use http.DefaultTransport if no custom Transport is configured
-	req = withContext(ctx, req)
-	if s.client.client.Transport == nil {
-		resp, err = http.DefaultTransport.RoundTrip(req)
-	} else {
-		resp, err = s.client.client.Transport.RoundTrip(req)
-	}
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	// If redirect response is returned, follow it
-	if followRedirects && resp.StatusCode == http.StatusMovedPermanently {
-		u = resp.Header.Get("Location")
-		resp, err = s.getDownloadArtifactFromURL(ctx, u, false)
-	}
-	return resp, err
-}
-
 // DeleteArtifact deletes a workflow run artifact.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-an-artifact
+// GitHub API docs: https://docs.github.com/en/rest/actions/artifacts#delete-an-artifact
 func (s *ActionsService) DeleteArtifact(ctx context.Context, owner, repo string, artifactID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/artifacts/%v", owner, repo, artifactID)
diff --git a/vendor/github.com/google/go-github/v42/github/actions_runner_groups.go b/vendor/github.com/google/go-github/v45/github/actions_runner_groups.go
similarity index 81%
rename from vendor/github.com/google/go-github/v42/github/actions_runner_groups.go
rename to vendor/github.com/google/go-github/v45/github/actions_runner_groups.go
index 2d6a15463e..6d89249150 100644
--- a/vendor/github.com/google/go-github/v42/github/actions_runner_groups.go
+++ b/vendor/github.com/google/go-github/v45/github/actions_runner_groups.go
@@ -61,10 +61,18 @@ type SetRunnerGroupRunnersRequest struct {
 	Runners []int64 `json:"runners"`
 }
 
+// ListOrgRunnerGroupOptions extends ListOptions with the optional parameter VisibleToRepository.
+type ListOrgRunnerGroupOptions struct {
+	ListOptions
+
+	// Only return runner groups that are allowed to be used by this repository.
+	VisibleToRepository string `url:"visible_to_repository,omitempty"`
+}
+
 // ListOrganizationRunnerGroups lists all self-hosted runner groups configured in an organization.
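A sketch of the new repository filter in use (client is an authenticated *github.Client; the org and repo names are placeholders):

	opts := &github.ListOrgRunnerGroupOptions{
		ListOptions: github.ListOptions{PerPage: 50},
		// Only list runner groups usable by this repository.
		VisibleToRepository: "my-repo",
	}
	groups, _, err := client.Actions.ListOrganizationRunnerGroups(ctx, "my-org", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d runner groups\n", len(groups.RunnerGroups))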
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-self-hosted-runner-groups-for-an-organization -func (s *ActionsService) ListOrganizationRunnerGroups(ctx context.Context, org string, opts *ListOptions) (*RunnerGroups, *Response, error) { +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-self-hosted-runner-groups-for-an-organization +func (s *ActionsService) ListOrganizationRunnerGroups(ctx context.Context, org string, opts *ListOrgRunnerGroupOptions) (*RunnerGroups, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups", org) u, err := addOptions(u, opts) if err != nil { @@ -87,7 +95,7 @@ func (s *ActionsService) ListOrganizationRunnerGroups(ctx context.Context, org s // GetOrganizationRunnerGroup gets a specific self-hosted runner group for an organization using its RunnerGroup ID. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#get-a-self-hosted-runner-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#get-a-self-hosted-runner-group-for-an-organization func (s *ActionsService) GetOrganizationRunnerGroup(ctx context.Context, org string, groupID int64) (*RunnerGroup, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) req, err := s.client.NewRequest("GET", u, nil) @@ -106,7 +114,7 @@ func (s *ActionsService) GetOrganizationRunnerGroup(ctx context.Context, org str // DeleteOrganizationRunnerGroup deletes a self-hosted runner group from an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#delete-a-self-hosted-runner-group-from-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#delete-a-self-hosted-runner-group-from-an-organization func (s *ActionsService) DeleteOrganizationRunnerGroup(ctx context.Context, org string, groupID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) @@ -120,7 +128,7 @@ func (s *ActionsService) DeleteOrganizationRunnerGroup(ctx context.Context, org // CreateOrganizationRunnerGroup creates a new self-hosted runner group for an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#create-a-self-hosted-runner-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#create-a-self-hosted-runner-group-for-an-organization func (s *ActionsService) CreateOrganizationRunnerGroup(ctx context.Context, org string, createReq CreateRunnerGroupRequest) (*RunnerGroup, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups", org) req, err := s.client.NewRequest("POST", u, createReq) @@ -139,7 +147,7 @@ func (s *ActionsService) CreateOrganizationRunnerGroup(ctx context.Context, org // UpdateOrganizationRunnerGroup updates a self-hosted runner group for an organization. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#update-a-self-hosted-runner-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#update-a-self-hosted-runner-group-for-an-organization func (s *ActionsService) UpdateOrganizationRunnerGroup(ctx context.Context, org string, groupID int64, updateReq UpdateRunnerGroupRequest) (*RunnerGroup, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v", org, groupID) req, err := s.client.NewRequest("PATCH", u, updateReq) @@ -158,7 +166,7 @@ func (s *ActionsService) UpdateOrganizationRunnerGroup(ctx context.Context, org // ListRepositoryAccessRunnerGroup lists the repositories with access to a self-hosted runner group configured in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-repository-access-to-a-self-hosted-runner-group-in-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-repository-access-to-a-self-hosted-runner-group-in-an-organization func (s *ActionsService) ListRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID int64, opts *ListOptions) (*ListRepositories, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories", org, groupID) u, err := addOptions(u, opts) @@ -183,7 +191,7 @@ func (s *ActionsService) ListRepositoryAccessRunnerGroup(ctx context.Context, or // SetRepositoryAccessRunnerGroup replaces the list of repositories that have access to a self-hosted runner group configured in an organization // with a new List of repositories. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#set-repository-access-for-a-self-hosted-runner-group-in-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#set-repository-access-for-a-self-hosted-runner-group-in-an-organization func (s *ActionsService) SetRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID int64, ids SetRepoAccessRunnerGroupRequest) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories", org, groupID) @@ -198,7 +206,7 @@ func (s *ActionsService) SetRepositoryAccessRunnerGroup(ctx context.Context, org // AddRepositoryAccessRunnerGroup adds a repository to the list of selected repositories that can access a self-hosted runner group. // The runner group must have visibility set to 'selected'. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#add-repository-access-to-a-self-hosted-runner-group-in-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#add-repository-access-to-a-self-hosted-runner-group-in-an-organization func (s *ActionsService) AddRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID, repoID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories/%v", org, groupID, repoID) @@ -213,7 +221,7 @@ func (s *ActionsService) AddRepositoryAccessRunnerGroup(ctx context.Context, org // RemoveRepositoryAccessRunnerGroup removes a repository from the list of selected repositories that can access a self-hosted runner group. // The runner group must have visibility set to 'selected'. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#remove-repository-access-to-a-self-hosted-runner-group-in-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#remove-repository-access-to-a-self-hosted-runner-group-in-an-organization func (s *ActionsService) RemoveRepositoryAccessRunnerGroup(ctx context.Context, org string, groupID, repoID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/repositories/%v", org, groupID, repoID) @@ -227,7 +235,7 @@ func (s *ActionsService) RemoveRepositoryAccessRunnerGroup(ctx context.Context, // ListRunnerGroupRunners lists self-hosted runners that are in a specific organization group. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-self-hosted-runners-in-a-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#list-self-hosted-runners-in-a-group-for-an-organization func (s *ActionsService) ListRunnerGroupRunners(ctx context.Context, org string, groupID int64, opts *ListOptions) (*Runners, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners", org, groupID) u, err := addOptions(u, opts) @@ -252,7 +260,7 @@ func (s *ActionsService) ListRunnerGroupRunners(ctx context.Context, org string, // SetRunnerGroupRunners replaces the list of self-hosted runners that are part of an organization runner group // with a new list of runners. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#set-self-hosted-runners-in-a-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#set-self-hosted-runners-in-a-group-for-an-organization func (s *ActionsService) SetRunnerGroupRunners(ctx context.Context, org string, groupID int64, ids SetRunnerGroupRunnersRequest) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners", org, groupID) @@ -266,7 +274,7 @@ func (s *ActionsService) SetRunnerGroupRunners(ctx context.Context, org string, // AddRunnerGroupRunners adds a self-hosted runner to a runner group configured in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#add-a-self-hosted-runner-to-a-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#add-a-self-hosted-runner-to-a-group-for-an-organization func (s *ActionsService) AddRunnerGroupRunners(ctx context.Context, org string, groupID, runnerID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners/%v", org, groupID, runnerID) @@ -281,7 +289,7 @@ func (s *ActionsService) AddRunnerGroupRunners(ctx context.Context, org string, // RemoveRunnerGroupRunners removes a self-hosted runner from a group configured in an organization. // The runner is then returned to the default group. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#remove-a-self-hosted-runner-from-a-group-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runner-groups#remove-a-self-hosted-runner-from-a-group-for-an-organization func (s *ActionsService) RemoveRunnerGroupRunners(ctx context.Context, org string, groupID, runnerID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/actions/runner-groups/%v/runners/%v", org, groupID, runnerID) diff --git a/vendor/github.com/google/go-github/v42/github/actions_runners.go b/vendor/github.com/google/go-github/v45/github/actions_runners.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/actions_runners.go rename to vendor/github.com/google/go-github/v45/github/actions_runners.go index f37e1aa419..40c6be3a92 100644 --- a/vendor/github.com/google/go-github/v42/github/actions_runners.go +++ b/vendor/github.com/google/go-github/v45/github/actions_runners.go @@ -28,7 +28,7 @@ type ActionsEnabledOnOrgRepos struct { // ListRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-runner-applications-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-a-repository func (s *ActionsService) ListRunnerApplicationDownloads(ctx context.Context, owner, repo string) ([]*RunnerApplicationDownload, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners/downloads", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -53,7 +53,7 @@ type RegistrationToken struct { // CreateRegistrationToken creates a token that can be used to add a self-hosted runner. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-registration-token-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-a-repository func (s *ActionsService) CreateRegistrationToken(ctx context.Context, owner, repo string) (*RegistrationToken, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners/registration-token", owner, repo) @@ -96,7 +96,7 @@ type Runners struct { // ListRunners lists all the self-hosted runners for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-self-hosted-runners-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-a-repository func (s *ActionsService) ListRunners(ctx context.Context, owner, repo string, opts *ListOptions) (*Runners, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners", owner, repo) u, err := addOptions(u, opts) @@ -120,7 +120,7 @@ func (s *ActionsService) ListRunners(ctx context.Context, owner, repo string, op // GetRunner gets a specific self-hosted runner for a repository using its runner ID. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-self-hosted-runner-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#get-a-self-hosted-runner-for-a-repository func (s *ActionsService) GetRunner(ctx context.Context, owner, repo string, runnerID int64) (*Runner, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners/%v", owner, repo, runnerID) req, err := s.client.NewRequest("GET", u, nil) @@ -145,7 +145,7 @@ type RemoveToken struct { // CreateRemoveToken creates a token that can be used to remove a self-hosted runner from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-remove-token-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-remove-token-for-a-repository func (s *ActionsService) CreateRemoveToken(ctx context.Context, owner, repo string) (*RemoveToken, *Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners/remove-token", owner, repo) @@ -165,7 +165,7 @@ func (s *ActionsService) CreateRemoveToken(ctx context.Context, owner, repo stri // RemoveRunner forces the removal of a self-hosted runner in a repository using the runner id. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-a-self-hosted-runner-from-a-repository +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-a-repository func (s *ActionsService) RemoveRunner(ctx context.Context, owner, repo string, runnerID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/actions/runners/%v", owner, repo, runnerID) @@ -179,7 +179,7 @@ func (s *ActionsService) RemoveRunner(ctx context.Context, owner, repo string, r // ListOrganizationRunnerApplicationDownloads lists self-hosted runner application binaries that can be downloaded and run. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-runner-applications-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-runner-applications-for-an-organization func (s *ActionsService) ListOrganizationRunnerApplicationDownloads(ctx context.Context, owner string) ([]*RunnerApplicationDownload, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runners/downloads", owner) req, err := s.client.NewRequest("GET", u, nil) @@ -198,7 +198,7 @@ func (s *ActionsService) ListOrganizationRunnerApplicationDownloads(ctx context. // CreateOrganizationRegistrationToken creates a token that can be used to add a self-hosted runner to an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-registration-token-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-organization func (s *ActionsService) CreateOrganizationRegistrationToken(ctx context.Context, owner string) (*RegistrationToken, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/runners/registration-token", owner) @@ -218,7 +218,7 @@ func (s *ActionsService) CreateOrganizationRegistrationToken(ctx context.Context // ListOrganizationRunners lists all the self-hosted runners for an organization. 
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-self-hosted-runners-for-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-organization
 func (s *ActionsService) ListOrganizationRunners(ctx context.Context, owner string, opts *ListOptions) (*Runners, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/runners", owner)
 	u, err := addOptions(u, opts)
@@ -242,7 +242,7 @@ func (s *ActionsService) ListOrganizationRunners(ctx context.Context, owner stri
 // ListEnabledReposInOrg lists the selected repositories that are enabled for GitHub Actions in an organization.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-selected-repositories-enabled-for-github-actions-in-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#list-selected-repositories-enabled-for-github-actions-in-an-organization
 func (s *ActionsService) ListEnabledReposInOrg(ctx context.Context, owner string, opts *ListOptions) (*ActionsEnabledOnOrgRepos, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories", owner)
 	u, err := addOptions(u, opts)
@@ -266,7 +266,7 @@ func (s *ActionsService) ListEnabledReposInOrg(ctx context.Context, owner string
 // SetEnabledReposInOrg replaces the list of selected repositories that are enabled for GitHub Actions in an organization.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#set-selected-repositories-enabled-for-github-actions-in-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-selected-repositories-enabled-for-github-actions-in-an-organization
 func (s *ActionsService) SetEnabledReposInOrg(ctx context.Context, owner string, repositoryIDs []int64) (*Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories", owner)
@@ -287,7 +287,7 @@ func (s *ActionsService) SetEnabledReposInOrg(ctx context.Context, owner string,
 // AddEnabledReposInOrg adds a repository to the list of selected repositories that are enabled for GitHub Actions in an organization.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#enable-a-selected-repository-for-github-actions-in-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#enable-a-selected-repository-for-github-actions-in-an-organization
 func (s *ActionsService) AddEnabledReposInOrg(ctx context.Context, owner string, repositoryID int64) (*Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories/%v", owner, repositoryID)
@@ -306,7 +306,7 @@ func (s *ActionsService) AddEnabledReposInOrg(ctx context.Context, owner string,
 // RemoveEnabledRepoInOrg removes a single repository from the list of enabled repos for GitHub Actions in an organization.
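The list methods above all take a *ListOptions and return a *Response whose NextPage field drives pagination. A sketch of the usual loop over an organization's runners (the org name is hypothetical, the client assumed authenticated):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	opts := &github.ListOptions{PerPage: 100}
	for {
		runners, resp, err := gh.Actions.ListOrganizationRunners(ctx, "my-org", opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, r := range runners.Runners {
			fmt.Printf("%d %s status=%s\n", r.GetID(), r.GetName(), r.GetStatus())
		}
		if resp.NextPage == 0 {
			break // last page reached
		}
		opts.Page = resp.NextPage
	}
}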
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#disable-a-selected-repository-for-github-actions-in-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#disable-a-selected-repository-for-github-actions-in-an-organization
 func (s *ActionsService) RemoveEnabledRepoInOrg(ctx context.Context, owner string, repositoryID int64) (*Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/permissions/repositories/%v", owner, repositoryID)
@@ -325,7 +325,7 @@ func (s *ActionsService) RemoveEnabledRepoInOrg(ctx context.Context, owner strin
 // GetOrganizationRunner gets a specific self-hosted runner for an organization using its runner ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-self-hosted-runner-for-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#get-a-self-hosted-runner-for-an-organization
 func (s *ActionsService) GetOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*Runner, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/runners/%v", owner, runnerID)
 	req, err := s.client.NewRequest("GET", u, nil)
@@ -344,7 +344,7 @@ func (s *ActionsService) GetOrganizationRunner(ctx context.Context, owner string
 // CreateOrganizationRemoveToken creates a token that can be used to remove a self-hosted runner from an organization.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-remove-token-for-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-remove-token-for-an-organization
 func (s *ActionsService) CreateOrganizationRemoveToken(ctx context.Context, owner string) (*RemoveToken, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/runners/remove-token", owner)
@@ -364,7 +364,7 @@ func (s *ActionsService) CreateOrganizationRemoveToken(ctx context.Context, owne
 // RemoveOrganizationRunner forces the removal of a self-hosted runner from an organization using the runner id.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-a-self-hosted-runner-from-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-organization
 func (s *ActionsService) RemoveOrganizationRunner(ctx context.Context, owner string, runnerID int64) (*Response, error) {
 	u := fmt.Sprintf("orgs/%v/actions/runners/%v", owner, runnerID)
diff --git a/vendor/github.com/google/go-github/v42/github/actions_secrets.go b/vendor/github.com/google/go-github/v45/github/actions_secrets.go
similarity index 83%
rename from vendor/github.com/google/go-github/v42/github/actions_secrets.go
rename to vendor/github.com/google/go-github/v45/github/actions_secrets.go
index 29f70a1a16..dc057edba2 100644
--- a/vendor/github.com/google/go-github/v42/github/actions_secrets.go
+++ b/vendor/github.com/google/go-github/v45/github/actions_secrets.go
@@ -64,7 +64,7 @@ func (s *ActionsService) getPublicKey(ctx context.Context, url string) (*PublicK
 // GetRepoPublicKey gets a public key that should be used for secret encryption.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-repository-public-key
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-a-repository-public-key
 func (s *ActionsService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) {
 	url := fmt.Sprintf("repos/%v/%v/actions/secrets/public-key", owner, repo)
 	return s.getPublicKey(ctx, url)
@@ -72,7 +72,7 @@ func (s *ActionsService) GetRepoPublicKey(ctx context.Context, owner, repo strin
 // GetOrgPublicKey gets a public key that should be used for secret encryption.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-an-organization-public-key
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-organization-public-key
 func (s *ActionsService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/public-key", org)
 	return s.getPublicKey(ctx, url)
@@ -80,7 +80,7 @@ func (s *ActionsService) GetOrgPublicKey(ctx context.Context, org string) (*Publ
 // GetEnvPublicKey gets a public key that should be used for secret encryption.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#get-an-environment-public-key
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-environment-public-key
 func (s *ActionsService) GetEnvPublicKey(ctx context.Context, repoID int, env string) (*PublicKey, *Response, error) {
 	url := fmt.Sprintf("repositories/%v/environments/%v/secrets/public-key", repoID, env)
 	return s.getPublicKey(ctx, url)
@@ -124,7 +124,7 @@ func (s *ActionsService) listSecrets(ctx context.Context, url string, opts *List
 // ListRepoSecrets lists all secrets available in a repository
 // without revealing their encrypted values.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-repository-secrets
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-repository-secrets
 func (s *ActionsService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) {
 	url := fmt.Sprintf("repos/%v/%v/actions/secrets", owner, repo)
 	return s.listSecrets(ctx, url, opts)
@@ -133,7 +133,7 @@ func (s *ActionsService) ListRepoSecrets(ctx context.Context, owner, repo string
 // ListOrgSecrets lists all secrets available in an organization
 // without revealing their encrypted values.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-organization-secrets
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-organization-secrets
 func (s *ActionsService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets", org)
 	return s.listSecrets(ctx, url, opts)
@@ -141,7 +141,7 @@ func (s *ActionsService) ListOrgSecrets(ctx context.Context, org string, opts *L
 // ListEnvSecrets lists all secrets available in an environment.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-environment-secrets
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-environment-secrets
 func (s *ActionsService) ListEnvSecrets(ctx context.Context, repoID int, env string, opts *ListOptions) (*Secrets, *Response, error) {
 	url := fmt.Sprintf("repositories/%v/environments/%v/secrets", repoID, env)
 	return s.listSecrets(ctx, url, opts)
@@ -164,7 +164,7 @@ func (s *ActionsService) getSecret(ctx context.Context, url string) (*Secret, *R
 // GetRepoSecret gets a single repository secret without revealing its encrypted value.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-repository-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-a-repository-secret
 func (s *ActionsService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) {
 	url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, name)
 	return s.getSecret(ctx, url)
@@ -172,7 +172,7 @@ func (s *ActionsService) GetRepoSecret(ctx context.Context, owner, repo, name st
 // GetOrgSecret gets a single organization secret without revealing its encrypted value.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-organization-secret
 func (s *ActionsService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, name)
 	return s.getSecret(ctx, url)
@@ -180,7 +180,7 @@ func (s *ActionsService) GetOrgSecret(ctx context.Context, org, name string) (*S
 // GetEnvSecret gets a single environment secret without revealing its encrypted value.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#list-environment-secrets
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#get-an-environment-secret
 func (s *ActionsService) GetEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Secret, *Response, error) {
 	url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, secretName)
 	return s.getSecret(ctx, url)
@@ -213,7 +213,7 @@ func (s *ActionsService) putSecret(ctx context.Context, url string, eSecret *Enc
 // CreateOrUpdateRepoSecret creates or updates a repository secret with an encrypted value.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-or-update-a-repository-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-a-repository-secret
 func (s *ActionsService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) {
 	url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, eSecret.Name)
 	return s.putSecret(ctx, url, eSecret)
@@ -221,7 +221,7 @@ func (s *ActionsService) CreateOrUpdateRepoSecret(ctx context.Context, owner, re
 // CreateOrUpdateOrgSecret creates or updates an organization secret with an encrypted value.
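The create-or-update methods expect a value already encrypted against the public key fetched earlier; GitHub requires a libsodium-style sealed box. A sketch of the repository-secret flow using golang.org/x/crypto/nacl/box, with hypothetical owner, repo, and secret names:

package main

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"log"

	"github.com/google/go-github/v45/github"
	"golang.org/x/crypto/nacl/box"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	// 1. Fetch the repository public key used for secret encryption.
	pk, _, err := gh.Actions.GetRepoPublicKey(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	raw, err := base64.StdEncoding.DecodeString(pk.GetKey())
	if err != nil {
		log.Fatal(err)
	}
	var key [32]byte
	copy(key[:], raw)

	// 2. Seal the plaintext anonymously against that key.
	sealed, err := box.SealAnonymous(nil, []byte("s3cr3t-value"), &key, rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// 3. Upload the ciphertext under the key ID returned in step 1.
	_, err = gh.Actions.CreateOrUpdateRepoSecret(ctx, "my-org", "my-repo", &github.EncryptedSecret{
		Name:           "MY_SECRET",
		KeyID:          pk.GetKeyID(),
		EncryptedValue: base64.StdEncoding.EncodeToString(sealed),
	})
	if err != nil {
		log.Fatal(err)
	}
}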
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-or-update-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-an-organization-secret
 func (s *ActionsService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, eSecret.Name)
 	return s.putSecret(ctx, url, eSecret)
@@ -229,7 +229,7 @@ func (s *ActionsService) CreateOrUpdateOrgSecret(ctx context.Context, org string
 // CreateOrUpdateEnvSecret creates or updates a single environment secret with an encrypted value.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#create-or-update-an-environment-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#create-or-update-an-environment-secret
 func (s *ActionsService) CreateOrUpdateEnvSecret(ctx context.Context, repoID int, env string, eSecret *EncryptedSecret) (*Response, error) {
 	url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, eSecret.Name)
 	return s.putSecret(ctx, url, eSecret)
@@ -246,7 +246,7 @@ func (s *ActionsService) deleteSecret(ctx context.Context, url string) (*Respons
 // DeleteRepoSecret deletes a secret in a repository using the secret name.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-a-repository-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-a-repository-secret
 func (s *ActionsService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) {
 	url := fmt.Sprintf("repos/%v/%v/actions/secrets/%v", owner, repo, name)
 	return s.deleteSecret(ctx, url)
@@ -254,7 +254,7 @@ func (s *ActionsService) DeleteRepoSecret(ctx context.Context, owner, repo, name
 // DeleteOrgSecret deletes a secret in an organization using the secret name.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-an-organization-secret
 func (s *ActionsService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v", org, name)
 	return s.deleteSecret(ctx, url)
@@ -262,7 +262,7 @@ func (s *ActionsService) DeleteOrgSecret(ctx context.Context, org, name string)
 // DeleteEnvSecret deletes a secret in an environment using the secret name.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#delete-an-environment-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#delete-an-environment-secret
 func (s *ActionsService) DeleteEnvSecret(ctx context.Context, repoID int, env, secretName string) (*Response, error) {
 	url := fmt.Sprintf("repositories/%v/environments/%v/secrets/%v", repoID, env, secretName)
 	return s.deleteSecret(ctx, url)
@@ -296,7 +296,7 @@ func (s *ActionsService) listSelectedReposForSecret(ctx context.Context, url str
 // ListSelectedReposForOrgSecret lists all repositories that have access to a secret.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-selected-repositories-for-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#list-selected-repositories-for-an-organization-secret
 func (s *ActionsService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories", org, name)
 	return s.listSelectedReposForSecret(ctx, url, opts)
@@ -317,7 +317,7 @@ func (s *ActionsService) setSelectedReposForSecret(ctx context.Context, url stri
 // SetSelectedReposForOrgSecret sets the repositories that have access to a secret.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#set-selected-repositories-for-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#set-selected-repositories-for-an-organization-secret
 func (s *ActionsService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories", org, name)
 	return s.setSelectedReposForSecret(ctx, url, ids)
@@ -334,7 +334,7 @@ func (s *ActionsService) addSelectedRepoToSecret(ctx context.Context, url string
 // AddSelectedRepoToOrgSecret adds a repository to an organization secret.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#add-selected-repository-to-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#add-selected-repository-to-an-organization-secret
 func (s *ActionsService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories/%v", org, name, *repo.ID)
 	return s.addSelectedRepoToSecret(ctx, url)
@@ -351,7 +351,7 @@ func (s *ActionsService) removeSelectedRepoFromSecret(ctx context.Context, url s
 // RemoveSelectedRepoFromOrgSecret removes a repository from an organization secret.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#remove-selected-repository-from-an-organization-secret
+// GitHub API docs: https://docs.github.com/en/rest/actions/secrets#remove-selected-repository-from-an-organization-secret
 func (s *ActionsService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) {
 	url := fmt.Sprintf("orgs/%v/actions/secrets/%v/repositories/%v", org, name, *repo.ID)
 	return s.removeSelectedRepoFromSecret(ctx, url)
diff --git a/vendor/github.com/google/go-github/v42/github/actions_workflow_jobs.go b/vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go
similarity index 77%
rename from vendor/github.com/google/go-github/v42/github/actions_workflow_jobs.go
rename to vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go
index 66b8ff6edb..2867e82af0 100644
--- a/vendor/github.com/google/go-github/v42/github/actions_workflow_jobs.go
+++ b/vendor/github.com/google/go-github/v45/github/actions_workflow_jobs.go
@@ -66,7 +66,7 @@ type ListWorkflowJobsOptions struct {
 // ListWorkflowJobs lists all jobs for a workflow run.
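SelectedRepoIDs is a plain slice of repository IDs, so scoping an organization secret to particular repositories is a single call. A sketch with hypothetical names and IDs:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	// Replace the access list for MY_SECRET with exactly these two repositories.
	_, err := gh.Actions.SetSelectedReposForOrgSecret(ctx, "my-org", "MY_SECRET",
		github.SelectedRepoIDs{1234, 5678})
	if err != nil {
		log.Fatal(err)
	}
}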
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-jobs-for-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#list-jobs-for-a-workflow-run
 func (s *ActionsService) ListWorkflowJobs(ctx context.Context, owner, repo string, runID int64, opts *ListWorkflowJobsOptions) (*Jobs, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/actions/runs/%v/jobs", owner, repo, runID)
 	u, err := addOptions(u, opts)
@@ -90,7 +90,7 @@ func (s *ActionsService) ListWorkflowJobs(ctx context.Context, owner, repo strin
 // GetWorkflowJobByID gets a specific job in a workflow run by ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-job-for-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#get-a-job-for-a-workflow-run
 func (s *ActionsService) GetWorkflowJobByID(ctx context.Context, owner, repo string, jobID int64) (*WorkflowJob, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v", owner, repo, jobID)
@@ -110,45 +110,20 @@ func (s *ActionsService) GetWorkflowJobByID(ctx context.Context, owner, repo str
 // GetWorkflowJobLogs gets a redirect URL to download a plain text file of logs for a workflow job.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#download-job-logs-for-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-jobs#download-job-logs-for-a-workflow-run
 func (s *ActionsService) GetWorkflowJobLogs(ctx context.Context, owner, repo string, jobID int64, followRedirects bool) (*url.URL, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/logs", owner, repo, jobID)
-	resp, err := s.getWorkflowLogsFromURL(ctx, u, followRedirects)
+	resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
 	if err != nil {
 		return nil, nil, err
 	}
+	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusFound {
 		return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
 	}
+
 	parsedURL, err := url.Parse(resp.Header.Get("Location"))
 	return parsedURL, newResponse(resp), err
 }
-
-func (s *ActionsService) getWorkflowLogsFromURL(ctx context.Context, u string, followRedirects bool) (*http.Response, error) {
-	req, err := s.client.NewRequest("GET", u, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var resp *http.Response
-	// Use http.DefaultTransport if no custom Transport is configured
-	req = withContext(ctx, req)
-	if s.client.client.Transport == nil {
-		resp, err = http.DefaultTransport.RoundTrip(req)
-	} else {
-		resp, err = s.client.client.Transport.RoundTrip(req)
-	}
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	// If redirect response is returned, follow it
-	if followRedirects && resp.StatusCode == http.StatusMovedPermanently {
-		u = resp.Header.Get("Location")
-		resp, err = s.getWorkflowLogsFromURL(ctx, u, false)
-	}
-	return resp, err
-}
diff --git a/vendor/github.com/google/go-github/v42/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go
similarity index 73%
rename from vendor/github.com/google/go-github/v42/github/actions_workflow_runs.go
rename to vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go
index 1e1ff61ae8..18fdd57d6a 100644
--- a/vendor/github.com/google/go-github/v42/github/actions_workflow_runs.go
+++ b/vendor/github.com/google/go-github/v45/github/actions_workflow_runs.go
@@ -44,6 +44,7 @@ type WorkflowRun struct {
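With the refactor above, GetWorkflowJobLogs (and GetWorkflowRunLogs below) delegate redirect handling to the client; the caller still receives the short-lived download URL rather than the log bytes. A sketch with a hypothetical job ID:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	// Passing true asks the client to follow an initial redirect before
	// returning the final Location header as a *url.URL.
	logsURL, _, err := gh.Actions.GetWorkflowJobLogs(ctx, "my-org", "my-repo", 123456789, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("download logs from:", logsURL) // the URL expires quickly; fetch promptly
}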
 	WorkflowURL        *string     `json:"workflow_url,omitempty"`
 	Repository         *Repository `json:"repository,omitempty"`
 	HeadRepository     *Repository `json:"head_repository,omitempty"`
+	Actor              *User       `json:"actor,omitempty"`
 }
 // WorkflowRuns represents a slice of repository action workflow runs.
@@ -88,6 +89,11 @@ type WorkflowRunJobRun struct {
 	DurationMS *int64 `json:"duration_ms,omitempty"`
 }
+// WorkflowRunAttemptOptions specifies optional parameters to GetWorkflowRunAttempt.
+type WorkflowRunAttemptOptions struct {
+	ExcludePullRequests *bool `url:"exclude_pull_requests,omitempty"`
+}
+
 func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
 	u, err := addOptions(endpoint, opts)
 	if err != nil {
@@ -110,7 +116,7 @@ func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string,
 // ListWorkflowRunsByID lists all workflow runs by workflow ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-workflow-runs
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs
 func (s *ActionsService) ListWorkflowRunsByID(ctx context.Context, owner, repo string, workflowID int64, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowID)
 	return s.listWorkflowRuns(ctx, u, opts)
@@ -118,7 +124,7 @@ func (s *ActionsService) ListWorkflowRunsByID(ctx context.Context, owner, repo s
 // ListWorkflowRunsByFileName lists all workflow runs by workflow file name.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-workflow-runs
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs
 func (s *ActionsService) ListWorkflowRunsByFileName(ctx context.Context, owner, repo, workflowFileName string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/actions/workflows/%v/runs", owner, repo, workflowFileName)
 	return s.listWorkflowRuns(ctx, u, opts)
@@ -126,7 +132,7 @@ func (s *ActionsService) ListWorkflowRunsByFileName(ctx context.Context, owner,
 // ListRepositoryWorkflowRuns lists all workflow runs for a repository.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-workflow-runs-for-a-repository
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#list-workflow-runs-for-a-repository
 func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner, repo string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/actions/runs", owner, repo)
 	u, err := addOptions(u, opts)
@@ -150,7 +156,7 @@ func (s *ActionsService) ListRepositoryWorkflowRuns(ctx context.Context, owner,
 // GetWorkflowRunByID gets a specific workflow run by ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run
 func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRun, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID)
@@ -168,9 +174,33 @@ func (s *ActionsService) GetWorkflowRunByID(ctx context.Context, owner, repo str
 	return run, resp, nil
 }
+// GetWorkflowRunAttempt gets a specific workflow run attempt.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-a-workflow-run-attempt
+func (s *ActionsService) GetWorkflowRunAttempt(ctx context.Context, owner, repo string, runID int64, attemptNumber int, opts *WorkflowRunAttemptOptions) (*WorkflowRun, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/attempts/%v", owner, repo, runID, attemptNumber)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	run := new(WorkflowRun)
+	resp, err := s.client.Do(ctx, req, run)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return run, resp, nil
+}
+
 // RerunWorkflowByID re-runs a workflow by ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#re-run-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-workflow
 func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun", owner, repo, runID)
@@ -182,9 +212,37 @@ func (s *ActionsService) RerunWorkflowByID(ctx context.Context, owner, repo stri
 	return s.client.Do(ctx, req, nil)
 }
+// RerunFailedJobsByID re-runs all of the failed jobs and their dependent jobs in a workflow run by ID.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-failed-jobs-from-a-workflow-run
+func (s *ActionsService) RerunFailedJobsByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/rerun-failed-jobs", owner, repo, runID)
+
+	req, err := s.client.NewRequest("POST", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
+
+// RerunJobByID re-runs a job and its dependent jobs in a workflow run by ID.
+//
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#re-run-a-job-from-a-workflow-run
+func (s *ActionsService) RerunJobByID(ctx context.Context, owner, repo string, jobID int64) (*Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/actions/jobs/%v/rerun", owner, repo, jobID)
+
+	req, err := s.client.NewRequest("POST", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(ctx, req, nil)
+}
+
 // CancelWorkflowRunByID cancels a workflow run by ID.
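The two additions above slot together: inspect a particular run attempt, then retry just its failures. A sketch with hypothetical names and IDs:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	run, _, err := gh.Actions.GetWorkflowRunAttempt(ctx, "my-org", "my-repo", 987654321, 1,
		&github.WorkflowRunAttemptOptions{ExcludePullRequests: github.Bool(true)})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(run.GetStatus(), run.GetConclusion())

	// Retry only the failed jobs (and their dependents) instead of the whole run.
	if run.GetConclusion() == "failure" {
		if _, err := gh.Actions.RerunFailedJobsByID(ctx, "my-org", "my-repo", 987654321); err != nil {
			log.Fatal(err)
		}
	}
}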
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#cancel-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#cancel-a-workflow-run
 func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/cancel", owner, repo, runID)
@@ -198,25 +256,27 @@ func (s *ActionsService) CancelWorkflowRunByID(ctx context.Context, owner, repo
 // GetWorkflowRunLogs gets a redirect URL to download a plain text file of logs for a workflow run.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#download-workflow-run-logs
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#download-workflow-run-logs
 func (s *ActionsService) GetWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64, followRedirects bool) (*url.URL, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
-	resp, err := s.getWorkflowLogsFromURL(ctx, u, followRedirects)
+	resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects)
 	if err != nil {
 		return nil, nil, err
 	}
+	defer resp.Body.Close()
 	if resp.StatusCode != http.StatusFound {
 		return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status)
 	}
+
 	parsedURL, err := url.Parse(resp.Header.Get("Location"))
 	return parsedURL, newResponse(resp), err
 }
 // DeleteWorkflowRun deletes a workflow run by ID.
 //
-// GitHub API docs: https://docs.github.com/en/rest/reference/actions#delete-a-workflow-run
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-a-workflow-run
 func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v", owner, repo, runID)
@@ -230,7 +290,7 @@ func (s *ActionsService) DeleteWorkflowRun(ctx context.Context, owner, repo stri
 // DeleteWorkflowRunLogs deletes all logs for a workflow run.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#delete-workflow-run-logs
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#delete-workflow-run-logs
 func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo string, runID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/logs", owner, repo, runID)
@@ -244,7 +304,7 @@ func (s *ActionsService) DeleteWorkflowRunLogs(ctx context.Context, owner, repo
 // GetWorkflowRunUsageByID gets a specific workflow usage run by run ID in the unit of billable milliseconds.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-workflow-run-usage
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflow-runs#get-workflow-run-usage
 func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, repo string, runID int64) (*WorkflowRunUsage, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/timing", owner, repo, runID)
diff --git a/vendor/github.com/google/go-github/v42/github/actions_workflows.go b/vendor/github.com/google/go-github/v45/github/actions_workflows.go
similarity index 85%
rename from vendor/github.com/google/go-github/v42/github/actions_workflows.go
rename to vendor/github.com/google/go-github/v45/github/actions_workflows.go
index 3ce926fed6..9973a5d3f3 100644
--- a/vendor/github.com/google/go-github/v42/github/actions_workflows.go
+++ b/vendor/github.com/google/go-github/v45/github/actions_workflows.go
@@ -50,7 +50,7 @@ type WorkflowBill struct {
 // CreateWorkflowDispatchEventRequest represents a request to create a workflow dispatch event.
 type CreateWorkflowDispatchEventRequest struct {
 	// Ref represents the reference of the workflow run.
-	// The reference can be a branch, tag, or a commit SHA.
+	// The reference can be a branch or a tag.
 	// Ref is required when creating a workflow dispatch event.
 	Ref string `json:"ref"`
 	// Inputs represents input keys and values configured in the workflow file.
@@ -61,7 +61,7 @@ type CreateWorkflowDispatchEventRequest struct {
 // ListWorkflows lists all workflows in a repository.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#list-repository-workflows
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#list-repository-workflows
 func (s *ActionsService) ListWorkflows(ctx context.Context, owner, repo string, opts *ListOptions) (*Workflows, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/actions/workflows", owner, repo)
 	u, err := addOptions(u, opts)
@@ -85,7 +85,7 @@ func (s *ActionsService) ListWorkflows(ctx context.Context, owner, repo string,
 // GetWorkflowByID gets a specific workflow by ID.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-a-workflow
 func (s *ActionsService) GetWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Workflow, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v", owner, repo, workflowID)
@@ -94,7 +94,7 @@ func (s *ActionsService) GetWorkflowByID(ctx context.Context, owner, repo string
 // GetWorkflowByFileName gets a specific workflow by file name.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-a-workflow
 func (s *ActionsService) GetWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Workflow, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v", owner, repo, workflowFileName)
@@ -118,7 +118,7 @@ func (s *ActionsService) getWorkflow(ctx context.Context, url string) (*Workflow
 // GetWorkflowUsageByID gets a specific workflow usage by ID in the unit of billable milliseconds.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-workflow-usage
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-workflow-usage
 func (s *ActionsService) GetWorkflowUsageByID(ctx context.Context, owner, repo string, workflowID int64) (*WorkflowUsage, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/timing", owner, repo, workflowID)
@@ -127,7 +127,7 @@ func (s *ActionsService) GetWorkflowUsageByID(ctx context.Context, owner, repo s
 // GetWorkflowUsageByFileName gets a specific workflow usage by file name in the unit of billable milliseconds.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#get-workflow-usage
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#get-workflow-usage
 func (s *ActionsService) GetWorkflowUsageByFileName(ctx context.Context, owner, repo, workflowFileName string) (*WorkflowUsage, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/timing", owner, repo, workflowFileName)
@@ -151,7 +151,7 @@ func (s *ActionsService) getWorkflowUsage(ctx context.Context, url string) (*Wor
 // CreateWorkflowDispatchEventByID manually triggers a GitHub Actions workflow run.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-workflow-dispatch-event
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#create-a-workflow-dispatch-event
 func (s *ActionsService) CreateWorkflowDispatchEventByID(ctx context.Context, owner, repo string, workflowID int64, event CreateWorkflowDispatchEventRequest) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/dispatches", owner, repo, workflowID)
@@ -160,7 +160,7 @@ func (s *ActionsService) CreateWorkflowDispatchEventByID(ctx context.Context, ow
 // CreateWorkflowDispatchEventByFileName manually triggers a GitHub Actions workflow run.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#create-a-workflow-dispatch-event
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#create-a-workflow-dispatch-event
 func (s *ActionsService) CreateWorkflowDispatchEventByFileName(ctx context.Context, owner, repo, workflowFileName string, event CreateWorkflowDispatchEventRequest) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/dispatches", owner, repo, workflowFileName)
@@ -178,7 +178,7 @@ func (s *ActionsService) createWorkflowDispatchEvent(ctx context.Context, url st
 // EnableWorkflowByID enables a workflow and sets the state of the workflow to "active".
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#enable-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#enable-a-workflow
 func (s *ActionsService) EnableWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/enable", owner, repo, workflowID)
 	return s.doNewPutRequest(ctx, u)
@@ -186,7 +186,7 @@ func (s *ActionsService) EnableWorkflowByID(ctx context.Context, owner, repo str
 // EnableWorkflowByFileName enables a workflow and sets the state of the workflow to "active".
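A sketch of triggering a workflow_dispatch run by file name. Per the doc fix above, Ref must name a branch or tag; the input key here is a hypothetical one declared in the workflow file:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	event := github.CreateWorkflowDispatchEventRequest{
		Ref: "main", // a branch or tag; commit SHAs are not accepted
		Inputs: map[string]interface{}{
			"environment": "staging", // hypothetical input declared in ci.yml
		},
	}
	if _, err := gh.Actions.CreateWorkflowDispatchEventByFileName(ctx, "my-org", "my-repo", "ci.yml", event); err != nil {
		log.Fatal(err)
	}
}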
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#enable-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#enable-a-workflow
 func (s *ActionsService) EnableWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/enable", owner, repo, workflowFileName)
 	return s.doNewPutRequest(ctx, u)
@@ -194,7 +194,7 @@ func (s *ActionsService) EnableWorkflowByFileName(ctx context.Context, owner, re
 // DisableWorkflowByID disables a workflow and sets the state of the workflow to "disabled_manually".
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#disable-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#disable-a-workflow
 func (s *ActionsService) DisableWorkflowByID(ctx context.Context, owner, repo string, workflowID int64) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/disable", owner, repo, workflowID)
 	return s.doNewPutRequest(ctx, u)
@@ -202,7 +202,7 @@ func (s *ActionsService) DisableWorkflowByID(ctx context.Context, owner, repo st
 // DisableWorkflowByFileName disables a workflow and sets the state of the workflow to "disabled_manually".
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/actions/#disable-a-workflow
+// GitHub API docs: https://docs.github.com/en/rest/actions/workflows#disable-a-workflow
 func (s *ActionsService) DisableWorkflowByFileName(ctx context.Context, owner, repo, workflowFileName string) (*Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/actions/workflows/%v/disable", owner, repo, workflowFileName)
 	return s.doNewPutRequest(ctx, u)
diff --git a/vendor/github.com/google/go-github/v42/github/activity.go b/vendor/github.com/google/go-github/v45/github/activity.go
similarity index 96%
rename from vendor/github.com/google/go-github/v42/github/activity.go
rename to vendor/github.com/google/go-github/v45/github/activity.go
index e683afb99b..f99ecfcdff 100644
--- a/vendor/github.com/google/go-github/v42/github/activity.go
+++ b/vendor/github.com/google/go-github/v45/github/activity.go
@@ -10,7 +10,7 @@ import "context"
 // ActivityService handles communication with the activity related
 // methods of the GitHub API.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/
+// GitHub API docs: https://docs.github.com/en/rest/activity/
 type ActivityService service
 // FeedLink represents a link to a related resource.
diff --git a/vendor/github.com/google/go-github/v42/github/activity_events.go b/vendor/github.com/google/go-github/v45/github/activity_events.go
similarity index 79%
rename from vendor/github.com/google/go-github/v42/github/activity_events.go
rename to vendor/github.com/google/go-github/v45/github/activity_events.go
index 19dc15cfe6..d6f0f043b0 100644
--- a/vendor/github.com/google/go-github/v42/github/activity_events.go
+++ b/vendor/github.com/google/go-github/v45/github/activity_events.go
@@ -12,7 +12,7 @@ import (
 // ListEvents drinks from the firehose of all public events across GitHub.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-public-events
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events
 func (s *ActivityService) ListEvents(ctx context.Context, opts *ListOptions) ([]*Event, *Response, error) {
 	u, err := addOptions("events", opts)
 	if err != nil {
@@ -35,7 +35,7 @@ func (s *ActivityService) ListEvents(ctx context.Context, opts *ListOptions) ([]
 // ListRepositoryEvents lists events for a repository.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repository-events
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-repository-events
 func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Event, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/events", owner, repo)
 	u, err := addOptions(u, opts)
@@ -59,7 +59,7 @@ func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo
 // ListIssueEventsForRepository lists issue events for a repository.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issue-events-for-a-repository
+// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events-for-a-repository
 func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owner, repo string, opts *ListOptions) ([]*IssueEvent, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
 	u, err := addOptions(u, opts)
@@ -83,7 +83,7 @@ func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owne
 // ListEventsForRepoNetwork lists public events for a network of repositories.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-public-events-for-a-network-of-repositories
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-for-a-network-of-repositories
 func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Event, *Response, error) {
 	u := fmt.Sprintf("networks/%v/%v/events", owner, repo)
 	u, err := addOptions(u, opts)
@@ -107,7 +107,7 @@ func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, r
 // ListEventsForOrganization lists public events for an organization.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-public-organization-events
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-organization-events
 func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org string, opts *ListOptions) ([]*Event, *Response, error) {
 	u := fmt.Sprintf("orgs/%v/events", org)
 	u, err := addOptions(u, opts)
@@ -132,8 +132,8 @@ func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org str
 // ListEventsPerformedByUser lists the events performed by a user. If publicOnly is
 // true, only public events will be returned.
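The event listers share one shape: a slice of *Event plus a paginated *Response. A sketch printing recent repository events (names hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // unauthenticated works for public events, with lower rate limits

	events, _, err := gh.Activity.ListRepositoryEvents(ctx, "my-org", "my-repo",
		&github.ListOptions{PerPage: 30})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Println(e.GetType(), e.GetActor().GetLogin(), e.GetCreatedAt())
	}
}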
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-events-for-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-public-events-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-events-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-for-a-user
 func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user string, publicOnly bool, opts *ListOptions) ([]*Event, *Response, error) {
 	var u string
 	if publicOnly {
@@ -163,8 +163,8 @@ func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user st
 // ListEventsReceivedByUser lists the events received by a user. If publicOnly is
 // true, only public events will be returned.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-events-received-by-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-public-events-received-by-a-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-events-received-by-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-public-events-received-by-a-user
 func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user string, publicOnly bool, opts *ListOptions) ([]*Event, *Response, error) {
 	var u string
 	if publicOnly {
@@ -194,7 +194,7 @@ func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user str
 // ListUserEventsForOrganization provides the user’s organization dashboard. You
 // must be authenticated as the user to view this.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-organization-events-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/events#list-organization-events-for-the-authenticated-user
 func (s *ActivityService) ListUserEventsForOrganization(ctx context.Context, org, user string, opts *ListOptions) ([]*Event, *Response, error) {
 	u := fmt.Sprintf("users/%v/events/orgs/%v", user, org)
 	u, err := addOptions(u, opts)
diff --git a/vendor/github.com/google/go-github/v42/github/activity_notifications.go b/vendor/github.com/google/go-github/v45/github/activity_notifications.go
similarity index 82%
rename from vendor/github.com/google/go-github/v42/github/activity_notifications.go
rename to vendor/github.com/google/go-github/v45/github/activity_notifications.go
index 009cc5e32a..38a3184536 100644
--- a/vendor/github.com/google/go-github/v42/github/activity_notifications.go
+++ b/vendor/github.com/google/go-github/v45/github/activity_notifications.go
@@ -19,7 +19,7 @@ type Notification struct {
 	// Reason identifies the event that triggered the notification.
 	//
-	// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity#notification-reasons
+	// GitHub API docs: https://docs.github.com/en/rest/activity#notification-reasons
 	Reason *string `json:"reason,omitempty"`
 	Unread *bool `json:"unread,omitempty"`
@@ -49,7 +49,7 @@ type NotificationListOptions struct {
 // ListNotifications lists all notifications for the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-notifications-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#list-notifications-for-the-authenticated-user
 func (s *ActivityService) ListNotifications(ctx context.Context, opts *NotificationListOptions) ([]*Notification, *Response, error) {
 	u := "notifications"
 	u, err := addOptions(u, opts)
@@ -74,7 +74,7 @@ func (s *ActivityService) ListNotifications(ctx context.Context, opts *Notificat
 // ListRepositoryNotifications lists all notifications in a given repository
 // for the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repository-notifications-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#list-repository-notifications-for-the-authenticated-user
 func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner, repo string, opts *NotificationListOptions) ([]*Notification, *Response, error) {
 	u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
 	u, err := addOptions(u, opts)
@@ -102,7 +102,7 @@ type markReadOptions struct {
 // MarkNotificationsRead marks all notifications up to lastRead as read.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity#mark-as-read
+// GitHub API docs: https://docs.github.com/en/rest/activity#mark-as-read
 func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead time.Time) (*Response, error) {
 	opts := &markReadOptions{
 		LastReadAt: lastRead,
@@ -118,7 +118,7 @@ func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead ti
 // MarkRepositoryNotificationsRead marks all notifications up to lastRead in
 // the specified repository as read.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#mark-repository-notifications-as-read
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#mark-repository-notifications-as-read
 func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead time.Time) (*Response, error) {
 	opts := &markReadOptions{
 		LastReadAt: lastRead,
@@ -134,7 +134,7 @@ func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, o
 // GetThread gets the specified notification thread.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#get-a-thread
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#get-a-thread
 func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notification, *Response, error) {
 	u := fmt.Sprintf("notifications/threads/%v", id)
@@ -154,7 +154,7 @@ func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notificati
 // MarkThreadRead marks the specified thread as read.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#mark-a-thread-as-read
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#mark-a-thread-as-read
 func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Response, error) {
 	u := fmt.Sprintf("notifications/threads/%v", id)
@@ -169,7 +169,7 @@ func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Respo
 // GetThreadSubscription checks to see if the authenticated user is subscribed
 // to a thread.
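A sketch of the read-and-acknowledge cycle these methods support, assuming an authenticated client:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	// All: false (the default) returns only unread notifications.
	notifs, _, err := gh.Activity.ListNotifications(ctx, &github.NotificationListOptions{All: false})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range notifs {
		fmt.Println(n.GetReason(), n.GetSubject().GetTitle())
	}

	// Acknowledge everything up to the current instant.
	if _, err := gh.Activity.MarkNotificationsRead(ctx, time.Now()); err != nil {
		log.Fatal(err)
	}
}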
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#get-a-thread-subscription-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#get-a-thread-subscription-for-the-authenticated-user
 func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string) (*Subscription, *Response, error) {
 	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
@@ -190,7 +190,7 @@ func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string)
 // SetThreadSubscription sets the subscription for the specified thread for the
 // authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#set-a-thread-subscription
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#set-a-thread-subscription
 func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string, subscription *Subscription) (*Subscription, *Response, error) {
 	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
@@ -211,7 +211,7 @@ func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string,
 // DeleteThreadSubscription deletes the subscription for the specified thread
 // for the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#delete-a-thread-subscription
+// GitHub API docs: https://docs.github.com/en/rest/activity/notifications#delete-a-thread-subscription
 func (s *ActivityService) DeleteThreadSubscription(ctx context.Context, id string) (*Response, error) {
 	u := fmt.Sprintf("notifications/threads/%v/subscription", id)
 	req, err := s.client.NewRequest("DELETE", u, nil)
diff --git a/vendor/github.com/google/go-github/v42/github/activity_star.go b/vendor/github.com/google/go-github/v45/github/activity_star.go
similarity index 82%
rename from vendor/github.com/google/go-github/v42/github/activity_star.go
rename to vendor/github.com/google/go-github/v45/github/activity_star.go
index ad07aac752..65a316f532 100644
--- a/vendor/github.com/google/go-github/v42/github/activity_star.go
+++ b/vendor/github.com/google/go-github/v45/github/activity_star.go
@@ -25,7 +25,7 @@ type Stargazer struct {
 // ListStargazers lists people who have starred the specified repo.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-stargazers
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-stargazers
 func (s *ActivityService) ListStargazers(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Stargazer, *Response, error) {
 	u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo)
 	u, err := addOptions(u, opts)
@@ -67,8 +67,8 @@ type ActivityListStarredOptions struct {
 // ListStarred lists all the repos starred by a user. Passing the empty string
 // will list the starred repositories for the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repositories-starred-by-the-authenticated-user
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repositories-starred-by-a-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-repositories-starred-by-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#list-repositories-starred-by-a-user
 func (s *ActivityService) ListStarred(ctx context.Context, user string, opts *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
 	var u string
 	if user != "" {
@@ -101,13 +101,14 @@ func (s *ActivityService) ListStarred(ctx context.Context, user string, opts *Ac
 // IsStarred checks if a repository is starred by authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#check-if-a-repository-is-starred-by-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#check-if-a-repository-is-starred-by-the-authenticated-user
 func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bool, *Response, error) {
 	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
 	req, err := s.client.NewRequest("GET", u, nil)
 	if err != nil {
 		return false, nil, err
 	}
+
 	resp, err := s.client.Do(ctx, req, nil)
 	starred, err := parseBoolResponse(err)
 	return starred, resp, err
@@ -115,24 +116,26 @@ func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bo
 // Star a repository as the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#star-a-repository-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#star-a-repository-for-the-authenticated-user
 func (s *ActivityService) Star(ctx context.Context, owner, repo string) (*Response, error) {
 	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
 	req, err := s.client.NewRequest("PUT", u, nil)
 	if err != nil {
 		return nil, err
 	}
+
 	return s.client.Do(ctx, req, nil)
 }
 // Unstar a repository as the authenticated user.
 //
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#unstar-a-repository-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/activity/starring#unstar-a-repository-for-the-authenticated-user
 func (s *ActivityService) Unstar(ctx context.Context, owner, repo string) (*Response, error) {
 	u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
 	req, err := s.client.NewRequest("DELETE", u, nil)
 	if err != nil {
 		return nil, err
 	}
+
 	return s.client.Do(ctx, req, nil)
 }
diff --git a/vendor/github.com/google/go-github/v42/github/activity_watching.go b/vendor/github.com/google/go-github/v45/github/activity_watching.go
similarity index 84%
rename from vendor/github.com/google/go-github/v42/github/activity_watching.go
rename to vendor/github.com/google/go-github/v45/github/activity_watching.go
index 16cceb53e5..2d6fafcc79 100644
--- a/vendor/github.com/google/go-github/v42/github/activity_watching.go
+++ b/vendor/github.com/google/go-github/v45/github/activity_watching.go
@@ -27,7 +27,7 @@ type Subscription struct {
 // ListWatchers lists watchers of a particular repo.
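IsStarred returns a plain bool parsed from the 204/404 status, which makes a check-then-star flow straightforward. A sketch (names hypothetical, authenticated client assumed):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	gh := github.NewClient(nil) // placeholder; needs authentication in practice

	starred, _, err := gh.Activity.IsStarred(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	if !starred {
		if _, err := gh.Activity.Star(ctx, "my-org", "my-repo"); err != nil {
			log.Fatal(err)
		}
		fmt.Println("starred my-org/my-repo")
	}
}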
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-watchers +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-watchers func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, opts *ListOptions) ([]*User, *Response, error) { u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo) u, err := addOptions(u, opts) @@ -52,8 +52,8 @@ func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, // ListWatched lists the repositories the specified user is watching. Passing // the empty string will fetch watched repos for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repositories-watched-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#list-repositories-watched-by-a-user +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-repositories-watched-by-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#list-repositories-watched-by-a-user func (s *ActivityService) ListWatched(ctx context.Context, user string, opts *ListOptions) ([]*Repository, *Response, error) { var u string if user != "" { @@ -84,7 +84,7 @@ func (s *ActivityService) ListWatched(ctx context.Context, user string, opts *Li // repository for the authenticated user. If the authenticated user is not // watching the repository, a nil Subscription is returned. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#get-a-repository-subscription +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#get-a-repository-subscription func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, repo string) (*Subscription, *Response, error) { u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) @@ -111,7 +111,7 @@ func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, // To ignore notifications made within a repository, set subscription.Ignored to true. // To stop watching a repository, use DeleteRepositorySubscription. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#set-a-repository-subscription +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#set-a-repository-subscription func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, repo string, subscription *Subscription) (*Subscription, *Response, error) { u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) @@ -135,7 +135,7 @@ func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, // This is used to stop watching a repository. To control whether or not to // receive notifications from a repository, use SetRepositorySubscription. 
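The watching endpoints compose the same way. A hypothetical sketch, reusing the client and imports from the starring example, that mutes a repository's notifications without unwatching it; Subscription.Ignored is the field called out in the comment above.

func muteRepo(ctx context.Context, client *github.Client, owner, repo string) error {
	// Ignored=true keeps the subscription but silences notifications;
	// DeleteRepositorySubscription would stop watching entirely.
	sub := &github.Subscription{Ignored: github.Bool(true)}
	_, _, err := client.Activity.SetRepositorySubscription(ctx, owner, repo, sub)
	return err
}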
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/activity/#delete-a-repository-subscription +// GitHub API docs: https://docs.github.com/en/rest/activity/watching#delete-a-repository-subscription func (s *ActivityService) DeleteRepositorySubscription(ctx context.Context, owner, repo string) (*Response, error) { u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/admin.go b/vendor/github.com/google/go-github/v45/github/admin.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/admin.go rename to vendor/github.com/google/go-github/v45/github/admin.go index 7bf0f22b13..1b28ef64c7 100644 --- a/vendor/github.com/google/go-github/v42/github/admin.go +++ b/vendor/github.com/google/go-github/v45/github/admin.go @@ -14,7 +14,7 @@ import ( // GitHub API. These API routes are normally only accessible for GitHub // Enterprise installations. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise/ +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin type AdminService service // TeamLDAPMapping represents the mapping between a GitHub team and an LDAP group. @@ -82,7 +82,7 @@ func (m Enterprise) String() string { // UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise/ldap/#update-ldap-mapping-for-a-user +// GitHub API docs: https://docs.github.com/en/enterprise-server/rest/enterprise-admin/ldap#update-ldap-mapping-for-a-user func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) { u := fmt.Sprintf("admin/ldap/users/%v/mapping", user) req, err := s.client.NewRequest("PATCH", u, mapping) @@ -101,7 +101,7 @@ func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, m // UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise/ldap/#update-ldap-mapping-for-a-team +// GitHub API docs: https://docs.github.com/en/rest/enterprise/ldap/#update-ldap-mapping-for-a-team func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int64, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) { u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team) req, err := s.client.NewRequest("PATCH", u, mapping) diff --git a/vendor/github.com/google/go-github/v42/github/admin_orgs.go b/vendor/github.com/google/go-github/v45/github/admin_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/admin_orgs.go rename to vendor/github.com/google/go-github/v45/github/admin_orgs.go diff --git a/vendor/github.com/google/go-github/v42/github/admin_stats.go b/vendor/github.com/google/go-github/v45/github/admin_stats.go similarity index 97% rename from vendor/github.com/google/go-github/v42/github/admin_stats.go rename to vendor/github.com/google/go-github/v45/github/admin_stats.go index 0744ffa415..ef294f4479 100644 --- a/vendor/github.com/google/go-github/v42/github/admin_stats.go +++ b/vendor/github.com/google/go-github/v45/github/admin_stats.go @@ -153,7 +153,7 @@ func (s RepoStats) String() string { // Please note that this is only available to site administrators, // otherwise it will error with a 404 not found (instead of 401 or 403). // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise-admin/admin_stats/ +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/admin_stats/ func (s *AdminService) GetAdminStats(ctx context.Context) (*AdminStats, *Response, error) { u := fmt.Sprintf("enterprise/stats/all") req, err := s.client.NewRequest("GET", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/admin_users.go b/vendor/github.com/google/go-github/v45/github/admin_users.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/admin_users.go rename to vendor/github.com/google/go-github/v45/github/admin_users.go diff --git a/vendor/github.com/google/go-github/v42/github/apps.go b/vendor/github.com/google/go-github/v45/github/apps.go similarity index 86% rename from vendor/github.com/google/go-github/v42/github/apps.go rename to vendor/github.com/google/go-github/v45/github/apps.go index 3823a12196..dff9b210f2 100644 --- a/vendor/github.com/google/go-github/v42/github/apps.go +++ b/vendor/github.com/google/go-github/v45/github/apps.go @@ -14,7 +14,7 @@ import ( // AppsService provides access to the installation related functions // in the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/ +// GitHub API docs: https://docs.github.com/en/rest/apps/ type AppsService service // App represents a GitHub App. @@ -59,8 +59,8 @@ type InstallationTokenOptions struct { // InstallationPermissions lists the repository and organization permissions for an installation. 
// // Permission names taken from: -// https://docs.github.com/en/enterprise-server@3.0/rest/reference/apps#create-an-installation-access-token-for-an-app -// https://docs.github.com/en/rest/reference/apps#create-an-installation-access-token-for-an-app +// https://docs.github.com/en/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app +// https://docs.github.com/en/rest/apps#create-an-installation-access-token-for-an-app type InstallationPermissions struct { Actions *string `json:"actions,omitempty"` Administration *string `json:"administration,omitempty"` @@ -148,8 +148,8 @@ func (i Installation) String() string { // You can find this on the settings page for your GitHub App // (e.g., https://github.com/settings/apps/:app_slug). // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-the-authenticated-app -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-an-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-app func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, error) { var u string if appSlug != "" { @@ -174,7 +174,7 @@ func (s *AppsService) Get(ctx context.Context, appSlug string) (*App, *Response, // ListInstallations lists the installations that the current GitHub App has. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-installations-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#list-installations-for-the-authenticated-app func (s *AppsService) ListInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) { u, err := addOptions("app/installations", opts) if err != nil { @@ -197,14 +197,14 @@ func (s *AppsService) ListInstallations(ctx context.Context, opts *ListOptions) // GetInstallation returns the specified installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-an-installation-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-installation-for-the-authenticated-app func (s *AppsService) GetInstallation(ctx context.Context, id int64) (*Installation, *Response, error) { return s.getInstallation(ctx, fmt.Sprintf("app/installations/%v", id)) } // ListUserInstallations lists installations that are accessible to the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-app-installations-accessible-to-the-user-access-token +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-app-installations-accessible-to-the-user-access-token func (s *AppsService) ListUserInstallations(ctx context.Context, opts *ListOptions) ([]*Installation, *Response, error) { u, err := addOptions("user/installations", opts) if err != nil { @@ -229,7 +229,7 @@ func (s *AppsService) ListUserInstallations(ctx context.Context, opts *ListOptio // SuspendInstallation suspends the specified installation. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#suspend-an-app-installation +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#suspend-an-app-installation func (s *AppsService) SuspendInstallation(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("app/installations/%v/suspended", id) @@ -243,7 +243,7 @@ func (s *AppsService) SuspendInstallation(ctx context.Context, id int64) (*Respo // UnsuspendInstallation unsuspends the specified installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#unsuspend-an-app-installation +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#unsuspend-an-app-installation func (s *AppsService) UnsuspendInstallation(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("app/installations/%v/suspended", id) @@ -257,7 +257,7 @@ func (s *AppsService) UnsuspendInstallation(ctx context.Context, id int64) (*Res // DeleteInstallation deletes the specified installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#delete-an-installation-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#delete-an-installation-for-the-authenticated-app func (s *AppsService) DeleteInstallation(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("app/installations/%v", id) @@ -271,7 +271,7 @@ func (s *AppsService) DeleteInstallation(ctx context.Context, id int64) (*Respon // CreateInstallationToken creates a new installation token. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#create-an-installation-access-token-for-an-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#create-an-installation-access-token-for-an-app func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64, opts *InstallationTokenOptions) (*InstallationToken, *Response, error) { u := fmt.Sprintf("app/installations/%v/access_tokens", id) @@ -291,7 +291,7 @@ func (s *AppsService) CreateInstallationToken(ctx context.Context, id int64, opt // CreateAttachment creates a new attachment on user comment containing a url. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#create-a-content-attachment +// TODO: Find GitHub API docs. func (s *AppsService) CreateAttachment(ctx context.Context, contentReferenceID int64, title, body string) (*Attachment, *Response, error) { u := fmt.Sprintf("content_references/%v/attachments", contentReferenceID) payload := &Attachment{Title: String(title), Body: String(body)} @@ -314,14 +314,14 @@ func (s *AppsService) CreateAttachment(ctx context.Context, contentReferenceID i // FindOrganizationInstallation finds the organization's installation information. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-an-organization-installation-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-an-organization-installation-for-the-authenticated-app func (s *AppsService) FindOrganizationInstallation(ctx context.Context, org string) (*Installation, *Response, error) { return s.getInstallation(ctx, fmt.Sprintf("orgs/%v/installation", org)) } // FindRepositoryInstallation finds the repository's installation information. 
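A sketch of the installation-token flow, assuming appClient is already authenticated as the GitHub App (app JWT transport); the installation and repository IDs are placeholders.

func newInstallationToken(ctx context.Context, appClient *github.Client, installationID int64) (string, error) {
	opts := &github.InstallationTokenOptions{
		// Optionally scope the token down to specific repositories.
		RepositoryIDs: []int64{1296269}, // placeholder ID
	}
	tok, _, err := appClient.Apps.CreateInstallationToken(ctx, installationID, opts)
	if err != nil {
		return "", err
	}
	return tok.GetToken(), nil
}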
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-a-repository-installation-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-repository-installation-for-the-authenticated-app func (s *AppsService) FindRepositoryInstallation(ctx context.Context, owner, repo string) (*Installation, *Response, error) { return s.getInstallation(ctx, fmt.Sprintf("repos/%v/%v/installation", owner, repo)) } @@ -335,7 +335,7 @@ func (s *AppsService) FindRepositoryInstallationByID(ctx context.Context, id int // FindUserInstallation finds the user's installation information. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#get-a-user-installation-for-the-authenticated-app +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#get-a-user-installation-for-the-authenticated-app func (s *AppsService) FindUserInstallation(ctx context.Context, user string) (*Installation, *Response, error) { return s.getInstallation(ctx, fmt.Sprintf("users/%v/installation", user)) } diff --git a/vendor/github.com/google/go-github/v42/github/apps_hooks.go b/vendor/github.com/google/go-github/v45/github/apps_hooks.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/apps_hooks.go rename to vendor/github.com/google/go-github/v45/github/apps_hooks.go index ed8396f442..e3bd2afc03 100644 --- a/vendor/github.com/google/go-github/v42/github/apps_hooks.go +++ b/vendor/github.com/google/go-github/v45/github/apps_hooks.go @@ -12,7 +12,7 @@ import ( // GetHookConfig returns the webhook configuration for a GitHub App. // The underlying transport must be authenticated as an app. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps#get-a-webhook-configuration-for-an-app +// GitHub API docs: https://docs.github.com/en/rest/apps#get-a-webhook-configuration-for-an-app func (s *AppsService) GetHookConfig(ctx context.Context) (*HookConfig, *Response, error) { req, err := s.client.NewRequest("GET", "app/hook/config", nil) if err != nil { @@ -31,7 +31,7 @@ func (s *AppsService) GetHookConfig(ctx context.Context) (*HookConfig, *Response // UpdateHookConfig updates the webhook configuration for a GitHub App. // The underlying transport must be authenticated as an app. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps#update-a-webhook-configuration-for-an-app +// GitHub API docs: https://docs.github.com/en/rest/apps#update-a-webhook-configuration-for-an-app func (s *AppsService) UpdateHookConfig(ctx context.Context, config *HookConfig) (*HookConfig, *Response, error) { req, err := s.client.NewRequest("PATCH", "app/hook/config", config) if err != nil { diff --git a/vendor/github.com/google/go-github/v42/github/apps_hooks_deliveries.go b/vendor/github.com/google/go-github/v45/github/apps_hooks_deliveries.go similarity index 84% rename from vendor/github.com/google/go-github/v42/github/apps_hooks_deliveries.go rename to vendor/github.com/google/go-github/v45/github/apps_hooks_deliveries.go index 0b631b80e8..33102f36d2 100644 --- a/vendor/github.com/google/go-github/v42/github/apps_hooks_deliveries.go +++ b/vendor/github.com/google/go-github/v45/github/apps_hooks_deliveries.go @@ -12,7 +12,7 @@ import ( // ListHookDeliveries lists deliveries of an App webhook. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/apps#list-deliveries-for-an-app-webhook +// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#list-deliveries-for-an-app-webhook func (s *AppsService) ListHookDeliveries(ctx context.Context, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { u, err := addOptions("app/hook/deliveries", opts) if err != nil { @@ -35,7 +35,7 @@ func (s *AppsService) ListHookDeliveries(ctx context.Context, opts *ListCursorOp // GetHookDelivery returns the App webhook delivery with the specified ID. // -// GitHub API docs: https://docs.github.com/en/rest/reference/apps#get-a-delivery-for-an-app-webhook +// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#get-a-delivery-for-an-app-webhook func (s *AppsService) GetHookDelivery(ctx context.Context, deliveryID int64) (*HookDelivery, *Response, error) { u := fmt.Sprintf("app/hook/deliveries/%v", deliveryID) req, err := s.client.NewRequest("GET", u, nil) @@ -54,7 +54,7 @@ func (s *AppsService) GetHookDelivery(ctx context.Context, deliveryID int64) (*H // RedeliverHookDelivery redelivers a delivery for an App webhook. // -// GitHub API docs: https://docs.github.com/en/rest/reference/apps#redeliver-a-delivery-for-an-app-webhook +// GitHub API docs: https://docs.github.com/en/rest/apps/webhooks#redeliver-a-delivery-for-an-app-webhook func (s *AppsService) RedeliverHookDelivery(ctx context.Context, deliveryID int64) (*HookDelivery, *Response, error) { u := fmt.Sprintf("app/hook/deliveries/%v/attempts", deliveryID) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/apps_installation.go b/vendor/github.com/google/go-github/v45/github/apps_installation.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/apps_installation.go rename to vendor/github.com/google/go-github/v45/github/apps_installation.go index 521860d6eb..b619080713 100644 --- a/vendor/github.com/google/go-github/v42/github/apps_installation.go +++ b/vendor/github.com/google/go-github/v45/github/apps_installation.go @@ -19,7 +19,7 @@ type ListRepositories struct { // ListRepos lists the repositories that are accessible to the authenticated installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-repositories-accessible-to-the-app-installation +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-app-installation func (s *AppsService) ListRepos(ctx context.Context, opts *ListOptions) (*ListRepositories, *Response, error) { u, err := addOptions("installation/repositories", opts) if err != nil { @@ -52,7 +52,7 @@ func (s *AppsService) ListRepos(ctx context.Context, opts *ListOptions) (*ListRe // ListUserRepos lists repositories that are accessible // to the authenticated user for an installation. 
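For the webhook-delivery endpoints, a sketch (imports as in the earlier example; HookDelivery's generated getters are assumed, since they are outside this hunk) that prints recent deliveries for an app-authenticated client.

func printRecentDeliveries(ctx context.Context, appClient *github.Client) error {
	deliveries, _, err := appClient.Apps.ListHookDeliveries(ctx, &github.ListCursorOptions{PerPage: 20})
	if err != nil {
		return err
	}
	for _, d := range deliveries {
		// A failed delivery could be retried with RedeliverHookDelivery(ctx, d.GetID()).
		fmt.Printf("%d %s %s\n", d.GetID(), d.GetEvent(), d.GetStatus())
	}
	return nil
}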
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-repositories-accessible-to-the-user-access-token +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#list-repositories-accessible-to-the-user-access-token func (s *AppsService) ListUserRepos(ctx context.Context, id int64, opts *ListOptions) (*ListRepositories, *Response, error) { u := fmt.Sprintf("user/installations/%v/repositories", id) u, err := addOptions(u, opts) @@ -84,7 +84,7 @@ func (s *AppsService) ListUserRepos(ctx context.Context, id int64, opts *ListOpt // AddRepository adds a single repository to an installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#add-a-repository-to-an-app-installation +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#add-a-repository-to-an-app-installation func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int64) (*Repository, *Response, error) { u := fmt.Sprintf("user/installations/%v/repositories/%v", instID, repoID) req, err := s.client.NewRequest("PUT", u, nil) @@ -103,7 +103,7 @@ func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int64) ( // RemoveRepository removes a single repository from an installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#remove-a-repository-from-an-app-installation +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#remove-a-repository-from-an-app-installation func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int64) (*Response, error) { u := fmt.Sprintf("user/installations/%v/repositories/%v", instID, repoID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -116,7 +116,7 @@ func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int64 // RevokeInstallationToken revokes an installation token. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#revoke-an-installation-access-token +// GitHub API docs: https://docs.github.com/en/rest/apps/installations#revoke-an-installation-access-token func (s *AppsService) RevokeInstallationToken(ctx context.Context) (*Response, error) { u := "installation/token" req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/apps_manifest.go b/vendor/github.com/google/go-github/v45/github/apps_manifest.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/apps_manifest.go rename to vendor/github.com/google/go-github/v45/github/apps_manifest.go index 164f493999..fa4c85379c 100644 --- a/vendor/github.com/google/go-github/v42/github/apps_manifest.go +++ b/vendor/github.com/google/go-github/v45/github/apps_manifest.go @@ -31,7 +31,7 @@ type AppConfig struct { // CompleteAppManifest completes the App manifest handshake flow for the given // code. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#create-a-github-app-from-a-manifest +// GitHub API docs: https://docs.github.com/en/rest/apps/apps#create-a-github-app-from-a-manifest func (s *AppsService) CompleteAppManifest(ctx context.Context, code string) (*AppConfig, *Response, error) { u := fmt.Sprintf("app-manifests/%s/conversions", code) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/apps_marketplace.go b/vendor/github.com/google/go-github/v45/github/apps_marketplace.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/apps_marketplace.go rename to vendor/github.com/google/go-github/v45/github/apps_marketplace.go index 13d09f2efb..8253013684 100644 --- a/vendor/github.com/google/go-github/v42/github/apps_marketplace.go +++ b/vendor/github.com/google/go-github/v45/github/apps_marketplace.go @@ -13,7 +13,7 @@ import ( // MarketplaceService handles communication with the marketplace related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps#marketplace +// GitHub API docs: https://docs.github.com/en/rest/apps#marketplace type MarketplaceService struct { client *Client // Stubbed controls whether endpoints that return stubbed data are used @@ -21,7 +21,7 @@ type MarketplaceService struct { // for testing your GitHub Apps. Stubbed data is hard-coded and will not // change based on actual subscriptions. // - // GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps#testing-with-stubbed-endpoints + // GitHub API docs: https://docs.github.com/en/rest/apps#testing-with-stubbed-endpoints Stubbed bool } @@ -77,7 +77,7 @@ type MarketplacePlanAccount struct { // ListPlans lists all plans for your Marketplace listing. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps#list-plans +// GitHub API docs: https://docs.github.com/en/rest/apps#list-plans func (s *MarketplaceService) ListPlans(ctx context.Context, opts *ListOptions) ([]*MarketplacePlan, *Response, error) { uri := s.marketplaceURI("plans") u, err := addOptions(uri, opts) @@ -101,7 +101,7 @@ func (s *MarketplaceService) ListPlans(ctx context.Context, opts *ListOptions) ( // ListPlanAccountsForPlan lists all GitHub accounts (user or organization) on a specific plan. // -// GitHub API docs: https://docs.github.com/en/rest/reference/apps#list-accounts-for-a-plan +// GitHub API docs: https://docs.github.com/en/rest/apps#list-accounts-for-a-plan func (s *MarketplaceService) ListPlanAccountsForPlan(ctx context.Context, planID int64, opts *ListOptions) ([]*MarketplacePlanAccount, *Response, error) { uri := s.marketplaceURI(fmt.Sprintf("plans/%v/accounts", planID)) u, err := addOptions(uri, opts) @@ -125,7 +125,7 @@ func (s *MarketplaceService) ListPlanAccountsForPlan(ctx context.Context, planID // GetPlanAccountForAccount get GitHub account (user or organization) associated with an account. 
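The Stubbed toggle above switches every MarketplaceService call to the hard-coded test endpoints; a minimal sketch:

func listPlansStubbed(ctx context.Context, client *github.Client) ([]*github.MarketplacePlan, error) {
	// Use the stubbed test endpoints instead of live subscription data.
	client.Marketplace.Stubbed = true
	plans, _, err := client.Marketplace.ListPlans(ctx, nil)
	return plans, err
}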
// -// GitHub API docs: https://docs.github.com/en/rest/reference/apps#get-a-subscription-plan-for-an-account +// GitHub API docs: https://docs.github.com/en/rest/apps#get-a-subscription-plan-for-an-account func (s *MarketplaceService) GetPlanAccountForAccount(ctx context.Context, accountID int64) (*MarketplacePlanAccount, *Response, error) { uri := s.marketplaceURI(fmt.Sprintf("accounts/%v", accountID)) @@ -145,8 +145,8 @@ func (s *MarketplaceService) GetPlanAccountForAccount(ctx context.Context, accou // ListMarketplacePurchasesForUser lists all GitHub marketplace purchases made by a user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-subscriptions-for-the-authenticated-user-stubbed -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#list-subscriptions-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/apps/marketplace#list-subscriptions-for-the-authenticated-user-stubbed +// GitHub API docs: https://docs.github.com/en/rest/apps/marketplace#list-subscriptions-for-the-authenticated-user func (s *MarketplaceService) ListMarketplacePurchasesForUser(ctx context.Context, opts *ListOptions) ([]*MarketplacePurchase, *Response, error) { uri := "user/marketplace_purchases" if s.Stubbed { diff --git a/vendor/github.com/google/go-github/v42/github/authorizations.go b/vendor/github.com/google/go-github/v45/github/authorizations.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/authorizations.go rename to vendor/github.com/google/go-github/v45/github/authorizations.go index 76a14c3db1..ea0897e362 100644 --- a/vendor/github.com/google/go-github/v42/github/authorizations.go +++ b/vendor/github.com/google/go-github/v45/github/authorizations.go @@ -12,7 +12,7 @@ import ( // Scope models a GitHub authorization scope. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/oauth/#scopes +// GitHub API docs: https://docs.github.com/en/rest/oauth/#scopes type Scope string // This is the set of scopes for GitHub API V3 @@ -50,7 +50,7 @@ const ( // This service requires HTTP Basic Authentication; it cannot be accessed using // an OAuth token. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/oauth_authorizations/ +// GitHub API docs: https://docs.github.com/en/rest/oauth-authorizations type AuthorizationsService service // Authorization represents an individual GitHub authorization. @@ -121,7 +121,7 @@ func (a AuthorizationRequest) String() string { // fields. That is, you may provide only one of "Scopes", or "AddScopes", or // "RemoveScopes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/oauth_authorizations/#update-an-existing-authorization +// GitHub API docs: https://docs.github.com/en/rest/oauth-authorizations#update-an-existing-authorization type AuthorizationUpdateRequest struct { Scopes []string `json:"scopes,omitempty"` AddScopes []string `json:"add_scopes,omitempty"` @@ -143,7 +143,7 @@ func (a AuthorizationUpdateRequest) String() string { // // The returned Authorization.User field will be populated. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#check-a-token +// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#check-a-token func (s *AuthorizationsService) Check(ctx context.Context, clientID, accessToken string) (*Authorization, *Response, error) { u := fmt.Sprintf("applications/%v/token", clientID) @@ -176,7 +176,7 @@ func (s *AuthorizationsService) Check(ctx context.Context, clientID, accessToken // // The returned Authorization.User field will be populated. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#reset-a-token +// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#reset-a-token func (s *AuthorizationsService) Reset(ctx context.Context, clientID, accessToken string) (*Authorization, *Response, error) { u := fmt.Sprintf("applications/%v/token", clientID) @@ -205,7 +205,7 @@ func (s *AuthorizationsService) Reset(ctx context.Context, clientID, accessToken // username is the OAuth application clientID, and the password is its // clientSecret. Invalid tokens will return a 404 Not Found. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#delete-an-app-token +// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-token func (s *AuthorizationsService) Revoke(ctx context.Context, clientID, accessToken string) (*Response, error) { u := fmt.Sprintf("applications/%v/token", clientID) @@ -226,7 +226,7 @@ func (s *AuthorizationsService) Revoke(ctx context.Context, clientID, accessToke // grant will also delete all OAuth tokens associated with the application for // the user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/apps/#delete-an-app-authorization +// GitHub API docs: https://docs.github.com/en/rest/apps/oauth-applications#delete-an-app-authorization func (s *AuthorizationsService) DeleteGrant(ctx context.Context, clientID, accessToken string) (*Response, error) { u := fmt.Sprintf("applications/%v/grant", clientID) diff --git a/vendor/github.com/google/go-github/v42/github/billing.go b/vendor/github.com/google/go-github/v45/github/billing.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/billing.go rename to vendor/github.com/google/go-github/v45/github/billing.go index 12a79fa60a..d516cd0c29 100644 --- a/vendor/github.com/google/go-github/v42/github/billing.go +++ b/vendor/github.com/google/go-github/v45/github/billing.go @@ -13,13 +13,13 @@ import ( // BillingService provides access to the billing related functions // in the GitHub API. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing +// GitHub API docs: https://docs.github.com/en/rest/billing type BillingService service // ActionBilling represents a GitHub Action billing. type ActionBilling struct { TotalMinutesUsed int `json:"total_minutes_used"` - TotalPaidMinutesUsed int `json:"total_paid_minutes_used"` + TotalPaidMinutesUsed float64 `json:"total_paid_minutes_used"` IncludedMinutes int `json:"included_minutes"` MinutesUsedBreakdown MinutesUsedBreakdown `json:"minutes_used_breakdown"` } @@ -65,7 +65,7 @@ type AdvancedSecurityCommittersBreakdown struct { // GetActionsBillingOrg returns the summary of the free and paid GitHub Actions minutes used for an Org. 
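The OAuth-application endpoints above authenticate with the app's client ID and secret over HTTP Basic auth; a sketch using the library's BasicAuthTransport helper (which exists in go-github but lies outside this hunk):

func checkToken(ctx context.Context, clientID, clientSecret, accessToken string) (*github.Authorization, error) {
	// Basic auth: client ID as the username, client secret as the password.
	tp := github.BasicAuthTransport{Username: clientID, Password: clientSecret}
	client := github.NewClient(tp.Client())
	auth, _, err := client.Authorizations.Check(ctx, clientID, accessToken)
	return auth, err
}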
// -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-github-actions-billing-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-actions-billing-for-an-organization func (s *BillingService) GetActionsBillingOrg(ctx context.Context, org string) (*ActionBilling, *Response, error) { u := fmt.Sprintf("orgs/%v/settings/billing/actions", org) req, err := s.client.NewRequest("GET", u, nil) @@ -79,12 +79,12 @@ func (s *BillingService) GetActionsBillingOrg(ctx context.Context, org string) ( return nil, resp, err } - return actionsOrgBilling, resp, err + return actionsOrgBilling, resp, nil } // GetPackagesBillingOrg returns the free and paid storage used for GitHub Packages in gigabytes for an Org. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-github-packages-billing-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-packages-billing-for-an-organization func (s *BillingService) GetPackagesBillingOrg(ctx context.Context, org string) (*PackageBilling, *Response, error) { u := fmt.Sprintf("orgs/%v/settings/billing/packages", org) req, err := s.client.NewRequest("GET", u, nil) @@ -98,13 +98,13 @@ func (s *BillingService) GetPackagesBillingOrg(ctx context.Context, org string) return nil, resp, err } - return packagesOrgBilling, resp, err + return packagesOrgBilling, resp, nil } // GetStorageBillingOrg returns the estimated paid and estimated total storage used for GitHub Actions // and GitHub Packages in gigabytes for an Org. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-shared-storage-billing-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/billing#get-shared-storage-billing-for-an-organization func (s *BillingService) GetStorageBillingOrg(ctx context.Context, org string) (*StorageBilling, *Response, error) { u := fmt.Sprintf("orgs/%v/settings/billing/shared-storage", org) req, err := s.client.NewRequest("GET", u, nil) @@ -118,12 +118,12 @@ func (s *BillingService) GetStorageBillingOrg(ctx context.Context, org string) ( return nil, resp, err } - return storageOrgBilling, resp, err + return storageOrgBilling, resp, nil } // GetAdvancedSecurityActiveCommittersOrg returns the GitHub Advanced Security active committers for an organization per repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-github-advanced-security-active-committers-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-advanced-security-active-committers-for-an-organization func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Context, org string) (*ActiveCommitters, *Response, error) { u := fmt.Sprintf("orgs/%v/settings/billing/advanced-security", org) req, err := s.client.NewRequest("GET", u, nil) @@ -137,12 +137,12 @@ func (s *BillingService) GetAdvancedSecurityActiveCommittersOrg(ctx context.Cont return nil, resp, err } - return activeOrgCommitters, resp, err + return activeOrgCommitters, resp, nil } // GetActionsBillingUser returns the summary of the free and paid GitHub Actions minutes used for a user. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-github-actions-billing-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-actions-billing-for-a-user func (s *BillingService) GetActionsBillingUser(ctx context.Context, user string) (*ActionBilling, *Response, error) { u := fmt.Sprintf("users/%v/settings/billing/actions", user) req, err := s.client.NewRequest("GET", u, nil) @@ -156,12 +156,12 @@ func (s *BillingService) GetActionsBillingUser(ctx context.Context, user string) return nil, resp, err } - return actionsUserBilling, resp, err + return actionsUserBilling, resp, nil } // GetPackagesBillingUser returns the free and paid storage used for GitHub Packages in gigabytes for a user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-github-packages-billing-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/billing#get-github-packages-billing-for-a-user func (s *BillingService) GetPackagesBillingUser(ctx context.Context, user string) (*PackageBilling, *Response, error) { u := fmt.Sprintf("users/%v/settings/billing/packages", user) req, err := s.client.NewRequest("GET", u, nil) @@ -175,13 +175,13 @@ func (s *BillingService) GetPackagesBillingUser(ctx context.Context, user string return nil, resp, err } - return packagesUserBilling, resp, err + return packagesUserBilling, resp, nil } // GetStorageBillingUser returns the estimated paid and estimated total storage used for GitHub Actions // and GitHub Packages in gigabytes for a user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/billing#get-shared-storage-billing-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/billing#get-shared-storage-billing-for-a-user func (s *BillingService) GetStorageBillingUser(ctx context.Context, user string) (*StorageBilling, *Response, error) { u := fmt.Sprintf("users/%v/settings/billing/shared-storage", user) req, err := s.client.NewRequest("GET", u, nil) @@ -195,5 +195,5 @@ func (s *BillingService) GetStorageBillingUser(ctx context.Context, user string) return nil, resp, err } - return storageUserBilling, resp, err + return storageUserBilling, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/checks.go b/vendor/github.com/google/go-github/v45/github/checks.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/checks.go rename to vendor/github.com/google/go-github/v45/github/checks.go index 253d351896..12d08530ca 100644 --- a/vendor/github.com/google/go-github/v42/github/checks.go +++ b/vendor/github.com/google/go-github/v45/github/checks.go @@ -13,7 +13,7 @@ import ( // ChecksService provides access to the Checks API in the // GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/ +// GitHub API docs: https://docs.github.com/en/rest/checks/ type ChecksService service // CheckRun represents a GitHub check run on a repository associated with a GitHub app. @@ -98,7 +98,7 @@ func (c CheckSuite) String() string { // GetCheckRun gets a check-run for a repository. 
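The int-to-float64 change to TotalPaidMinutesUsed matters for decoding: a fractional paid-minutes value in the API response would fail to unmarshal into the old int field. A sketch against the org endpoint (imports as in the earlier example):

func printActionsBilling(ctx context.Context, client *github.Client, org string) error {
	bill, _, err := client.Billing.GetActionsBillingOrg(ctx, org)
	if err != nil {
		return err
	}
	// TotalPaidMinutesUsed is float64 as of this diff, e.g. 17.5.
	fmt.Printf("used=%dmin paid=%.2fmin included=%dmin\n",
		bill.TotalMinutesUsed, bill.TotalPaidMinutesUsed, bill.IncludedMinutes)
	return nil
}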
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#get-a-check-run +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#get-a-check-run func (s *ChecksService) GetCheckRun(ctx context.Context, owner, repo string, checkRunID int64) (*CheckRun, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) req, err := s.client.NewRequest("GET", u, nil) @@ -106,6 +106,8 @@ func (s *ChecksService) GetCheckRun(ctx context.Context, owner, repo string, che return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + checkRun := new(CheckRun) resp, err := s.client.Do(ctx, req, checkRun) if err != nil { @@ -117,7 +119,7 @@ func (s *ChecksService) GetCheckRun(ctx context.Context, owner, repo string, che // GetCheckSuite gets a single check suite. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#get-a-check-suite +// GitHub API docs: https://docs.github.com/en/rest/checks/suites#get-a-check-suite func (s *ChecksService) GetCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*CheckSuite, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-suites/%v", owner, repo, checkSuiteID) req, err := s.client.NewRequest("GET", u, nil) @@ -125,6 +127,8 @@ func (s *ChecksService) GetCheckSuite(ctx context.Context, owner, repo string, c return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + checkSuite := new(CheckSuite) resp, err := s.client.Do(ctx, req, checkSuite) if err != nil { @@ -157,7 +161,7 @@ type CheckRunAction struct { // CreateCheckRun creates a check run for repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#create-a-check-run +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#create-a-check-run func (s *ChecksService) CreateCheckRun(ctx context.Context, owner, repo string, opts CreateCheckRunOptions) (*CheckRun, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-runs", owner, repo) req, err := s.client.NewRequest("POST", u, opts) @@ -165,6 +169,8 @@ func (s *ChecksService) CreateCheckRun(ctx context.Context, owner, repo string, return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + checkRun := new(CheckRun) resp, err := s.client.Do(ctx, req, checkRun) if err != nil { @@ -188,7 +194,7 @@ type UpdateCheckRunOptions struct { // UpdateCheckRun updates a check run for a specific commit in a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#update-a-check-run +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#update-a-check-run func (s *ChecksService) UpdateCheckRun(ctx context.Context, owner, repo string, checkRunID int64, opts UpdateCheckRunOptions) (*CheckRun, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-runs/%v", owner, repo, checkRunID) req, err := s.client.NewRequest("PATCH", u, opts) @@ -196,6 +202,8 @@ func (s *ChecksService) UpdateCheckRun(ctx context.Context, owner, repo string, return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + checkRun := new(CheckRun) resp, err := s.client.Do(ctx, req, checkRun) if err != nil { @@ -207,7 +215,7 @@ func (s *ChecksService) UpdateCheckRun(ctx context.Context, owner, repo string, // ListCheckRunAnnotations lists the annotations for a check run. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#list-check-run-annotations +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-run-annotations func (s *ChecksService) ListCheckRunAnnotations(ctx context.Context, owner, repo string, checkRunID int64, opts *ListOptions) ([]*CheckRunAnnotation, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-runs/%v/annotations", owner, repo, checkRunID) u, err := addOptions(u, opts) @@ -220,6 +228,8 @@ func (s *ChecksService) ListCheckRunAnnotations(ctx context.Context, owner, repo return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + var checkRunAnnotations []*CheckRunAnnotation resp, err := s.client.Do(ctx, req, &checkRunAnnotations) if err != nil { @@ -247,7 +257,7 @@ type ListCheckRunsResults struct { // ListCheckRunsForRef lists check runs for a specific ref. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#list-check-runs-for-a-git-reference +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-runs-for-a-git-reference func (s *ChecksService) ListCheckRunsForRef(ctx context.Context, owner, repo, ref string, opts *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/check-runs", owner, repo, refURLEscape(ref)) u, err := addOptions(u, opts) @@ -260,6 +270,8 @@ func (s *ChecksService) ListCheckRunsForRef(ctx context.Context, owner, repo, re return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + var checkRunResults *ListCheckRunsResults resp, err := s.client.Do(ctx, req, &checkRunResults) if err != nil { @@ -271,7 +283,7 @@ func (s *ChecksService) ListCheckRunsForRef(ctx context.Context, owner, repo, re // ListCheckRunsCheckSuite lists check runs for a check suite. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#list-check-runs-in-a-check-suite +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#list-check-runs-in-a-check-suite func (s *ChecksService) ListCheckRunsCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64, opts *ListCheckRunsOptions) (*ListCheckRunsResults, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-suites/%v/check-runs", owner, repo, checkSuiteID) u, err := addOptions(u, opts) @@ -284,6 +296,8 @@ func (s *ChecksService) ListCheckRunsCheckSuite(ctx context.Context, owner, repo return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + var checkRunResults *ListCheckRunsResults resp, err := s.client.Do(ctx, req, &checkRunResults) if err != nil { @@ -293,6 +307,22 @@ func (s *ChecksService) ListCheckRunsCheckSuite(ctx context.Context, owner, repo return checkRunResults, resp, nil } +// ReRequestCheckRun triggers GitHub to rerequest an existing check run. +// +// GitHub API docs: https://docs.github.com/en/rest/checks/runs#rerequest-a-check-run +func (s *ChecksService) ReRequestCheckRun(ctx context.Context, owner, repo string, checkRunID int64) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/check-runs/%v/rerequest", owner, repo, checkRunID) + + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + + return s.client.Do(ctx, req, nil) +} + // ListCheckSuiteOptions represents parameters to list check suites. 
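ReRequestCheckRun is new in this version; like the other Checks methods it sets the check-runs preview Accept header itself, so a caller only supplies the coordinates (hypothetical values below):

func rerunCheck(ctx context.Context, client *github.Client) error {
	// POSTs to repos/{owner}/{repo}/check-runs/{id}/rerequest.
	_, err := client.Checks.ReRequestCheckRun(ctx, "octocat", "hello-world", 42)
	return err
}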
type ListCheckSuiteOptions struct { CheckName *string `url:"check_name,omitempty"` // Filters checks suites by the name of the check run. @@ -309,7 +339,7 @@ type ListCheckSuiteResults struct { // ListCheckSuitesForRef lists check suite for a specific ref. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#list-check-suites-for-a-git-reference +// GitHub API docs: https://docs.github.com/en/rest/checks/suites#list-check-suites-for-a-git-reference func (s *ChecksService) ListCheckSuitesForRef(ctx context.Context, owner, repo, ref string, opts *ListCheckSuiteOptions) (*ListCheckSuiteResults, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/check-suites", owner, repo, refURLEscape(ref)) u, err := addOptions(u, opts) @@ -322,6 +352,8 @@ func (s *ChecksService) ListCheckSuitesForRef(ctx context.Context, owner, repo, return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + var checkSuiteResults *ListCheckSuiteResults resp, err := s.client.Do(ctx, req, &checkSuiteResults) if err != nil { @@ -355,7 +387,7 @@ type PreferenceList struct { // SetCheckSuitePreferences changes the default automatic flow when creating check suites. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#update-repository-preferences-for-check-suites +// GitHub API docs: https://docs.github.com/en/rest/checks/suites#update-repository-preferences-for-check-suites func (s *ChecksService) SetCheckSuitePreferences(ctx context.Context, owner, repo string, opts CheckSuitePreferenceOptions) (*CheckSuitePreferenceResults, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-suites/preferences", owner, repo) req, err := s.client.NewRequest("PATCH", u, opts) @@ -363,6 +395,8 @@ func (s *ChecksService) SetCheckSuitePreferences(ctx context.Context, owner, rep return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + var checkSuitePrefResults *CheckSuitePreferenceResults resp, err := s.client.Do(ctx, req, &checkSuitePrefResults) if err != nil { @@ -380,7 +414,7 @@ type CreateCheckSuiteOptions struct { // CreateCheckSuite manually creates a check suite for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#create-a-check-suite +// GitHub API docs: https://docs.github.com/en/rest/checks/suites#create-a-check-suite func (s *ChecksService) CreateCheckSuite(ctx context.Context, owner, repo string, opts CreateCheckSuiteOptions) (*CheckSuite, *Response, error) { u := fmt.Sprintf("repos/%v/%v/check-suites", owner, repo) req, err := s.client.NewRequest("POST", u, opts) @@ -388,6 +422,8 @@ func (s *ChecksService) CreateCheckSuite(ctx context.Context, owner, repo string return nil, nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + checkSuite := new(CheckSuite) resp, err := s.client.Do(ctx, req, checkSuite) if err != nil { @@ -399,7 +435,7 @@ func (s *ChecksService) CreateCheckSuite(ctx context.Context, owner, repo string // ReRequestCheckSuite triggers GitHub to rerequest an existing check suite, without pushing new code to a repository. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/checks/#rerequest-a-check-suite +// GitHub API docs: https://docs.github.com/en/rest/checks/suites#rerequest-a-check-suite func (s *ChecksService) ReRequestCheckSuite(ctx context.Context, owner, repo string, checkSuiteID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/check-suites/%v/rerequest", owner, repo, checkSuiteID) @@ -408,6 +444,8 @@ func (s *ChecksService) ReRequestCheckSuite(ctx context.Context, owner, repo str return nil, err } + req.Header.Set("Accept", mediaTypeCheckRunsPreview) + resp, err := s.client.Do(ctx, req, nil) return resp, err } diff --git a/vendor/github.com/google/go-github/v42/github/code-scanning.go b/vendor/github.com/google/go-github/v45/github/code-scanning.go similarity index 75% rename from vendor/github.com/google/go-github/v42/github/code-scanning.go rename to vendor/github.com/google/go-github/v45/github/code-scanning.go index 9616f3a26d..df8ed86b51 100644 --- a/vendor/github.com/google/go-github/v42/github/code-scanning.go +++ b/vendor/github.com/google/go-github/v45/github/code-scanning.go @@ -15,7 +15,7 @@ import ( // CodeScanningService handles communication with the code scanning related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/code-scanning/ +// GitHub API docs: https://docs.github.com/en/rest/code-scanning type CodeScanningService service // Rule represents the complete details of GitHub Code Scanning alert type. @@ -65,24 +65,29 @@ type Tool struct { // Alert represents an individual GitHub Code Scanning Alert on a single repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#list-code-scanning-alerts-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/code-scanning type Alert struct { - RuleID *string `json:"rule_id,omitempty"` - RuleSeverity *string `json:"rule_severity,omitempty"` - RuleDescription *string `json:"rule_description,omitempty"` - Rule *Rule `json:"rule,omitempty"` - Tool *Tool `json:"tool,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - State *string `json:"state,omitempty"` - ClosedBy *User `json:"closed_by,omitempty"` - ClosedAt *Timestamp `json:"closed_at,omitempty"` - URL *string `json:"url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - MostRecentInstance *MostRecentInstance `json:"most_recent_instance,omitempty"` - DismissedBy *User `json:"dismissed_by,omitempty"` - DismissedAt *Timestamp `json:"dismissed_at,omitempty"` - DismissedReason *string `json:"dismissed_reason,omitempty"` - InstancesURL *string `json:"instances_url,omitempty"` + Number *int `json:"number,omitempty"` + Repository *Repository `json:"repository,omitempty"` + RuleID *string `json:"rule_id,omitempty"` + RuleSeverity *string `json:"rule_severity,omitempty"` + RuleDescription *string `json:"rule_description,omitempty"` + Rule *Rule `json:"rule,omitempty"` + Tool *Tool `json:"tool,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + FixedAt *Timestamp `json:"fixed_at,omitempty"` + State *string `json:"state,omitempty"` + ClosedBy *User `json:"closed_by,omitempty"` + ClosedAt *Timestamp `json:"closed_at,omitempty"` + URL *string `json:"url,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + MostRecentInstance *MostRecentInstance `json:"most_recent_instance,omitempty"` + Instances []*MostRecentInstance `json:"instances,omitempty"` + 
DismissedBy *User `json:"dismissed_by,omitempty"` + DismissedAt *Timestamp `json:"dismissed_at,omitempty"` + DismissedReason *string `json:"dismissed_reason,omitempty"` + InstancesURL *string `json:"instances_url,omitempty"` } // ID returns the ID associated with an alert. It is the number at the end of the security alert's URL. @@ -132,7 +137,7 @@ type AnalysesListOptions struct { // ScanningAnalysis represents an individual GitHub Code Scanning ScanningAnalysis on a single repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#list-code-scanning-analyses-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/code-scanning type ScanningAnalysis struct { ID *int64 `json:"id,omitempty"` Ref *string `json:"ref,omitempty"` @@ -153,7 +158,7 @@ type ScanningAnalysis struct { // SarifAnalysis specifies the results of a code scanning job. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#upload-an-analysis-as-sarif-data +// GitHub API docs: https://docs.github.com/en/rest/code-scanning type SarifAnalysis struct { CommitSHA *string `json:"commit_sha,omitempty"` Ref *string `json:"ref,omitempty"` @@ -165,19 +170,46 @@ type SarifAnalysis struct { // SarifID identifies a sarif analysis upload. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#upload-an-analysis-as-sarif-data +// GitHub API docs: https://docs.github.com/en/rest/code-scanning type SarifID struct { ID *string `json:"id,omitempty"` URL *string `json:"url,omitempty"` } +// ListAlertsForOrg lists code scanning alerts for an org. +// +// You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events +// read permission to use this endpoint. +// +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-an-organization +func (s *CodeScanningService) ListAlertsForOrg(ctx context.Context, org string, opts *AlertListOptions) ([]*Alert, *Response, error) { + u := fmt.Sprintf("orgs/%v/code-scanning/alerts", org) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var alerts []*Alert + resp, err := s.client.Do(ctx, req, &alerts) + if err != nil { + return nil, resp, err + } + + return alerts, resp, nil +} + // ListAlertsForRepo lists code scanning alerts for a repository. // // Lists all open code scanning alerts for the default branch (usually master) and protected branches in a repository. // You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events // read permission to use this endpoint. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/code-scanning/#list-code-scanning-alerts-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-alerts-for-a-repository func (s *CodeScanningService) ListAlertsForRepo(ctx context.Context, owner, repo string, opts *AlertListOptions) ([]*Alert, *Response, error) { u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts", owner, repo) u, err := addOptions(u, opts) @@ -206,7 +238,7 @@ func (s *CodeScanningService) ListAlertsForRepo(ctx context.Context, owner, repo // // The security alert_id is the number at the end of the security alert's URL. 
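ListAlertsForOrg is also new in this version. A sketch (nil options list everything; the token must carry the security_events scope, per the comment above; the Alert getters are the library's generated accessors, assumed here):

func printOrgAlerts(ctx context.Context, client *github.Client, org string) error {
	alerts, _, err := client.CodeScanning.ListAlertsForOrg(ctx, org, nil)
	if err != nil {
		return err
	}
	for _, a := range alerts {
		fmt.Printf("#%d %s %s\n", a.GetNumber(), a.GetState(), a.GetHTMLURL())
	}
	return nil
}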
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/code-scanning/#get-a-code-scanning-alert +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-alert func (s *CodeScanningService) GetAlert(ctx context.Context, owner, repo string, id int64) (*Alert, *Response, error) { u := fmt.Sprintf("repos/%v/%v/code-scanning/alerts/%v", owner, repo, id) @@ -230,7 +262,7 @@ func (s *CodeScanningService) GetAlert(ctx context.Context, owner, repo string, // You must use an access token with the security_events scope to use this endpoint. GitHub Apps must have the security_events // write permission to use this endpoint. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#upload-an-analysis-as-sarif-data +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#upload-an-analysis-as-sarif-data func (s *CodeScanningService) UploadSarif(ctx context.Context, owner, repo string, sarif *SarifAnalysis) (*SarifID, *Response, error) { u := fmt.Sprintf("repos/%v/%v/code-scanning/sarifs", owner, repo) @@ -254,7 +286,7 @@ func (s *CodeScanningService) UploadSarif(ctx context.Context, owner, repo strin // You must use an access token with the security_events scope to use this endpoint. // GitHub Apps must have the security_events read permission to use this endpoint. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#list-code-scanning-analyses-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#list-code-scanning-analyses-for-a-repository func (s *CodeScanningService) ListAnalysesForRepo(ctx context.Context, owner, repo string, opts *AnalysesListOptions) ([]*ScanningAnalysis, *Response, error) { u := fmt.Sprintf("repos/%v/%v/code-scanning/analyses", owner, repo) u, err := addOptions(u, opts) @@ -283,7 +315,7 @@ func (s *CodeScanningService) ListAnalysesForRepo(ctx context.Context, owner, re // // The security analysis_id is the ID of the analysis, as returned from the ListAnalysesForRepo operation. // -// GitHub API docs: https://docs.github.com/en/rest/reference/code-scanning#get-a-code-scanning-analysis-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/code-scanning#get-a-code-scanning-analysis-for-a-repository func (s *CodeScanningService) GetAnalysis(ctx context.Context, owner, repo string, id int64) (*ScanningAnalysis, *Response, error) { u := fmt.Sprintf("repos/%v/%v/code-scanning/analyses/%v", owner, repo, id) diff --git a/vendor/github.com/google/go-github/v42/github/dependabot.go b/vendor/github.com/google/go-github/v45/github/dependabot.go similarity index 77% rename from vendor/github.com/google/go-github/v42/github/dependabot.go rename to vendor/github.com/google/go-github/v45/github/dependabot.go index 8ee0c0c7d8..07e68b506a 100644 --- a/vendor/github.com/google/go-github/v42/github/dependabot.go +++ b/vendor/github.com/google/go-github/v45/github/dependabot.go @@ -8,5 +8,5 @@ package github // DependabotService handles communication with the Dependabot related // methods of the GitHub API. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/dependabot/ +// GitHub API docs: https://docs.github.com/en/rest/dependabot/ type DependabotService service diff --git a/vendor/github.com/google/go-github/v42/github/dependabot_secrets.go b/vendor/github.com/google/go-github/v45/github/dependabot_secrets.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/dependabot_secrets.go rename to vendor/github.com/google/go-github/v45/github/dependabot_secrets.go index a6645339cc..f51f3396bd 100644 --- a/vendor/github.com/google/go-github/v42/github/dependabot_secrets.go +++ b/vendor/github.com/google/go-github/v45/github/dependabot_secrets.go @@ -27,7 +27,7 @@ func (s *DependabotService) getPublicKey(ctx context.Context, url string) (*Publ // GetRepoPublicKey gets a public key that should be used for Dependabot secret encryption. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#get-a-repository-public-key +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-a-repository-public-key func (s *DependabotService) GetRepoPublicKey(ctx context.Context, owner, repo string) (*PublicKey, *Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/public-key", owner, repo) return s.getPublicKey(ctx, url) @@ -35,7 +35,7 @@ func (s *DependabotService) GetRepoPublicKey(ctx context.Context, owner, repo st // GetOrgPublicKey gets a public key that should be used for Dependabot secret encryption. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#get-an-organization-public-key +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-an-organization-public-key func (s *DependabotService) GetOrgPublicKey(ctx context.Context, org string) (*PublicKey, *Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/public-key", org) return s.getPublicKey(ctx, url) @@ -64,7 +64,7 @@ func (s *DependabotService) listSecrets(ctx context.Context, url string, opts *L // ListRepoSecrets lists all Dependabot secrets available in a repository // without revealing their encrypted values. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#list-repository-secrets +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-repository-secrets func (s *DependabotService) ListRepoSecrets(ctx context.Context, owner, repo string, opts *ListOptions) (*Secrets, *Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets", owner, repo) return s.listSecrets(ctx, url, opts) @@ -73,7 +73,7 @@ func (s *DependabotService) ListRepoSecrets(ctx context.Context, owner, repo str // ListOrgSecrets lists all Dependabot secrets available in an organization // without revealing their encrypted values. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#list-organization-secrets +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-organization-secrets func (s *DependabotService) ListOrgSecrets(ctx context.Context, org string, opts *ListOptions) (*Secrets, *Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets", org) return s.listSecrets(ctx, url, opts) @@ -96,7 +96,7 @@ func (s *DependabotService) getSecret(ctx context.Context, url string) (*Secret, // GetRepoSecret gets a single repository Dependabot secret without revealing its encrypted value. 
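//
// A minimal sketch of fetching the key used to encrypt Dependabot secret
// values (assumes an authenticated *github.Client named client and a context
// ctx; "owner" and "repo" are placeholders):
//
//	key, _, err := client.Dependabot.GetRepoPublicKey(ctx, "owner", "repo")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(key.GetKeyID(), key.GetKey())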
// -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#get-a-repository-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-a-repository-secret func (s *DependabotService) GetRepoSecret(ctx context.Context, owner, repo, name string) (*Secret, *Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, name) return s.getSecret(ctx, url) @@ -104,7 +104,7 @@ func (s *DependabotService) GetRepoSecret(ctx context.Context, owner, repo, name // GetOrgSecret gets a single organization Dependabot secret without revealing its encrypted value. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#get-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#get-an-organization-secret func (s *DependabotService) GetOrgSecret(ctx context.Context, org, name string) (*Secret, *Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, name) return s.getSecret(ctx, url) @@ -121,7 +121,7 @@ func (s *DependabotService) putSecret(ctx context.Context, url string, eSecret * // CreateOrUpdateRepoSecret creates or updates a repository Dependabot secret with an encrypted value. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#create-or-update-a-repository-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-a-repository-secret func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, repo string, eSecret *EncryptedSecret) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, eSecret.Name) return s.putSecret(ctx, url, eSecret) @@ -129,7 +129,7 @@ func (s *DependabotService) CreateOrUpdateRepoSecret(ctx context.Context, owner, // CreateOrUpdateOrgSecret creates or updates an organization Dependabot secret with an encrypted value. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#create-or-update-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#create-or-update-an-organization-secret func (s *DependabotService) CreateOrUpdateOrgSecret(ctx context.Context, org string, eSecret *EncryptedSecret) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, eSecret.Name) return s.putSecret(ctx, url, eSecret) @@ -146,7 +146,7 @@ func (s *DependabotService) deleteSecret(ctx context.Context, url string) (*Resp // DeleteRepoSecret deletes a Dependabot secret in a repository using the secret name. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#delete-a-repository-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#delete-a-repository-secret func (s *DependabotService) DeleteRepoSecret(ctx context.Context, owner, repo, name string) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/dependabot/secrets/%v", owner, repo, name) return s.deleteSecret(ctx, url) @@ -154,7 +154,7 @@ func (s *DependabotService) DeleteRepoSecret(ctx context.Context, owner, repo, n // DeleteOrgSecret deletes a Dependabot secret in an organization using the secret name. 
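//
// A sketch of the create-or-update flow from above (assumes the plaintext was
// already sealed with the repository public key, e.g. via
// golang.org/x/crypto/nacl/box, and that EncryptedSecret exposes Name, KeyID,
// and EncryptedValue as plain strings; all values are placeholders):
//
//	secret := &github.EncryptedSecret{
//		Name:           "MY_TOKEN",
//		KeyID:          keyID,        // from GetRepoPublicKey
//		EncryptedValue: encryptedB64, // base64 of the sealed box
//	}
//	if _, err := client.Dependabot.CreateOrUpdateRepoSecret(ctx, "owner", "repo", secret); err != nil {
//		log.Fatal(err)
//	}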
// -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#delete-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#delete-an-organization-secret func (s *DependabotService) DeleteOrgSecret(ctx context.Context, org, name string) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v", org, name) return s.deleteSecret(ctx, url) @@ -162,7 +162,7 @@ func (s *DependabotService) DeleteOrgSecret(ctx context.Context, org, name strin // ListSelectedReposForOrgSecret lists all repositories that have access to a Dependabot secret. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#list-selected-repositories-for-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#list-selected-repositories-for-an-organization-secret func (s *DependabotService) ListSelectedReposForOrgSecret(ctx context.Context, org, name string, opts *ListOptions) (*SelectedReposList, *Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories", org, name) u, err := addOptions(url, opts) @@ -186,7 +186,7 @@ func (s *DependabotService) ListSelectedReposForOrgSecret(ctx context.Context, o // SetSelectedReposForOrgSecret sets the repositories that have access to a Dependabot secret. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#set-selected-repositories-for-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#set-selected-repositories-for-an-organization-secret func (s *DependabotService) SetSelectedReposForOrgSecret(ctx context.Context, org, name string, ids SelectedRepoIDs) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories", org, name) type repoIDs struct { @@ -203,7 +203,7 @@ func (s *DependabotService) SetSelectedReposForOrgSecret(ctx context.Context, or // AddSelectedRepoToOrgSecret adds a repository to an organization Dependabot secret. // -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#add-selected-repository-to-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#add-selected-repository-to-an-organization-secret func (s *DependabotService) AddSelectedRepoToOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories/%v", org, name, *repo.ID) req, err := s.client.NewRequest("PUT", url, nil) @@ -216,7 +216,7 @@ func (s *DependabotService) AddSelectedRepoToOrgSecret(ctx context.Context, org, // RemoveSelectedRepoFromOrgSecret removes a repository from an organization Dependabot secret. 
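//
// A sketch of scoping an org secret to specific repositories with the setter
// above (assumes SelectedRepoIDs is this package's []int64-style list type;
// the org, secret name, and IDs are placeholders):
//
//	ids := github.SelectedRepoIDs{12345, 67890}
//	if _, err := client.Dependabot.SetSelectedReposForOrgSecret(ctx, "my-org", "MY_TOKEN", ids); err != nil {
//		log.Fatal(err)
//	}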
// -// GitHub API docs: https://docs.github.com/en/rest/reference/dependabot#remove-selected-repository-from-an-organization-secret +// GitHub API docs: https://docs.github.com/en/rest/dependabot/secrets#remove-selected-repository-from-an-organization-secret func (s *DependabotService) RemoveSelectedRepoFromOrgSecret(ctx context.Context, org, name string, repo *Repository) (*Response, error) { url := fmt.Sprintf("orgs/%v/dependabot/secrets/%v/repositories/%v", org, name, *repo.ID) req, err := s.client.NewRequest("DELETE", url, nil) diff --git a/vendor/github.com/google/go-github/v42/github/doc.go b/vendor/github.com/google/go-github/v45/github/doc.go similarity index 95% rename from vendor/github.com/google/go-github/v42/github/doc.go rename to vendor/github.com/google/go-github/v45/github/doc.go index 556d27c4bf..38cda12b2b 100644 --- a/vendor/github.com/google/go-github/v42/github/doc.go +++ b/vendor/github.com/google/go-github/v45/github/doc.go @@ -8,7 +8,7 @@ Package github provides a client for using the GitHub API. Usage: - import "github.com/google/go-github/v42/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) + import "github.com/google/go-github/v45/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) import "github.com/google/go-github/github" // with go modules disabled Construct a new GitHub client, then use the various services on the client to @@ -29,7 +29,7 @@ Some API methods have optional parameters that can be passed. For example: The services of a client divide the API into logical chunks and correspond to the structure of the GitHub API documentation at -https://docs.github.com/en/free-pro-team@latest/rest/reference/. +https://docs.github.com/en/rest . NOTE: Using the https://godoc.org/context package, one can easily pass cancelation signals and deadlines to various services of the client for @@ -137,7 +137,7 @@ For secondary rate limits, you can check if its type is *github.AbuseRateLimitEr } Learn more about GitHub rate limiting at -https://docs.github.com/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#rate-limiting. +https://docs.github.com/en/rest/rate-limit . Accepted Status @@ -163,7 +163,7 @@ instead designed to work with a caching http.Transport. We recommend using https://github.com/gregjones/httpcache for that. Learn more about GitHub conditional requests at -https://docs.github.com/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#conditional-requests. +https://docs.github.com/en/rest/overview/resources-in-the-rest-api#conditional-requests. Creating and Updating Resources diff --git a/vendor/github.com/google/go-github/v42/github/enterprise.go b/vendor/github.com/google/go-github/v45/github/enterprise.go similarity index 75% rename from vendor/github.com/google/go-github/v42/github/enterprise.go rename to vendor/github.com/google/go-github/v45/github/enterprise.go index f6a5af8390..1c9b069566 100644 --- a/vendor/github.com/google/go-github/v42/github/enterprise.go +++ b/vendor/github.com/google/go-github/v45/github/enterprise.go @@ -8,5 +8,5 @@ package github // EnterpriseService provides access to the enterprise related functions // in the GitHub API. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise-admin/ +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/ type EnterpriseService service diff --git a/vendor/github.com/google/go-github/v42/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go similarity index 80% rename from vendor/github.com/google/go-github/v42/github/enterprise_actions_runners.go rename to vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go index d2758fe838..f2ba166360 100644 --- a/vendor/github.com/google/go-github/v42/github/enterprise_actions_runners.go +++ b/vendor/github.com/google/go-github/v45/github/enterprise_actions_runners.go @@ -12,7 +12,7 @@ import ( // CreateRegistrationToken creates a token that can be used to add a self-hosted runner. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise-admin/#create-a-registration-token-for-an-enterprise +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#create-a-registration-token-for-an-enterprise func (s *EnterpriseService) CreateRegistrationToken(ctx context.Context, enterprise string) (*RegistrationToken, *Response, error) { u := fmt.Sprintf("enterprises/%v/actions/runners/registration-token", enterprise) @@ -32,7 +32,7 @@ func (s *EnterpriseService) CreateRegistrationToken(ctx context.Context, enterpr // ListRunners lists all the self-hosted runners for an enterprise. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise-admin/#list-self-hosted-runners-for-an-enterprise +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-enterprise func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, opts *ListOptions) (*Runners, *Response, error) { u := fmt.Sprintf("enterprises/%v/actions/runners", enterprise) u, err := addOptions(u, opts) @@ -56,7 +56,7 @@ func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, // RemoveRunner forces the removal of a self-hosted runner from an enterprise using the runner id. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/enterprise-admin/#delete-a-self-hosted-runner-from-an-enterprise +// GitHub API docs: https://docs.github.com/en/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-enterprise func (s *EnterpriseService) RemoveRunner(ctx context.Context, enterprise string, runnerID int64) (*Response, error) { u := fmt.Sprintf("enterprises/%v/actions/runners/%v", enterprise, runnerID) diff --git a/vendor/github.com/google/go-github/v42/github/enterprise_audit_log.go b/vendor/github.com/google/go-github/v45/github/enterprise_audit_log.go similarity index 87% rename from vendor/github.com/google/go-github/v42/github/enterprise_audit_log.go rename to vendor/github.com/google/go-github/v45/github/enterprise_audit_log.go index b889a7e570..4064867338 100644 --- a/vendor/github.com/google/go-github/v42/github/enterprise_audit_log.go +++ b/vendor/github.com/google/go-github/v45/github/enterprise_audit_log.go @@ -12,7 +12,7 @@ import ( // GetAuditLog gets the audit-log entries for an enterprise. 
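//
// A minimal sketch of the runner calls above (client, ctx, and the enterprise
// slug are placeholders; GetToken is the generated accessor on the returned
// RegistrationToken):
//
//	token, _, err := client.Enterprise.CreateRegistrationToken(ctx, "my-enterprise")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(token.GetToken()) // hand this to the runner's config script
//	runners, _, err := client.Enterprise.ListRunners(ctx, "my-enterprise", &github.ListOptions{PerPage: 50})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(runners.TotalCount, "registered runners")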
// -// GitHub API docs: https://docs.github.com/en/rest/reference/enterprise-admin#get-the-audit-log-for-an-enterprise +// GitHub API docs: https://docs.github.com/en/rest/enterprise-admin/audit-log#get-the-audit-log-for-an-enterprise func (s *EnterpriseService) GetAuditLog(ctx context.Context, enterprise string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { u := fmt.Sprintf("enterprises/%v/audit-log", enterprise) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/event.go b/vendor/github.com/google/go-github/v45/github/event.go similarity index 95% rename from vendor/github.com/google/go-github/v42/github/event.go rename to vendor/github.com/google/go-github/v45/github/event.go index 136abb273d..5a052de09c 100644 --- a/vendor/github.com/google/go-github/v42/github/event.go +++ b/vendor/github.com/google/go-github/v45/github/event.go @@ -102,6 +102,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &PullRequestReviewEvent{} case "PullRequestReviewCommentEvent": payload = &PullRequestReviewCommentEvent{} + case "PullRequestReviewThreadEvent": + payload = &PullRequestReviewThreadEvent{} case "PullRequestTargetEvent": payload = &PullRequestTargetEvent{} case "PushEvent": @@ -112,8 +114,12 @@ func (e *Event) ParsePayload() (payload interface{}, err error) { payload = &RepositoryEvent{} case "RepositoryDispatchEvent": payload = &RepositoryDispatchEvent{} + case "RepositoryImportEvent": + payload = &RepositoryImportEvent{} case "RepositoryVulnerabilityAlertEvent": payload = &RepositoryVulnerabilityAlertEvent{} + case "SecretScanningAlertEvent": + payload = &SecretScanningAlertEvent{} case "StarEvent": payload = &StarEvent{} case "StatusEvent": diff --git a/vendor/github.com/google/go-github/v42/github/event_types.go b/vendor/github.com/google/go-github/v45/github/event_types.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/event_types.go rename to vendor/github.com/google/go-github/v45/github/event_types.go index 6a806c4a1e..b550361848 100644 --- a/vendor/github.com/google/go-github/v42/github/event_types.go +++ b/vendor/github.com/google/go-github/v45/github/event_types.go @@ -106,10 +106,11 @@ type CreateEvent struct { RefType *string `json:"ref_type,omitempty"` MasterBranch *string `json:"master_branch,omitempty"` Description *string `json:"description,omitempty"` + PusherType *string `json:"pusher_type,omitempty"` // The following fields are only populated by Webhook events. - PusherType *string `json:"pusher_type,omitempty"` Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } @@ -145,7 +146,15 @@ type DeployKeyEvent struct { // The deploy key resource. Key *Key `json:"key,omitempty"` + // The Repository where the event occurred + Repo *Repository `json:"repository,omitempty"` + + // The following field is only present when the webhook is triggered on + // a repository belonging to an organization. + Organization *Organization `json:"organization,omitempty"` + // The following fields are only populated by Webhook events. + Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } @@ -485,13 +494,14 @@ type IssuesEvent struct { type LabelEvent struct { // Action is the action that was performed. 
Possible values are: // "created", "edited", "deleted" - Action *string `json:"action,omitempty"` - Label *Label `json:"label,omitempty"` + Action *string `json:"action,omitempty"` + Label *Label `json:"label,omitempty"` + Changes *EditChange `json:"changes,omitempty"` // The following fields are only populated by Webhook events. - Changes *EditChange `json:"changes,omitempty"` Repo *Repository `json:"repository,omitempty"` Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } @@ -566,6 +576,9 @@ type MetaEvent struct { Hook *Hook `json:"hook,omitempty"` // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } @@ -674,7 +687,12 @@ type PingEvent struct { // The ID of the webhook that triggered the ping. HookID *int64 `json:"hook_id,omitempty"` // The webhook configuration. - Hook *Hook `json:"hook,omitempty"` + Hook *Hook `json:"hook,omitempty"` + + // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } @@ -822,12 +840,30 @@ type PullRequestReviewCommentEvent struct { Installation *Installation `json:"installation,omitempty"` } +// PullRequestReviewThreadEvent is triggered when a comment made as part of a +// review of a pull request is marked resolved or unresolved. +// The Webhook event name is "pull_request_review_thread". +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review_thread +type PullRequestReviewThreadEvent struct { + // Action is the action that was performed on the comment. + // Possible values are: "resolved", "unresolved". + Action *string `json:"action,omitempty"` + Thread *PullRequestThread `json:"thread,omitempty"` + PullRequest *PullRequest `json:"pull_request,omitempty"` + + // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Sender *User `json:"sender,omitempty"` + Installation *Installation `json:"installation,omitempty"` +} + // PullRequestTargetEvent is triggered when a pull request is assigned, unassigned, labeled, // unlabeled, opened, edited, closed, reopened, synchronize, ready_for_review, // locked, unlocked, a pull request review is requested, or a review request is removed. // The Webhook event name is "pull_request_target". // -// GitHub API docs: https://docs.github.com/en/actions/reference/events-that-trigger-workflows#pull_request_target +// GitHub API docs: https://docs.github.com/en/actions/events-that-trigger-workflows#pull_request_target type PullRequestTargetEvent struct { // Action is the action that was performed. Possible values are: // "assigned", "unassigned", "labeled", "unlabeled", "opened", "edited", "closed", "reopened", @@ -877,6 +913,7 @@ type PushEvent struct { DistinctSize *int `json:"distinct_size,omitempty"` // The following fields are only populated by Webhook events. 
+ Action *string `json:"action,omitempty"` After *string `json:"after,omitempty"` Created *bool `json:"created,omitempty"` Deleted *bool `json:"deleted,omitempty"` @@ -918,8 +955,8 @@ type HeadCommit struct { Modified []string `json:"modified,omitempty"` } -func (p HeadCommit) String() string { - return Stringify(p) +func (h HeadCommit) String() string { + return Stringify(h) } // PushEventRepository represents the repo object in a PushEvent payload. @@ -1024,6 +1061,17 @@ type RepositoryDispatchEvent struct { Installation *Installation `json:"installation,omitempty"` } +// RepositoryImportEvent represents the activity related to a repository being imported to GitHub. +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#repository_import +type RepositoryImportEvent struct { + // Status represents the final state of the import. This can be one of "success", "cancelled", or "failure". + Status *string `json:"status,omitempty"` + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` +} + // RepositoryVulnerabilityAlertEvent is triggered when a security alert is created, dismissed, or resolved. // // GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository_vulnerability_alert @@ -1031,14 +1079,17 @@ type RepositoryVulnerabilityAlertEvent struct { // Action is the action that was performed. Possible values are: "create", "dismiss", "resolve". Action *string `json:"action,omitempty"` - //The security alert of the vulnerable dependency. + // The security alert of the vulnerable dependency. Alert *RepositoryVulnerabilityAlert `json:"alert,omitempty"` - //The repository of the vulnerable dependency. + // The repository of the vulnerable dependency. Repository *Repository `json:"repository,omitempty"` // The following fields are only populated by Webhook events. Installation *Installation `json:"installation,omitempty"` + + // The user that triggered the event. + Sender *User `json:"sender,omitempty"` } // RepositoryVulnerabilityAlert represents a repository security alert. @@ -1057,6 +1108,26 @@ type RepositoryVulnerabilityAlert struct { DismissedAt *Timestamp `json:"dismissed_at,omitempty"` } +// SecretScanningAlertEvent is triggered when a secret scanning alert occurs in a repository. +// The Webhook name is secret_scanning_alert. +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#secret_scanning_alert +type SecretScanningAlertEvent struct { + // Action is the action that was performed. Possible values are: "created", "resolved", or "reopened". + Action *string `json:"action,omitempty"` + + // Alert is the secret scanning alert involved in the event. + Alert *SecretScanningAlert `json:"alert,omitempty"` + + // Only populated by the "resolved" and "reopened" actions. + Sender *User `json:"sender,omitempty"` + // The following fields are only populated by Webhook events. + Repo *Repository `json:"repository,omitempty"` + Organization *Organization `json:"organization,omitempty"` + Enterprise *Enterprise `json:"enterprise,omitempty"` + Installation *Installation `json:"installation,omitempty"` +} + // StarEvent is triggered when a star is added or removed from a repository. // The Webhook event name is "star". 
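//
// A sketch of consuming the newly wired-up payload types through
// Event.ParsePayload (the event value would come from an activity listing
// call; all names are placeholders):
//
//	payload, err := event.ParsePayload()
//	if err != nil {
//		log.Fatal(err)
//	}
//	switch p := payload.(type) {
//	case *github.PullRequestReviewThreadEvent:
//		fmt.Println("review thread", p.GetAction())
//	case *github.SecretScanningAlertEvent:
//		fmt.Println("secret scanning", p.GetAction())
//	case *github.RepositoryImportEvent:
//		fmt.Println("repository import", p.GetStatus())
//	}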
// @@ -1220,3 +1291,71 @@ type WorkflowRunEvent struct { Sender *User `json:"sender,omitempty"` Installation *Installation `json:"installation,omitempty"` } + +// SecurityAdvisory represents the advisory object in a SecurityAdvisoryEvent payload. +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#security_advisory +type SecurityAdvisory struct { + GHSAID *string `json:"ghsa_id,omitempty"` + Summary *string `json:"summary,omitempty"` + Description *string `json:"description,omitempty"` + Severity *string `json:"severity,omitempty"` + Identifiers []*AdvisoryIdentifier `json:"identifiers,omitempty"` + References []*AdvisoryReference `json:"references,omitempty"` + PublishedAt *Timestamp `json:"published_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + WithdrawnAt *Timestamp `json:"withdrawn_at,omitempty"` + Vulnerabilities []*AdvisoryVulnerability `json:"vulnerabilities,omitempty"` +} + +// AdvisoryIdentifier represents the identifier for a Security Advisory. +type AdvisoryIdentifier struct { + Value *string `json:"value,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AdvisoryReference represents the reference URL for the security advisory. +type AdvisoryReference struct { + URL *string `json:"url,omitempty"` +} + +// AdvisoryVulnerability represents the vulnerability object for a Security Advisory. +type AdvisoryVulnerability struct { + Package *VulnerabilityPackage `json:"package,omitempty"` + Severity *string `json:"severity,omitempty"` + VulnerableVersionRange *string `json:"vulnerable_version_range,omitempty"` + FirstPatchedVersion *FirstPatchedVersion `json:"first_patched_version,omitempty"` +} + +// VulnerabilityPackage represents the package object for an Advisory Vulnerability. +type VulnerabilityPackage struct { + Ecosystem *string `json:"ecosystem,omitempty"` + Name *string `json:"name,omitempty"` +} + +// FirstPatchedVersion represents the identifier for the first patched version of that vulnerability. +type FirstPatchedVersion struct { + Identifier *string `json:"identifier,omitempty"` +} + +// SecurityAdvisoryEvent is triggered when a security-related vulnerability is found in software on GitHub. +// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#security_advisory +type SecurityAdvisoryEvent struct { + Action *string `json:"action,omitempty"` + SecurityAdvisory *SecurityAdvisory `json:"security_advisory,omitempty"` +} + +// CodeScanningAlertEvent is triggered when code scanning finds a potential vulnerability or error in your code. 
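//
// A sketch of receiving one of these events in an HTTP webhook handler, using
// this package's ValidatePayload/ParseWebHook helpers (secret is an assumed
// []byte shared webhook secret; the rest of the server wiring is omitted):
//
//	func handle(w http.ResponseWriter, r *http.Request) {
//		payload, err := github.ValidatePayload(r, secret)
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusBadRequest)
//			return
//		}
//		event, err := github.ParseWebHook(github.WebHookType(r), payload)
//		if err != nil {
//			http.Error(w, err.Error(), http.StatusBadRequest)
//			return
//		}
//		if e, ok := event.(*github.CodeScanningAlertEvent); ok {
//			fmt.Println(e.GetAction(), e.GetAlert().GetRule().GetID())
//		}
//	}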
+// +// GitHub API docs: https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads#code_scanning_alert +type CodeScanningAlertEvent struct { + Action *string `json:"action,omitempty"` + Alert *Alert `json:"alert,omitempty"` + Ref *string `json:"ref,omitempty"` + // CommitOID is the commit SHA of the code scanning alert. + CommitOID *string `json:"commit_oid,omitempty"` + Repo *Repository `json:"repository,omitempty"` + Org *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` +} diff --git a/vendor/github.com/google/go-github/v42/github/gists.go b/vendor/github.com/google/go-github/v45/github/gists.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/gists.go rename to vendor/github.com/google/go-github/v45/github/gists.go index 4971c6bf54..ecdc6f2726 100644 --- a/vendor/github.com/google/go-github/v42/github/gists.go +++ b/vendor/github.com/google/go-github/v45/github/gists.go @@ -14,7 +14,7 @@ import ( // GistsService handles communication with the Gist related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/ +// GitHub API docs: https://docs.github.com/en/rest/gists type GistsService service // Gist represents a GitHub gist. @@ -96,8 +96,8 @@ type GistListOptions struct { // is authenticated, it will return all gists for the authenticated // user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-gists-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-gists-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gists-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gists-for-a-user func (s *GistsService) List(ctx context.Context, user string, opts *GistListOptions) ([]*Gist, *Response, error) { var u string if user != "" { @@ -126,7 +126,7 @@ func (s *GistsService) List(ctx context.Context, user string, opts *GistListOpti // ListAll lists all public gists. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-public-gists +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-public-gists func (s *GistsService) ListAll(ctx context.Context, opts *GistListOptions) ([]*Gist, *Response, error) { u, err := addOptions("gists/public", opts) if err != nil { @@ -149,7 +149,7 @@ func (s *GistsService) ListAll(ctx context.Context, opts *GistListOptions) ([]*G // ListStarred lists starred gists of the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-starred-gists +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-starred-gists func (s *GistsService) ListStarred(ctx context.Context, opts *GistListOptions) ([]*Gist, *Response, error) { u, err := addOptions("gists/starred", opts) if err != nil { @@ -172,7 +172,7 @@ func (s *GistsService) ListStarred(ctx context.Context, opts *GistListOptions) ( // Get a single gist. 
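//
// A quick sketch of the listing call above (client and ctx are assumed; the
// username is a placeholder):
//
//	gists, _, err := client.Gists.List(ctx, "octocat", nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, g := range gists {
//		fmt.Println(g.GetID(), g.GetDescription())
//	}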
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#get-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#get-a-gist func (s *GistsService) Get(ctx context.Context, id string) (*Gist, *Response, error) { u := fmt.Sprintf("gists/%v", id) req, err := s.client.NewRequest("GET", u, nil) @@ -191,7 +191,7 @@ func (s *GistsService) Get(ctx context.Context, id string) (*Gist, *Response, er // GetRevision gets a specific revision of a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#get-a-gist-revision +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#get-a-gist-revision func (s *GistsService) GetRevision(ctx context.Context, id, sha string) (*Gist, *Response, error) { u := fmt.Sprintf("gists/%v/%v", id, sha) req, err := s.client.NewRequest("GET", u, nil) @@ -210,7 +210,7 @@ func (s *GistsService) GetRevision(ctx context.Context, id, sha string) (*Gist, // Create a gist for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#create-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#create-a-gist func (s *GistsService) Create(ctx context.Context, gist *Gist) (*Gist, *Response, error) { u := "gists" req, err := s.client.NewRequest("POST", u, gist) @@ -229,7 +229,7 @@ func (s *GistsService) Create(ctx context.Context, gist *Gist) (*Gist, *Response // Edit a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#update-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#update-a-gist func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist, *Response, error) { u := fmt.Sprintf("gists/%v", id) req, err := s.client.NewRequest("PATCH", u, gist) @@ -248,7 +248,7 @@ func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist, // ListCommits lists commits of a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-gist-commits +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gist-commits func (s *GistsService) ListCommits(ctx context.Context, id string, opts *ListOptions) ([]*GistCommit, *Response, error) { u := fmt.Sprintf("gists/%v/commits", id) u, err := addOptions(u, opts) @@ -272,49 +272,53 @@ func (s *GistsService) ListCommits(ctx context.Context, id string, opts *ListOpt // Delete a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#delete-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#delete-a-gist func (s *GistsService) Delete(ctx context.Context, id string) (*Response, error) { u := fmt.Sprintf("gists/%v", id) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } // Star a gist on behalf of the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#star-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#star-a-gist func (s *GistsService) Star(ctx context.Context, id string) (*Response, error) { u := fmt.Sprintf("gists/%v/star", id) req, err := s.client.NewRequest("PUT", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } // Unstar a gist on behalf of the authenticated user. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#unstar-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#unstar-a-gist func (s *GistsService) Unstar(ctx context.Context, id string) (*Response, error) { u := fmt.Sprintf("gists/%v/star", id) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } // IsStarred checks if a gist is starred by authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#check-if-a-gist-is-starred +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#check-if-a-gist-is-starred func (s *GistsService) IsStarred(ctx context.Context, id string) (bool, *Response, error) { u := fmt.Sprintf("gists/%v/star", id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return false, nil, err } + resp, err := s.client.Do(ctx, req, nil) starred, err := parseBoolResponse(err) return starred, resp, err @@ -322,7 +326,7 @@ func (s *GistsService) IsStarred(ctx context.Context, id string) (bool, *Respons // Fork a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#fork-a-gist +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#fork-a-gist func (s *GistsService) Fork(ctx context.Context, id string) (*Gist, *Response, error) { u := fmt.Sprintf("gists/%v/forks", id) req, err := s.client.NewRequest("POST", u, nil) @@ -341,7 +345,7 @@ func (s *GistsService) Fork(ctx context.Context, id string) (*Gist, *Response, e // ListForks lists forks of a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-gist-forks +// GitHub API docs: https://docs.github.com/en/rest/gists/gists#list-gist-forks func (s *GistsService) ListForks(ctx context.Context, id string, opts *ListOptions) ([]*GistFork, *Response, error) { u := fmt.Sprintf("gists/%v/forks", id) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/gists_comments.go b/vendor/github.com/google/go-github/v45/github/gists_comments.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/gists_comments.go rename to vendor/github.com/google/go-github/v45/github/gists_comments.go index a9452c9641..d551e9a11d 100644 --- a/vendor/github.com/google/go-github/v42/github/gists_comments.go +++ b/vendor/github.com/google/go-github/v45/github/gists_comments.go @@ -26,7 +26,7 @@ func (g GistComment) String() string { // ListComments lists all comments for a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#list-gist-comments +// GitHub API docs: https://docs.github.com/en/rest/gists/comments#list-gist-comments func (s *GistsService) ListComments(ctx context.Context, gistID string, opts *ListOptions) ([]*GistComment, *Response, error) { u := fmt.Sprintf("gists/%v/comments", gistID) u, err := addOptions(u, opts) @@ -50,7 +50,7 @@ func (s *GistsService) ListComments(ctx context.Context, gistID string, opts *Li // GetComment retrieves a single comment from a gist. 
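//
// A sketch of combining the star helpers above (client, ctx, and the gist ID
// are placeholders):
//
//	starred, _, err := client.Gists.IsStarred(ctx, "gistID")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if !starred {
//		if _, err := client.Gists.Star(ctx, "gistID"); err != nil {
//			log.Fatal(err)
//		}
//	}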
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#get-a-gist-comment +// GitHub API docs: https://docs.github.com/en/rest/gists/comments#get-a-gist-comment func (s *GistsService) GetComment(ctx context.Context, gistID string, commentID int64) (*GistComment, *Response, error) { u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) req, err := s.client.NewRequest("GET", u, nil) @@ -69,7 +69,7 @@ func (s *GistsService) GetComment(ctx context.Context, gistID string, commentID // CreateComment creates a comment for a gist. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#create-a-gist-comment +// GitHub API docs: https://docs.github.com/en/rest/gists/comments#create-a-gist-comment func (s *GistsService) CreateComment(ctx context.Context, gistID string, comment *GistComment) (*GistComment, *Response, error) { u := fmt.Sprintf("gists/%v/comments", gistID) req, err := s.client.NewRequest("POST", u, comment) @@ -88,7 +88,7 @@ func (s *GistsService) CreateComment(ctx context.Context, gistID string, comment // EditComment edits an existing gist comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#update-a-gist-comment +// GitHub API docs: https://docs.github.com/en/rest/gists/comments#update-a-gist-comment func (s *GistsService) EditComment(ctx context.Context, gistID string, commentID int64, comment *GistComment) (*GistComment, *Response, error) { u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) req, err := s.client.NewRequest("PATCH", u, comment) @@ -107,7 +107,7 @@ func (s *GistsService) EditComment(ctx context.Context, gistID string, commentID // DeleteComment deletes a gist comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gists/#delete-a-gist-comment +// GitHub API docs: https://docs.github.com/en/rest/gists/comments#delete-a-gist-comment func (s *GistsService) DeleteComment(ctx context.Context, gistID string, commentID int64) (*Response, error) { u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/git.go b/vendor/github.com/google/go-github/v45/github/git.go similarity index 77% rename from vendor/github.com/google/go-github/v42/github/git.go rename to vendor/github.com/google/go-github/v45/github/git.go index 36b33f5e05..8960de7b14 100644 --- a/vendor/github.com/google/go-github/v42/github/git.go +++ b/vendor/github.com/google/go-github/v45/github/git.go @@ -8,5 +8,5 @@ package github // GitService handles communication with the git data related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/ +// GitHub API docs: https://docs.github.com/en/rest/git/ type GitService service diff --git a/vendor/github.com/google/go-github/v42/github/git_blobs.go b/vendor/github.com/google/go-github/v45/github/git_blobs.go similarity index 80% rename from vendor/github.com/google/go-github/v42/github/git_blobs.go rename to vendor/github.com/google/go-github/v45/github/git_blobs.go index 6bc59c6f88..da0485ccbe 100644 --- a/vendor/github.com/google/go-github/v42/github/git_blobs.go +++ b/vendor/github.com/google/go-github/v45/github/git_blobs.go @@ -23,7 +23,7 @@ type Blob struct { // GetBlob fetches a blob from a repo given a SHA. 
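//
// A sketch of reading blob contents with the calls below; GetBlobRaw skips the
// base64 round-trip that GetBlob requires (client, ctx, and the SHA are
// placeholders):
//
//	data, _, err := client.Git.GetBlobRaw(ctx, "owner", "repo", blobSHA)
//	if err != nil {
//		log.Fatal(err)
//	}
//	os.Stdout.Write(data)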
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-blob +// GitHub API docs: https://docs.github.com/en/rest/git/blobs#get-a-blob func (s *GitService) GetBlob(ctx context.Context, owner string, repo string, sha string) (*Blob, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) req, err := s.client.NewRequest("GET", u, nil) @@ -33,29 +33,38 @@ func (s *GitService) GetBlob(ctx context.Context, owner string, repo string, sha blob := new(Blob) resp, err := s.client.Do(ctx, req, blob) - return blob, resp, err + if err != nil { + return nil, resp, err + } + + return blob, resp, nil } // GetBlobRaw fetches a blob's contents from a repo. // Unlike GetBlob, it returns the raw bytes rather than the base64-encoded data. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-blob +// GitHub API docs: https://docs.github.com/en/rest/git/blobs#get-a-blob func (s *GitService) GetBlobRaw(ctx context.Context, owner, repo, sha string) ([]byte, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } + req.Header.Set("Accept", "application/vnd.github.v3.raw") var buf bytes.Buffer resp, err := s.client.Do(ctx, req, &buf) - return buf.Bytes(), resp, err + if err != nil { + return nil, resp, err + } + + return buf.Bytes(), resp, nil } // CreateBlob creates a blob object. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#create-a-blob +// GitHub API docs: https://docs.github.com/en/rest/git/blobs#create-a-blob func (s *GitService) CreateBlob(ctx context.Context, owner string, repo string, blob *Blob) (*Blob, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo) req, err := s.client.NewRequest("POST", u, blob) @@ -65,5 +74,9 @@ func (s *GitService) CreateBlob(ctx context.Context, owner string, repo string, t := new(Blob) resp, err := s.client.Do(ctx, req, t) - return t, resp, err + if err != nil { + return nil, resp, err + } + + return t, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/git_commits.go b/vendor/github.com/google/go-github/v45/github/git_commits.go similarity index 96% rename from vendor/github.com/google/go-github/v42/github/git_commits.go rename to vendor/github.com/google/go-github/v45/github/git_commits.go index 7a728bee17..baedb3d686 100644 --- a/vendor/github.com/google/go-github/v42/github/git_commits.go +++ b/vendor/github.com/google/go-github/v45/github/git_commits.go @@ -70,7 +70,7 @@ func (c CommitAuthor) String() string { // GetCommit fetches the Commit object for a given SHA. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-commit +// GitHub API docs: https://docs.github.com/en/rest/git/commits#get-a-commit func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha) req, err := s.client.NewRequest("GET", u, nil) @@ -104,7 +104,7 @@ type createCommit struct { // data if omitted. If the commit.Author is omitted, it will be filled in with // the authenticated user’s information and the current date. 
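//
// A sketch of creating a commit object from an existing tree (assumes the
// usual Commit fields Message, Tree, and Parents; the SHAs are placeholders,
// and Author is left nil so the server fills it in, per the note above):
//
//	commit := &github.Commit{
//		Message: github.String("update vendored deps"),
//		Tree:    &github.Tree{SHA: github.String(treeSHA)},
//		Parents: []*github.Commit{{SHA: github.String(parentSHA)}},
//	}
//	created, _, err := client.Git.CreateCommit(ctx, "owner", "repo", commit)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(created.GetSHA())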
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#create-a-commit +// GitHub API docs: https://docs.github.com/en/rest/git/commits#create-a-commit func (s *GitService) CreateCommit(ctx context.Context, owner string, repo string, commit *Commit) (*Commit, *Response, error) { if commit == nil { return nil, nil, fmt.Errorf("commit must be provided") diff --git a/vendor/github.com/google/go-github/v42/github/git_refs.go b/vendor/github.com/google/go-github/v45/github/git_refs.go similarity index 89% rename from vendor/github.com/google/go-github/v42/github/git_refs.go rename to vendor/github.com/google/go-github/v45/github/git_refs.go index 259f27fada..883975cc0f 100644 --- a/vendor/github.com/google/go-github/v42/github/git_refs.go +++ b/vendor/github.com/google/go-github/v45/github/git_refs.go @@ -49,7 +49,7 @@ type updateRefRequest struct { // GetRef fetches a single reference in a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-reference +// GitHub API docs: https://docs.github.com/en/rest/git/refs#get-a-reference func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref string) (*Reference, *Response, error) { ref = strings.TrimPrefix(ref, "refs/") u := fmt.Sprintf("repos/%v/%v/git/ref/%v", owner, repo, refURLEscape(ref)) @@ -88,7 +88,7 @@ type ReferenceListOptions struct { // ListMatchingRefs lists references in a repository that match a supplied ref. // Use an empty ref to list all references. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#list-matching-references +// GitHub API docs: https://docs.github.com/en/rest/git/refs#list-matching-references func (s *GitService) ListMatchingRefs(ctx context.Context, owner, repo string, opts *ReferenceListOptions) ([]*Reference, *Response, error) { var ref string if opts != nil { @@ -116,7 +116,7 @@ func (s *GitService) ListMatchingRefs(ctx context.Context, owner, repo string, o // CreateRef creates a new ref in a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#create-a-reference +// GitHub API docs: https://docs.github.com/en/rest/git/refs#create-a-reference func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, ref *Reference) (*Reference, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo) req, err := s.client.NewRequest("POST", u, &createRefRequest{ @@ -139,7 +139,7 @@ func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, r // UpdateRef updates an existing ref in a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#update-a-reference +// GitHub API docs: https://docs.github.com/en/rest/git/refs#update-a-reference func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) { refPath := strings.TrimPrefix(*ref.Ref, "refs/") u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath) @@ -162,7 +162,7 @@ func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, r // DeleteRef deletes a ref from a repository. 
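//
// A sketch of pointing a new branch at a commit with CreateRef from above
// (assumes Reference's Ref and Object *GitObject fields; the names and SHA are
// placeholders):
//
//	ref := &github.Reference{
//		Ref:    github.String("refs/heads/update-vendor"),
//		Object: &github.GitObject{SHA: github.String(commitSHA)},
//	}
//	if _, _, err := client.Git.CreateRef(ctx, "owner", "repo", ref); err != nil {
//		log.Fatal(err)
//	}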
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#delete-a-reference +// GitHub API docs: https://docs.github.com/en/rest/git/refs#delete-a-reference func (s *GitService) DeleteRef(ctx context.Context, owner string, repo string, ref string) (*Response, error) { ref = strings.TrimPrefix(ref, "refs/") u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refURLEscape(ref)) diff --git a/vendor/github.com/google/go-github/v42/github/git_tags.go b/vendor/github.com/google/go-github/v45/github/git_tags.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/git_tags.go rename to vendor/github.com/google/go-github/v45/github/git_tags.go index 10029c4549..30d7b2c2d2 100644 --- a/vendor/github.com/google/go-github/v42/github/git_tags.go +++ b/vendor/github.com/google/go-github/v45/github/git_tags.go @@ -35,7 +35,7 @@ type createTagRequest struct { // GetTag fetches a tag from a repo given a SHA. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-tag +// GitHub API docs: https://docs.github.com/en/rest/git/tags#get-a-tag func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha) req, err := s.client.NewRequest("GET", u, nil) @@ -45,12 +45,16 @@ func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha tag := new(Tag) resp, err := s.client.Do(ctx, req, tag) - return tag, resp, err + if err != nil { + return nil, resp, err + } + + return tag, resp, nil } // CreateTag creates a tag object. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#create-a-tag-object +// GitHub API docs: https://docs.github.com/en/rest/git/tags#create-a-tag-object func (s *GitService) CreateTag(ctx context.Context, owner string, repo string, tag *Tag) (*Tag, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo) @@ -72,5 +76,9 @@ func (s *GitService) CreateTag(ctx context.Context, owner string, repo string, t t := new(Tag) resp, err := s.client.Do(ctx, req, t) - return t, resp, err + if err != nil { + return nil, resp, err + } + + return t, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/git_trees.go b/vendor/github.com/google/go-github/v45/github/git_trees.go similarity index 95% rename from vendor/github.com/google/go-github/v42/github/git_trees.go rename to vendor/github.com/google/go-github/v45/github/git_trees.go index e655e93a0b..db28976e03 100644 --- a/vendor/github.com/google/go-github/v42/github/git_trees.go +++ b/vendor/github.com/google/go-github/v45/github/git_trees.go @@ -93,7 +93,7 @@ func (t *TreeEntry) MarshalJSON() ([]byte, error) { // GetTree fetches the Tree object for a given sha hash from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#get-a-tree +// GitHub API docs: https://docs.github.com/en/rest/git/trees#get-a-tree func (s *GitService) GetTree(ctx context.Context, owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha) if recursive { @@ -124,7 +124,7 @@ type createTree struct { // path modifying that tree are specified, it will overwrite the contents of // that tree with the new path contents and write a new tree out. 
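//
// A sketch of building a one-file tree on top of a base tree with CreateTree
// below (assumes the TreeEntry fields Path, Mode, Type, and Content; all
// values are placeholders):
//
//	entries := []*github.TreeEntry{{
//		Path:    github.String("docs/NOTES.md"),
//		Mode:    github.String("100644"),
//		Type:    github.String("blob"),
//		Content: github.String("hello"),
//	}}
//	tree, _, err := client.Git.CreateTree(ctx, "owner", "repo", baseTreeSHA, entries)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(tree.GetSHA())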
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/git/#create-a-tree +// GitHub API docs: https://docs.github.com/en/rest/git/trees#create-a-tree func (s *GitService) CreateTree(ctx context.Context, owner string, repo string, baseTree string, entries []*TreeEntry) (*Tree, *Response, error) { u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo) diff --git a/vendor/github.com/google/go-github/v42/github/github-accessors.go b/vendor/github.com/google/go-github/v45/github/github-accessors.go similarity index 95% rename from vendor/github.com/google/go-github/v42/github/github-accessors.go rename to vendor/github.com/google/go-github/v45/github/github-accessors.go index 12d6019f0a..0092c58840 100644 --- a/vendor/github.com/google/go-github/v42/github/github-accessors.go +++ b/vendor/github.com/google/go-github/v45/github/github-accessors.go @@ -4,6 +4,8 @@ // license that can be found in the LICENSE file. // Code generated by gen-accessors; DO NOT EDIT. +// Instead, please run "go generate ./..." as described here: +// https://github.com/google/go-github/blob/master/CONTRIBUTING.md#submitting-a-patch package github @@ -60,6 +62,30 @@ func (a *ActionsPermissions) GetSelectedActionsURL() string { return *a.SelectedActionsURL } +// GetAllowedActions returns the AllowedActions field if it's non-nil, zero value otherwise. +func (a *ActionsPermissionsRepository) GetAllowedActions() string { + if a == nil || a.AllowedActions == nil { + return "" + } + return *a.AllowedActions +} + +// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. +func (a *ActionsPermissionsRepository) GetEnabled() bool { + if a == nil || a.Enabled == nil { + return false + } + return *a.Enabled +} + +// GetSelectedActionsURL returns the SelectedActionsURL field if it's non-nil, zero value otherwise. +func (a *ActionsPermissionsRepository) GetSelectedActionsURL() string { + if a == nil || a.SelectedActionsURL == nil { + return "" + } + return *a.SelectedActionsURL +} + // GetURL returns the URL field if it's non-nil, zero value otherwise. func (a *AdminEnforcement) GetURL() string { if a == nil || a.URL == nil { @@ -172,6 +198,62 @@ func (a *AdvancedSecurityCommittersBreakdown) GetUserLogin() string { return *a.UserLogin } +// GetType returns the Type field if it's non-nil, zero value otherwise. +func (a *AdvisoryIdentifier) GetType() string { + if a == nil || a.Type == nil { + return "" + } + return *a.Type +} + +// GetValue returns the Value field if it's non-nil, zero value otherwise. +func (a *AdvisoryIdentifier) GetValue() string { + if a == nil || a.Value == nil { + return "" + } + return *a.Value +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (a *AdvisoryReference) GetURL() string { + if a == nil || a.URL == nil { + return "" + } + return *a.URL +} + +// GetFirstPatchedVersion returns the FirstPatchedVersion field. +func (a *AdvisoryVulnerability) GetFirstPatchedVersion() *FirstPatchedVersion { + if a == nil { + return nil + } + return a.FirstPatchedVersion +} + +// GetPackage returns the Package field. +func (a *AdvisoryVulnerability) GetPackage() *VulnerabilityPackage { + if a == nil { + return nil + } + return a.Package +} + +// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. 
+func (a *AdvisoryVulnerability) GetSeverity() string { + if a == nil || a.Severity == nil { + return "" + } + return *a.Severity +} + +// GetVulnerableVersionRange returns the VulnerableVersionRange field if it's non-nil, zero value otherwise. +func (a *AdvisoryVulnerability) GetVulnerableVersionRange() string { + if a == nil || a.VulnerableVersionRange == nil { + return "" + } + return *a.VulnerableVersionRange +} + // GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise. func (a *Alert) GetClosedAt() Timestamp { if a == nil || a.ClosedAt == nil { @@ -220,6 +302,14 @@ func (a *Alert) GetDismissedReason() string { return *a.DismissedReason } +// GetFixedAt returns the FixedAt field if it's non-nil, zero value otherwise. +func (a *Alert) GetFixedAt() Timestamp { + if a == nil || a.FixedAt == nil { + return Timestamp{} + } + return *a.FixedAt +} + // GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. func (a *Alert) GetHTMLURL() string { if a == nil || a.HTMLURL == nil { @@ -244,6 +334,22 @@ func (a *Alert) GetMostRecentInstance() *MostRecentInstance { return a.MostRecentInstance } +// GetNumber returns the Number field if it's non-nil, zero value otherwise. +func (a *Alert) GetNumber() int { + if a == nil || a.Number == nil { + return 0 + } + return *a.Number +} + +// GetRepository returns the Repository field. +func (a *Alert) GetRepository() *Repository { + if a == nil { + return nil + } + return a.Repository +} + // GetRule returns the Rule field. func (a *Alert) GetRule() *Rule { if a == nil { @@ -292,6 +398,14 @@ func (a *Alert) GetTool() *Tool { return a.Tool } +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (a *Alert) GetUpdatedAt() Timestamp { + if a == nil || a.UpdatedAt == nil { + return Timestamp{} + } + return *a.UpdatedAt +} + // GetURL returns the URL field if it's non-nil, zero value otherwise. func (a *Alert) GetURL() string { if a == nil || a.URL == nil { @@ -316,6 +430,14 @@ func (a *AnalysesListOptions) GetSarifID() string { return *a.SarifID } +// GetSSHKeyFingerprints returns the SSHKeyFingerprints map if it's non-nil, an empty map otherwise. +func (a *APIMeta) GetSSHKeyFingerprints() map[string]string { + if a == nil || a.SSHKeyFingerprints == nil { + return map[string]string{} + } + return a.SSHKeyFingerprints +} + // GetVerifiablePasswordAuthentication returns the VerifiablePasswordAuthentication field if it's non-nil, zero value otherwise. func (a *APIMeta) GetVerifiablePasswordAuthentication() bool { if a == nil || a.VerifiablePasswordAuthentication == nil { @@ -2204,6 +2326,62 @@ func (c *CodeResult) GetSHA() string { return *c.SHA } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (c *CodeScanningAlertEvent) GetAction() string { + if c == nil || c.Action == nil { + return "" + } + return *c.Action +} + +// GetAlert returns the Alert field. +func (c *CodeScanningAlertEvent) GetAlert() *Alert { + if c == nil { + return nil + } + return c.Alert +} + +// GetCommitOID returns the CommitOID field if it's non-nil, zero value otherwise. +func (c *CodeScanningAlertEvent) GetCommitOID() string { + if c == nil || c.CommitOID == nil { + return "" + } + return *c.CommitOID +} + +// GetOrg returns the Org field. +func (c *CodeScanningAlertEvent) GetOrg() *Organization { + if c == nil { + return nil + } + return c.Org +} + +// GetRef returns the Ref field if it's non-nil, zero value otherwise. 
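The generated getters above are all nil-safe: gen-accessors emits a receiver nil-check for every pointer field, so callers can chain through deeply nested, possibly-nil payloads without guarding each level. A minimal sketch of that property, using only accessors added in this hunk (the nil event is contrived for illustration):

package main

import (
	"fmt"

	"github.com/google/go-github/v45/github"
)

func main() {
	// Calling a pointer-receiver method on a nil event is legal Go; each
	// generated getter checks its receiver and returns the zero value, so
	// the whole chain below is panic-free.
	var ev *github.CodeScanningAlertEvent
	fmt.Printf("action=%q\n", ev.GetAction())                // action=""
	fmt.Printf("alert url=%q\n", ev.GetAlert().GetHTMLURL()) // alert url=""
}
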
+func (c *CodeScanningAlertEvent) GetRef() string { + if c == nil || c.Ref == nil { + return "" + } + return *c.Ref +} + +// GetRepo returns the Repo field. +func (c *CodeScanningAlertEvent) GetRepo() *Repository { + if c == nil { + return nil + } + return c.Repo +} + +// GetSender returns the Sender field. +func (c *CodeScanningAlertEvent) GetSender() *User { + if c == nil { + return nil + } + return c.Sender +} + // GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise. func (c *CodeSearchResult) GetIncompleteResults() bool { if c == nil || c.IncompleteResults == nil { @@ -3260,6 +3438,14 @@ func (c *CreateEvent) GetMasterBranch() string { return *c.MasterBranch } +// GetOrg returns the Org field. +func (c *CreateEvent) GetOrg() *Organization { + if c == nil { + return nil + } + return c.Org +} + // GetPusherType returns the PusherType field if it's non-nil, zero value otherwise. func (c *CreateEvent) GetPusherType() string { if c == nil || c.PusherType == nil { @@ -3372,6 +3558,22 @@ func (c *CreateUserProjectOptions) GetBody() string { return *c.Body } +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetID() int64 { + if c == nil || c.ID == nil { + return 0 + } + return *c.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + // GetInstallation returns the Installation field. func (d *DeleteEvent) GetInstallation() *Installation { if d == nil { @@ -3444,6 +3646,30 @@ func (d *DeployKeyEvent) GetKey() *Key { return d.Key } +// GetOrganization returns the Organization field. +func (d *DeployKeyEvent) GetOrganization() *Organization { + if d == nil { + return nil + } + return d.Organization +} + +// GetRepo returns the Repo field. +func (d *DeployKeyEvent) GetRepo() *Repository { + if d == nil { + return nil + } + return d.Repo +} + +// GetSender returns the Sender field. +func (d *DeployKeyEvent) GetSender() *User { + if d == nil { + return nil + } + return d.Sender +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (d *Deployment) GetCreatedAt() Timestamp { if d == nil || d.CreatedAt == nil { @@ -4900,6 +5126,14 @@ func (f *Feeds) GetUserURL() string { return *f.UserURL } +// GetIdentifier returns the Identifier field if it's non-nil, zero value otherwise. +func (f *FirstPatchedVersion) GetIdentifier() string { + if f == nil || f.Identifier == nil { + return "" + } + return *f.Identifier +} + // GetForkee returns the Forkee field. func (f *ForkEvent) GetForkee() *Repository { if f == nil { @@ -7676,6 +7910,14 @@ func (l *LabelEvent) GetRepo() *Repository { return l.Repo } +// GetSender returns the Sender field. +func (l *LabelEvent) GetSender() *User { + if l == nil { + return nil + } + return l.Sender +} + // GetColor returns the Color field if it's non-nil, zero value otherwise. func (l *LabelResult) GetColor() string { if l == nil || l.Color == nil { @@ -8516,6 +8758,30 @@ func (m *MetaEvent) GetInstallation() *Installation { return m.Installation } +// GetOrg returns the Org field. +func (m *MetaEvent) GetOrg() *Organization { + if m == nil { + return nil + } + return m.Org +} + +// GetRepo returns the Repo field. +func (m *MetaEvent) GetRepo() *Repository { + if m == nil { + return nil + } + return m.Repo +} + +// GetSender returns the Sender field. 
+func (m *MetaEvent) GetSender() *User { + if m == nil { + return nil + } + return m.Sender +} + // GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. func (m *Metric) GetHTMLURL() string { if m == nil || m.HTMLURL == nil { @@ -9356,6 +9622,14 @@ func (o *Organization) GetMembersCanCreateRepos() bool { return *o.MembersCanCreateRepos } +// GetMembersCanForkPrivateRepos returns the MembersCanForkPrivateRepos field if it's non-nil, zero value otherwise. +func (o *Organization) GetMembersCanForkPrivateRepos() bool { + if o == nil || o.MembersCanForkPrivateRepos == nil { + return false + } + return *o.MembersCanForkPrivateRepos +} + // GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise. func (o *Organization) GetMembersURL() string { if o == nil || o.MembersURL == nil { @@ -9484,6 +9758,14 @@ func (o *Organization) GetURL() string { return *o.URL } +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (o *OrganizationCustomRepoRoles) GetTotalCount() int { + if o == nil || o.TotalCount == nil { + return 0 + } + return *o.TotalCount +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (o *OrganizationEvent) GetAction() string { if o == nil || o.Action == nil { @@ -10484,6 +10766,22 @@ func (p *PagesUpdate) GetCNAME() string { return *p.CNAME } +// GetHTTPSEnforced returns the HTTPSEnforced field if it's non-nil, zero value otherwise. +func (p *PagesUpdate) GetHTTPSEnforced() bool { + if p == nil || p.HTTPSEnforced == nil { + return false + } + return *p.HTTPSEnforced +} + +// GetPublic returns the Public field if it's non-nil, zero value otherwise. +func (p *PagesUpdate) GetPublic() bool { + if p == nil || p.Public == nil { + return false + } + return *p.Public +} + // GetSource returns the Source field if it's non-nil, zero value otherwise. func (p *PagesUpdate) GetSource() string { if p == nil || p.Source == nil { @@ -10516,6 +10814,30 @@ func (p *PingEvent) GetInstallation() *Installation { return p.Installation } +// GetOrg returns the Org field. +func (p *PingEvent) GetOrg() *Organization { + if p == nil { + return nil + } + return p.Org +} + +// GetRepo returns the Repo field. +func (p *PingEvent) GetRepo() *Repository { + if p == nil { + return nil + } + return p.Repo +} + +// GetSender returns the Sender field. +func (p *PingEvent) GetSender() *User { + if p == nil { + return nil + } + return p.Sender +} + // GetZen returns the Zen field if it's non-nil, zero value otherwise. func (p *PingEvent) GetZen() string { if p == nil || p.Zen == nil { @@ -12596,20 +12918,76 @@ func (p *PullRequestReviewsEnforcementUpdate) GetDismissStaleReviews() bool { return *p.DismissStaleReviews } +// GetRequireCodeOwnerReviews returns the RequireCodeOwnerReviews field if it's non-nil, zero value otherwise. +func (p *PullRequestReviewsEnforcementUpdate) GetRequireCodeOwnerReviews() bool { + if p == nil || p.RequireCodeOwnerReviews == nil { + return false + } + return *p.RequireCodeOwnerReviews +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. -func (p *PullRequestTargetEvent) GetAction() string { +func (p *PullRequestReviewThreadEvent) GetAction() string { if p == nil || p.Action == nil { return "" } return *p.Action } -// GetAfter returns the After field if it's non-nil, zero value otherwise. 
-func (p *PullRequestTargetEvent) GetAfter() string { - if p == nil || p.After == nil { - return "" - } - return *p.After +// GetInstallation returns the Installation field. +func (p *PullRequestReviewThreadEvent) GetInstallation() *Installation { + if p == nil { + return nil + } + return p.Installation +} + +// GetPullRequest returns the PullRequest field. +func (p *PullRequestReviewThreadEvent) GetPullRequest() *PullRequest { + if p == nil { + return nil + } + return p.PullRequest +} + +// GetRepo returns the Repo field. +func (p *PullRequestReviewThreadEvent) GetRepo() *Repository { + if p == nil { + return nil + } + return p.Repo +} + +// GetSender returns the Sender field. +func (p *PullRequestReviewThreadEvent) GetSender() *User { + if p == nil { + return nil + } + return p.Sender +} + +// GetThread returns the Thread field. +func (p *PullRequestReviewThreadEvent) GetThread() *PullRequestThread { + if p == nil { + return nil + } + return p.Thread +} + +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (p *PullRequestTargetEvent) GetAction() string { + if p == nil || p.Action == nil { + return "" + } + return *p.Action +} + +// GetAfter returns the After field if it's non-nil, zero value otherwise. +func (p *PullRequestTargetEvent) GetAfter() string { + if p == nil || p.After == nil { + return "" + } + return *p.After } // GetAssignee returns the Assignee field. @@ -12708,6 +13086,22 @@ func (p *PullRequestTargetEvent) GetSender() *User { return p.Sender } +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (p *PullRequestThread) GetID() int64 { + if p == nil || p.ID == nil { + return 0 + } + return *p.ID +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (p *PullRequestThread) GetNodeID() string { + if p == nil || p.NodeID == nil { + return "" + } + return *p.NodeID +} + // GetMergablePulls returns the MergablePulls field if it's non-nil, zero value otherwise. func (p *PullStats) GetMergablePulls() int { if p == nil || p.MergablePulls == nil { @@ -12764,6 +13158,14 @@ func (p *PunchCard) GetHour() int { return *p.Hour } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (p *PushEvent) GetAction() string { + if p == nil || p.Action == nil { + return "" + } + return *p.Action +} + // GetAfter returns the After field if it's non-nil, zero value otherwise. func (p *PushEvent) GetAfter() string { if p == nil || p.After == nil { @@ -13212,6 +13614,22 @@ func (p *PushEventRepository) GetWatchersCount() int { return *p.WatchersCount } +// GetActionsRunnerRegistration returns the ActionsRunnerRegistration field. +func (r *RateLimits) GetActionsRunnerRegistration() *Rate { + if r == nil { + return nil + } + return r.ActionsRunnerRegistration +} + +// GetCodeScanningUpload returns the CodeScanningUpload field. +func (r *RateLimits) GetCodeScanningUpload() *Rate { + if r == nil { + return nil + } + return r.CodeScanningUpload +} + // GetCore returns the Core field. func (r *RateLimits) GetCore() *Rate { if r == nil { @@ -13220,6 +13638,30 @@ func (r *RateLimits) GetCore() *Rate { return r.Core } +// GetGraphQL returns the GraphQL field. +func (r *RateLimits) GetGraphQL() *Rate { + if r == nil { + return nil + } + return r.GraphQL +} + +// GetIntegrationManifest returns the IntegrationManifest field. +func (r *RateLimits) GetIntegrationManifest() *Rate { + if r == nil { + return nil + } + return r.IntegrationManifest +} + +// GetSCIM returns the SCIM field. 
+func (r *RateLimits) GetSCIM() *Rate { + if r == nil { + return nil + } + return r.SCIM +} + // GetSearch returns the Search field. func (r *RateLimits) GetSearch() *Rate { if r == nil { @@ -13228,6 +13670,14 @@ func (r *RateLimits) GetSearch() *Rate { return r.Search } +// GetSourceImport returns the SourceImport field. +func (r *RateLimits) GetSourceImport() *Rate { + if r == nil { + return nil + } + return r.SourceImport +} + // GetContent returns the Content field if it's non-nil, zero value otherwise. func (r *Reaction) GetContent() string { if r == nil || r.Content == nil { @@ -13580,6 +14030,38 @@ func (r *RenameOrgResponse) GetURL() string { return *r.URL } +// GetBranch returns the Branch field if it's non-nil, zero value otherwise. +func (r *RepoMergeUpstreamRequest) GetBranch() string { + if r == nil || r.Branch == nil { + return "" + } + return *r.Branch +} + +// GetBaseBranch returns the BaseBranch field if it's non-nil, zero value otherwise. +func (r *RepoMergeUpstreamResult) GetBaseBranch() string { + if r == nil || r.BaseBranch == nil { + return "" + } + return *r.BaseBranch +} + +// GetMergeType returns the MergeType field if it's non-nil, zero value otherwise. +func (r *RepoMergeUpstreamResult) GetMergeType() string { + if r == nil || r.MergeType == nil { + return "" + } + return *r.MergeType +} + +// GetMessage returns the Message field if it's non-nil, zero value otherwise. +func (r *RepoMergeUpstreamResult) GetMessage() string { + if r == nil || r.Message == nil { + return "" + } + return *r.Message +} + // GetFrom returns the From field if it's non-nil, zero value otherwise. func (r *RepoName) GetFrom() string { if r == nil || r.From == nil { @@ -13612,6 +14094,14 @@ func (r *Repository) GetAllowAutoMerge() bool { return *r.AllowAutoMerge } +// GetAllowForking returns the AllowForking field if it's non-nil, zero value otherwise. +func (r *Repository) GetAllowForking() bool { + if r == nil || r.AllowForking == nil { + return false + } + return *r.AllowForking +} + // GetAllowMergeCommit returns the AllowMergeCommit field if it's non-nil, zero value otherwise. func (r *Repository) GetAllowMergeCommit() bool { if r == nil || r.AllowMergeCommit == nil { @@ -13636,6 +14126,14 @@ func (r *Repository) GetAllowSquashMerge() bool { return *r.AllowSquashMerge } +// GetAllowUpdateBranch returns the AllowUpdateBranch field if it's non-nil, zero value otherwise. +func (r *Repository) GetAllowUpdateBranch() bool { + if r == nil || r.AllowUpdateBranch == nil { + return false + } + return *r.AllowUpdateBranch +} + // GetArchived returns the Archived field if it's non-nil, zero value otherwise. func (r *Repository) GetArchived() bool { if r == nil || r.Archived == nil { @@ -14180,6 +14678,14 @@ func (r *Repository) GetReleasesURL() string { return *r.ReleasesURL } +// GetRoleName returns the RoleName field if it's non-nil, zero value otherwise. +func (r *Repository) GetRoleName() string { + if r == nil || r.RoleName == nil { + return "" + } + return *r.RoleName +} + // GetSecurityAndAnalysis returns the SecurityAndAnalysis field. func (r *Repository) GetSecurityAndAnalysis() *SecurityAndAnalysis { if r == nil { @@ -14324,6 +14830,14 @@ func (r *Repository) GetURL() string { return *r.URL } +// GetUseSquashPRTitleAsDefault returns the UseSquashPRTitleAsDefault field if it's non-nil, zero value otherwise. 
+func (r *Repository) GetUseSquashPRTitleAsDefault() bool { + if r == nil || r.UseSquashPRTitleAsDefault == nil { + return false + } + return *r.UseSquashPRTitleAsDefault +} + // GetVisibility returns the Visibility field if it's non-nil, zero value otherwise. func (r *Repository) GetVisibility() string { if r == nil || r.Visibility == nil { @@ -14764,6 +15278,38 @@ func (r *RepositoryEvent) GetSender() *User { return r.Sender } +// GetOrg returns the Org field. +func (r *RepositoryImportEvent) GetOrg() *Organization { + if r == nil { + return nil + } + return r.Org +} + +// GetRepo returns the Repo field. +func (r *RepositoryImportEvent) GetRepo() *Repository { + if r == nil { + return nil + } + return r.Repo +} + +// GetSender returns the Sender field. +func (r *RepositoryImportEvent) GetSender() *User { + if r == nil { + return nil + } + return r.Sender +} + +// GetStatus returns the Status field if it's non-nil, zero value otherwise. +func (r *RepositoryImportEvent) GetStatus() string { + if r == nil || r.Status == nil { + return "" + } + return *r.Status +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (r *RepositoryInvitation) GetCreatedAt() Timestamp { if r == nil || r.CreatedAt == nil { @@ -15276,6 +15822,14 @@ func (r *RepositoryVulnerabilityAlertEvent) GetRepository() *Repository { return r.Repository } +// GetSender returns the Sender field. +func (r *RepositoryVulnerabilityAlertEvent) GetSender() *User { + if r == nil { + return nil + } + return r.Sender +} + // GetForkRepos returns the ForkRepos field if it's non-nil, zero value otherwise. func (r *RepoStats) GetForkRepos() int { if r == nil || r.ForkRepos == nil { @@ -15420,6 +15974,14 @@ func (r *RequiredReviewer) GetType() string { return *r.Type } +// GetAppID returns the AppID field if it's non-nil, zero value otherwise. +func (r *RequiredStatusCheck) GetAppID() int64 { + if r == nil || r.AppID == nil { + return 0 + } + return *r.AppID +} + // GetStrict returns the Strict field if it's non-nil, zero value otherwise. func (r *RequiredStatusChecksRequest) GetStrict() bool { if r == nil || r.Strict == nil { @@ -15908,98 +16470,426 @@ func (s *SecretScanning) GetStatus() string { return *s.Status } -// GetAdvancedSecurity returns the AdvancedSecurity field. -func (s *SecurityAndAnalysis) GetAdvancedSecurity() *AdvancedSecurity { - if s == nil { - return nil +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetCreatedAt() Timestamp { + if s == nil || s.CreatedAt == nil { + return Timestamp{} } - return s.AdvancedSecurity + return *s.CreatedAt } -// GetSecretScanning returns the SecretScanning field. -func (s *SecurityAndAnalysis) GetSecretScanning() *SecretScanning { - if s == nil { - return nil +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetHTMLURL() string { + if s == nil || s.HTMLURL == nil { + return "" } - return s.SecretScanning + return *s.HTMLURL } -// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. -func (s *SelectedReposList) GetTotalCount() int { - if s == nil || s.TotalCount == nil { +// GetLocationsURL returns the LocationsURL field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetLocationsURL() string { + if s == nil || s.LocationsURL == nil { + return "" + } + return *s.LocationsURL +} + +// GetNumber returns the Number field if it's non-nil, zero value otherwise. 
+func (s *SecretScanningAlert) GetNumber() int { + if s == nil || s.Number == nil { return 0 } - return *s.TotalCount + return *s.Number } -// GetName returns the Name field if it's non-nil, zero value otherwise. -func (s *ServiceHook) GetName() string { - if s == nil || s.Name == nil { +// GetResolution returns the Resolution field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetResolution() string { + if s == nil || s.Resolution == nil { return "" } - return *s.Name + return *s.Resolution } -// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. -func (s *SignaturesProtectedBranch) GetEnabled() bool { - if s == nil || s.Enabled == nil { - return false +// GetResolvedAt returns the ResolvedAt field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetResolvedAt() Timestamp { + if s == nil || s.ResolvedAt == nil { + return Timestamp{} } - return *s.Enabled + return *s.ResolvedAt } -// GetURL returns the URL field if it's non-nil, zero value otherwise. -func (s *SignaturesProtectedBranch) GetURL() string { - if s == nil || s.URL == nil { +// GetResolvedBy returns the ResolvedBy field. +func (s *SecretScanningAlert) GetResolvedBy() *User { + if s == nil { + return nil + } + return s.ResolvedBy +} + +// GetSecret returns the Secret field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetSecret() string { + if s == nil || s.Secret == nil { return "" } - return *s.URL + return *s.Secret } -// GetPayload returns the Payload field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetPayload() string { - if s == nil || s.Payload == nil { +// GetSecretType returns the SecretType field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetSecretType() string { + if s == nil || s.SecretType == nil { return "" } - return *s.Payload + return *s.SecretType } -// GetReason returns the Reason field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetReason() string { - if s == nil || s.Reason == nil { +// GetState returns the State field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetState() string { + if s == nil || s.State == nil { return "" } - return *s.Reason + return *s.State } -// GetSignature returns the Signature field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetSignature() string { - if s == nil || s.Signature == nil { +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlert) GetURL() string { + if s == nil || s.URL == nil { return "" } - return *s.Signature + return *s.URL } -// GetVerified returns the Verified field if it's non-nil, zero value otherwise. -func (s *SignatureVerification) GetVerified() bool { - if s == nil || s.Verified == nil { - return false +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertEvent) GetAction() string { + if s == nil || s.Action == nil { + return "" } - return *s.Verified + return *s.Action } -// GetActor returns the Actor field. -func (s *Source) GetActor() *User { +// GetAlert returns the Alert field. +func (s *SecretScanningAlertEvent) GetAlert() *SecretScanningAlert { if s == nil { return nil } - return s.Actor + return s.Alert } -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (s *Source) GetID() int64 { - if s == nil || s.ID == nil { - return 0 +// GetEnterprise returns the Enterprise field. 
+func (s *SecretScanningAlertEvent) GetEnterprise() *Enterprise { + if s == nil { + return nil + } + return s.Enterprise +} + +// GetInstallation returns the Installation field. +func (s *SecretScanningAlertEvent) GetInstallation() *Installation { + if s == nil { + return nil + } + return s.Installation +} + +// GetOrganization returns the Organization field. +func (s *SecretScanningAlertEvent) GetOrganization() *Organization { + if s == nil { + return nil + } + return s.Organization +} + +// GetRepo returns the Repo field. +func (s *SecretScanningAlertEvent) GetRepo() *Repository { + if s == nil { + return nil + } + return s.Repo +} + +// GetSender returns the Sender field. +func (s *SecretScanningAlertEvent) GetSender() *User { + if s == nil { + return nil + } + return s.Sender +} + +// GetDetails returns the Details field. +func (s *SecretScanningAlertLocation) GetDetails() *SecretScanningAlertLocationDetails { + if s == nil { + return nil + } + return s.Details +} + +// GetType returns the Type field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocation) GetType() string { + if s == nil || s.Type == nil { + return "" + } + return *s.Type +} + +// GetBlobSHA returns the BlobSHA field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetBlobSHA() string { + if s == nil || s.BlobSHA == nil { + return "" + } + return *s.BlobSHA +} + +// GetBlobURL returns the BlobURL field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetBlobURL() string { + if s == nil || s.BlobURL == nil { + return "" + } + return *s.BlobURL +} + +// GetCommitSHA returns the CommitSHA field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetCommitSHA() string { + if s == nil || s.CommitSHA == nil { + return "" + } + return *s.CommitSHA +} + +// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetCommitURL() string { + if s == nil || s.CommitURL == nil { + return "" + } + return *s.CommitURL +} + +// GetEndColumn returns the EndColumn field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetEndColumn() int { + if s == nil || s.EndColumn == nil { + return 0 + } + return *s.EndColumn +} + +// GetEndLine returns the EndLine field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetEndLine() int { + if s == nil || s.EndLine == nil { + return 0 + } + return *s.EndLine +} + +// GetPath returns the Path field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetPath() string { + if s == nil || s.Path == nil { + return "" + } + return *s.Path +} + +// GetStartColumn returns the StartColumn field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetStartColumn() int { + if s == nil || s.StartColumn == nil { + return 0 + } + return *s.StartColumn +} + +// GetStartline returns the Startline field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertLocationDetails) GetStartline() int { + if s == nil || s.Startline == nil { + return 0 + } + return *s.Startline +} + +// GetResolution returns the Resolution field if it's non-nil, zero value otherwise. 
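These SecretScanningAlert accessors pair with the SecretScanningService that this diff wires into the client in github.go below. A hedged sketch of reading alerts through them; the ListAlertsForRepo method and its nil-able options argument are assumed from the v45 secret_scanning.go, which is not part of this hunk, and owner/repo are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // real use needs an authenticated client

	// Assumed v45 API: SecretScanning.ListAlertsForRepo(ctx, owner, repo, opts);
	// passing nil options requests the default listing.
	alerts, _, err := client.SecretScanning.ListAlertsForRepo(ctx, "octocat", "hello-world", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range alerts {
		// Every field is a pointer; the getters above make the reads nil-safe.
		fmt.Printf("#%d %s state=%s resolution=%q\n",
			a.GetNumber(), a.GetSecretType(), a.GetState(), a.GetResolution())
	}
}
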
+func (s *SecretScanningAlertUpdateOptions) GetResolution() string { + if s == nil || s.Resolution == nil { + return "" + } + return *s.Resolution +} + +// GetSecretType returns the SecretType field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertUpdateOptions) GetSecretType() string { + if s == nil || s.SecretType == nil { + return "" + } + return *s.SecretType +} + +// GetState returns the State field if it's non-nil, zero value otherwise. +func (s *SecretScanningAlertUpdateOptions) GetState() string { + if s == nil || s.State == nil { + return "" + } + return *s.State +} + +// GetDescription returns the Description field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetDescription() string { + if s == nil || s.Description == nil { + return "" + } + return *s.Description +} + +// GetGHSAID returns the GHSAID field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetGHSAID() string { + if s == nil || s.GHSAID == nil { + return "" + } + return *s.GHSAID +} + +// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetPublishedAt() Timestamp { + if s == nil || s.PublishedAt == nil { + return Timestamp{} + } + return *s.PublishedAt +} + +// GetSeverity returns the Severity field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetSeverity() string { + if s == nil || s.Severity == nil { + return "" + } + return *s.Severity +} + +// GetSummary returns the Summary field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetSummary() string { + if s == nil || s.Summary == nil { + return "" + } + return *s.Summary +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetUpdatedAt() Timestamp { + if s == nil || s.UpdatedAt == nil { + return Timestamp{} + } + return *s.UpdatedAt +} + +// GetWithdrawnAt returns the WithdrawnAt field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisory) GetWithdrawnAt() Timestamp { + if s == nil || s.WithdrawnAt == nil { + return Timestamp{} + } + return *s.WithdrawnAt +} + +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (s *SecurityAdvisoryEvent) GetAction() string { + if s == nil || s.Action == nil { + return "" + } + return *s.Action +} + +// GetSecurityAdvisory returns the SecurityAdvisory field. +func (s *SecurityAdvisoryEvent) GetSecurityAdvisory() *SecurityAdvisory { + if s == nil { + return nil + } + return s.SecurityAdvisory +} + +// GetAdvancedSecurity returns the AdvancedSecurity field. +func (s *SecurityAndAnalysis) GetAdvancedSecurity() *AdvancedSecurity { + if s == nil { + return nil + } + return s.AdvancedSecurity +} + +// GetSecretScanning returns the SecretScanning field. +func (s *SecurityAndAnalysis) GetSecretScanning() *SecretScanning { + if s == nil { + return nil + } + return s.SecretScanning +} + +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (s *SelectedReposList) GetTotalCount() int { + if s == nil || s.TotalCount == nil { + return 0 + } + return *s.TotalCount +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (s *ServiceHook) GetName() string { + if s == nil || s.Name == nil { + return "" + } + return *s.Name +} + +// GetEnabled returns the Enabled field if it's non-nil, zero value otherwise. 
+func (s *SignaturesProtectedBranch) GetEnabled() bool { + if s == nil || s.Enabled == nil { + return false + } + return *s.Enabled +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (s *SignaturesProtectedBranch) GetURL() string { + if s == nil || s.URL == nil { + return "" + } + return *s.URL +} + +// GetPayload returns the Payload field if it's non-nil, zero value otherwise. +func (s *SignatureVerification) GetPayload() string { + if s == nil || s.Payload == nil { + return "" + } + return *s.Payload +} + +// GetReason returns the Reason field if it's non-nil, zero value otherwise. +func (s *SignatureVerification) GetReason() string { + if s == nil || s.Reason == nil { + return "" + } + return *s.Reason +} + +// GetSignature returns the Signature field if it's non-nil, zero value otherwise. +func (s *SignatureVerification) GetSignature() string { + if s == nil || s.Signature == nil { + return "" + } + return *s.Signature +} + +// GetVerified returns the Verified field if it's non-nil, zero value otherwise. +func (s *SignatureVerification) GetVerified() bool { + if s == nil || s.Verified == nil { + return false + } + return *s.Verified +} + +// GetActor returns the Actor field. +func (s *Source) GetActor() *User { + if s == nil { + return nil + } + return s.Actor +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (s *Source) GetID() int64 { + if s == nil || s.ID == nil { + return 0 } return *s.ID } @@ -17900,6 +18790,14 @@ func (u *User) GetReposURL() string { return *u.ReposURL } +// GetRoleName returns the RoleName field if it's non-nil, zero value otherwise. +func (u *User) GetRoleName() string { + if u == nil || u.RoleName == nil { + return "" + } + return *u.RoleName +} + // GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise. func (u *User) GetSiteAdmin() bool { if u == nil || u.SiteAdmin == nil { @@ -18404,208 +19302,40 @@ func (u *UserSuspendOptions) GetReason() string { return *u.Reason } -// GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WatchEvent) GetAction() string { - if w == nil || w.Action == nil { +// GetEcosystem returns the Ecosystem field if it's non-nil, zero value otherwise. +func (v *VulnerabilityPackage) GetEcosystem() string { + if v == nil || v.Ecosystem == nil { return "" } - return *w.Action -} - -// GetInstallation returns the Installation field. -func (w *WatchEvent) GetInstallation() *Installation { - if w == nil { - return nil - } - return w.Installation -} - -// GetRepo returns the Repo field. -func (w *WatchEvent) GetRepo() *Repository { - if w == nil { - return nil - } - return w.Repo -} - -// GetSender returns the Sender field. -func (w *WatchEvent) GetSender() *User { - if w == nil { - return nil - } - return w.Sender -} - -// GetEmail returns the Email field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetEmail() string { - if w == nil || w.Email == nil { - return "" - } - return *w.Email + return *v.Ecosystem } // GetName returns the Name field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetName() string { - if w == nil || w.Name == nil { - return "" - } - return *w.Name -} - -// GetUsername returns the Username field if it's non-nil, zero value otherwise. -func (w *WebHookAuthor) GetUsername() string { - if w == nil || w.Username == nil { - return "" - } - return *w.Username -} - -// GetAuthor returns the Author field. 
-func (w *WebHookCommit) GetAuthor() *WebHookAuthor { - if w == nil { - return nil - } - return w.Author -} - -// GetCommitter returns the Committer field. -func (w *WebHookCommit) GetCommitter() *WebHookAuthor { - if w == nil { - return nil - } - return w.Committer -} - -// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetDistinct() bool { - if w == nil || w.Distinct == nil { - return false - } - return *w.Distinct -} - -// GetID returns the ID field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetID() string { - if w == nil || w.ID == nil { - return "" - } - return *w.ID -} - -// GetMessage returns the Message field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetMessage() string { - if w == nil || w.Message == nil { +func (v *VulnerabilityPackage) GetName() string { + if v == nil || v.Name == nil { return "" } - return *w.Message -} - -// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise. -func (w *WebHookCommit) GetTimestamp() time.Time { - if w == nil || w.Timestamp == nil { - return time.Time{} - } - return *w.Timestamp + return *v.Name } // GetAction returns the Action field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetAction() string { +func (w *WatchEvent) GetAction() string { if w == nil || w.Action == nil { return "" } return *w.Action } -// GetAfter returns the After field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetAfter() string { - if w == nil || w.After == nil { - return "" - } - return *w.After -} - -// GetBefore returns the Before field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetBefore() string { - if w == nil || w.Before == nil { - return "" - } - return *w.Before -} - -// GetCompare returns the Compare field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetCompare() string { - if w == nil || w.Compare == nil { - return "" - } - return *w.Compare -} - -// GetCreated returns the Created field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetCreated() bool { - if w == nil || w.Created == nil { - return false - } - return *w.Created -} - -// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetDeleted() bool { - if w == nil || w.Deleted == nil { - return false - } - return *w.Deleted -} - -// GetForced returns the Forced field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetForced() bool { - if w == nil || w.Forced == nil { - return false - } - return *w.Forced -} - -// GetHeadCommit returns the HeadCommit field. -func (w *WebHookPayload) GetHeadCommit() *WebHookCommit { - if w == nil { - return nil - } - return w.HeadCommit -} - // GetInstallation returns the Installation field. -func (w *WebHookPayload) GetInstallation() *Installation { +func (w *WatchEvent) GetInstallation() *Installation { if w == nil { return nil } return w.Installation } -// GetOrganization returns the Organization field. -func (w *WebHookPayload) GetOrganization() *Organization { - if w == nil { - return nil - } - return w.Organization -} - -// GetPusher returns the Pusher field. -func (w *WebHookPayload) GetPusher() *User { - if w == nil { - return nil - } - return w.Pusher -} - -// GetRef returns the Ref field if it's non-nil, zero value otherwise. -func (w *WebHookPayload) GetRef() string { - if w == nil || w.Ref == nil { - return "" - } - return *w.Ref -} - // GetRepo returns the Repo field. 
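The deletions above drop the dedicated WebHookPayload and WebHookCommit accessors; upstream v45 deprecates those names in favor of PushEvent (note its new GetAction earlier in this file) and the concrete event types such as the new PullRequestReviewThreadEvent. A hedged migration sketch, assuming the package's webhook dispatch maps these event names to those types; the route and the nil webhook secret are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/v45/github"
)

func handler(w http.ResponseWriter, r *http.Request) {
	payload, err := github.ValidatePayload(r, nil) // nil webhook secret: sketch only
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	switch e := event.(type) {
	case *github.PushEvent: // replaces decoding into the removed WebHookPayload
		log.Printf("push %s: %s..%s", e.GetRef(), e.GetBefore(), e.GetAfter())
	case *github.PullRequestReviewThreadEvent: // new in this bump
		log.Printf("review thread %d %s", e.GetThread().GetID(), e.GetAction())
	}
}

func main() {
	http.HandleFunc("/webhook", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
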
-func (w *WebHookPayload) GetRepo() *Repository { +func (w *WatchEvent) GetRepo() *Repository { if w == nil { return nil } @@ -18613,7 +19343,7 @@ func (w *WebHookPayload) GetRepo() *Repository { } // GetSender returns the Sender field. -func (w *WebHookPayload) GetSender() *User { +func (w *WatchEvent) GetSender() *User { if w == nil { return nil } @@ -19012,6 +19742,14 @@ func (w *WorkflowJobEvent) GetWorkflowJob() *WorkflowJob { return w.WorkflowJob } +// GetActor returns the Actor field. +func (w *WorkflowRun) GetActor() *User { + if w == nil { + return nil + } + return w.Actor +} + // GetArtifactsURL returns the ArtifactsURL field if it's non-nil, zero value otherwise. func (w *WorkflowRun) GetArtifactsURL() string { if w == nil || w.ArtifactsURL == nil { @@ -19244,6 +19982,14 @@ func (w *WorkflowRun) GetWorkflowURL() string { return *w.WorkflowURL } +// GetExcludePullRequests returns the ExcludePullRequests field if it's non-nil, zero value otherwise. +func (w *WorkflowRunAttemptOptions) GetExcludePullRequests() bool { + if w == nil || w.ExcludePullRequests == nil { + return false + } + return *w.ExcludePullRequests +} + // GetJobs returns the Jobs field if it's non-nil, zero value otherwise. func (w *WorkflowRunBill) GetJobs() int { if w == nil || w.Jobs == nil { diff --git a/vendor/github.com/google/go-github/v42/github/github.go b/vendor/github.com/google/go-github/v45/github/github.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/github.go rename to vendor/github.com/google/go-github/v45/github/github.go index 4fb466e3a6..08b7db8e55 100644 --- a/vendor/github.com/google/go-github/v42/github/github.go +++ b/vendor/github.com/google/go-github/v45/github/github.go @@ -100,6 +100,9 @@ const ( // https://developer.github.com/changes/2018-03-16-protected-branches-required-approving-reviews/ mediaTypeRequiredApprovingReviewsPreview = "application/vnd.github.luke-cage-preview+json" + // https://developer.github.com/changes/2018-05-07-new-checks-api-public-beta/ + mediaTypeCheckRunsPreview = "application/vnd.github.antiope-preview+json" + // https://developer.github.com/enterprise/2.13/v3/repos/pre_receive_hooks/ mediaTypePreReceiveHooksPreview = "application/vnd.github.eye-scream-preview" @@ -127,10 +130,10 @@ const ( // https://developer.github.com/changes/2019-04-11-pulls-branches-for-commit/ mediaTypeListPullsOrBranchesForCommitPreview = "application/vnd.github.groot-preview+json" - // https://docs.github.com/en/free-pro-team@latest/rest/reference/previews/#repository-creation-permissions + // https://docs.github.com/en/rest/previews/#repository-creation-permissions mediaTypeMemberAllowedRepoCreationTypePreview = "application/vnd.github.surtur-preview+json" - // https://docs.github.com/en/free-pro-team@latest/rest/reference/previews/#create-and-use-repository-templates + // https://docs.github.com/en/rest/previews/#create-and-use-repository-templates mediaTypeRepositoryTemplatePreview = "application/vnd.github.baptiste-preview+json" // https://developer.github.com/changes/2019-10-03-multi-line-comments/ @@ -196,6 +199,7 @@ type Client struct { Repositories *RepositoriesService SCIM *SCIMService Search *SearchService + SecretScanning *SecretScanningService Teams *TeamsService Users *UsersService } @@ -325,6 +329,7 @@ func NewClient(httpClient *http.Client) *Client { c.Repositories = (*RepositoriesService)(&c.common) c.SCIM = (*SCIMService)(&c.common) c.Search = (*SearchService)(&c.common) + c.SecretScanning = (*SecretScanningService)(&c.common) c.Teams 
= (*TeamsService)(&c.common) c.Users = (*UsersService)(&c.common) return c @@ -348,6 +353,7 @@ func NewEnterpriseClient(baseURL, uploadURL string, httpClient *http.Client) (*C if err != nil { return nil, err } + if !strings.HasSuffix(baseEndpoint.Path, "/") { baseEndpoint.Path += "/" } @@ -361,6 +367,7 @@ func NewEnterpriseClient(baseURL, uploadURL string, httpClient *http.Client) (*C if err != nil { return nil, err } + if !strings.HasSuffix(uploadEndpoint.Path, "/") { uploadEndpoint.Path += "/" } @@ -385,6 +392,7 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ if !strings.HasSuffix(c.BaseURL.Path, "/") { return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) } + u, err := c.BaseURL.Parse(urlStr) if err != nil { return nil, err @@ -432,6 +440,7 @@ func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, m if err != nil { return nil, err } + req.ContentLength = size if mediaType == "" { @@ -616,6 +625,7 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro if ctx == nil { return nil, errNonNilContext } + req = withContext(ctx, req) rateLimitCategory := category(req.URL.Path) @@ -653,9 +663,13 @@ func (c *Client) BareDo(ctx context.Context, req *http.Request) (*Response, erro response := newResponse(resp) - c.rateMu.Lock() - c.rateLimits[rateLimitCategory] = response.Rate - c.rateMu.Unlock() + // Don't update the rate limits if this was a cached response. + // X-From-Cache is set by https://github.com/gregjones/httpcache + if response.Header.Get("X-From-Cache") == "" { + c.rateMu.Lock() + c.rateLimits[rateLimitCategory] = response.Rate + c.rateMu.Unlock() + } err = CheckResponse(resp) if err != nil { @@ -756,7 +770,7 @@ func compareHTTPResponse(r1, r2 *http.Response) bool { /* An ErrorResponse reports one or more errors caused by an API request. -GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/#client-errors +GitHub API docs: https://docs.github.com/en/rest/#client-errors */ type ErrorResponse struct { Response *http.Response // HTTP response that caused this error @@ -766,7 +780,7 @@ type ErrorResponse struct { Block *ErrorBlock `json:"block,omitempty"` // Most errors will also include a documentation_url field pointing // to some content that might help you resolve the error, see - // https://docs.github.com/en/free-pro-team@latest/rest/reference/#client-errors + // https://docs.github.com/en/rest/#client-errors DocumentationURL string `json:"documentation_url,omitempty"` } @@ -886,7 +900,7 @@ func (ae *AcceptedError) Is(target error) bool { } // AbuseRateLimitError occurs when GitHub returns 403 Forbidden response with the -// "documentation_url" field value equal to "https://docs.github.com/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#secondary-rate-limits". +// "documentation_url" field value equal to "https://docs.github.com/en/rest/overview/resources-in-the-rest-api#secondary-rate-limits". type AbuseRateLimitError struct { Response *http.Response // HTTP response that caused this error Message string `json:"message"` // error message @@ -949,7 +963,7 @@ GitHub error responses structure are often undocumented and inconsistent. Sometimes error is just a simple string (Issue #540). In such cases, Message represents an error message as a workaround. 
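The X-From-Cache guard added to BareDo above keeps responses replayed by a caching transport from overwriting the client's rate-limit bookkeeping with stale headers. A sketch of the setup that guard is for, using gregjones/httpcache (the library the new comment cites); the Zen call is just a cheap endpoint to exercise the cache:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
	"github.com/gregjones/httpcache"
)

func main() {
	// httpcache stamps responses it serves from its store with
	// "X-From-Cache: 1"; BareDo now skips rate-limit accounting for those.
	tp := httpcache.NewMemoryCacheTransport()
	client := github.NewClient(tp.Client())

	zen, _, err := client.Zen(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(zen)
}
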
-GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/#client-errors +GitHub API docs: https://docs.github.com/en/rest/#client-errors */ type Error struct { Resource string `json:"resource"` // resource on which the error occurred @@ -987,6 +1001,7 @@ func CheckResponse(r *http.Response) error { if c := r.StatusCode; 200 <= c && c <= 299 { return nil } + errorResponse := &ErrorResponse{Response: r} data, err := ioutil.ReadAll(r.Body) if err == nil && data != nil { @@ -1067,15 +1082,26 @@ type RateLimits struct { // requests are limited to 60 per hour. Authenticated requests are // limited to 5,000 per hour. // - // GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/#rate-limiting + // GitHub API docs: https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting Core *Rate `json:"core"` // The rate limit for search API requests. Unauthenticated requests // are limited to 10 requests per minutes. Authenticated requests are // limited to 30 per minute. // - // GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#rate-limit + // GitHub API docs: https://docs.github.com/en/rest/search#rate-limit Search *Rate `json:"search"` + + // GitHub API docs: https://docs.github.com/en/graphql/overview/resource-limitations#rate-limit + GraphQL *Rate `json:"graphql"` + + // GitHub API docs: https://docs.github.com/en/rest/rate-limit + IntegrationManifest *Rate `json:"integration_manifest"` + + SourceImport *Rate `json:"source_import"` + CodeScanningUpload *Rate `json:"code_scanning_upload"` + ActionsRunnerRegistration *Rate `json:"actions_runner_registration"` + SCIM *Rate `json:"scim"` } func (r RateLimits) String() string { @@ -1087,6 +1113,12 @@ type rateLimitCategory uint8 const ( coreCategory rateLimitCategory = iota searchCategory + graphqlCategory + integrationManifestCategory + sourceImportCategory + codeScanningUploadCategory + actionsRunnerRegistrationCategory + scimCategory categories // An array of this length will be able to contain all rate limit categories. ) @@ -1127,6 +1159,24 @@ func (c *Client) RateLimits(ctx context.Context) (*RateLimits, *Response, error) if response.Resources.Search != nil { c.rateLimits[searchCategory] = *response.Resources.Search } + if response.Resources.GraphQL != nil { + c.rateLimits[graphqlCategory] = *response.Resources.GraphQL + } + if response.Resources.IntegrationManifest != nil { + c.rateLimits[integrationManifestCategory] = *response.Resources.IntegrationManifest + } + if response.Resources.SourceImport != nil { + c.rateLimits[sourceImportCategory] = *response.Resources.SourceImport + } + if response.Resources.CodeScanningUpload != nil { + c.rateLimits[codeScanningUploadCategory] = *response.Resources.CodeScanningUpload + } + if response.Resources.ActionsRunnerRegistration != nil { + c.rateLimits[actionsRunnerRegistrationCategory] = *response.Resources.ActionsRunnerRegistration + } + if response.Resources.SCIM != nil { + c.rateLimits[scimCategory] = *response.Resources.SCIM + } c.rateMu.Unlock() } @@ -1164,7 +1214,7 @@ that need to use a higher rate limit associated with your OAuth application. This will add the client id and secret as a base64-encoded string in the format ClientID:ClientSecret and apply it as an "Authorization": "Basic" header.
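With the struct fields, categories, and per-category bookkeeping added above, RateLimits now reports the extra pools GitHub returns. A small sketch reading them through the generated getters added earlier in this diff; the new pools can be absent from a response, in which case the getters return nil:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	client := github.NewClient(nil)

	limits, _, err := client.RateLimits(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	if core := limits.GetCore(); core != nil {
		fmt.Printf("core: %d/%d\n", core.Remaining, core.Limit)
	}
	// The new pools may be omitted by the server; the getters return nil then.
	if gql := limits.GetGraphQL(); gql != nil {
		fmt.Printf("graphql: %d/%d\n", gql.Remaining, gql.Limit)
	}
	if scim := limits.GetSCIM(); scim != nil {
		fmt.Printf("scim: %d/%d\n", scim.Remaining, scim.Limit)
	}
}
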
-See https://docs.github.com/en/free-pro-team@latest/rest/reference/#unauthenticated-rate-limited-requests for +See https://docs.github.com/en/rest/#unauthenticated-rate-limited-requests for more information. */ type UnauthenticatedRateLimitedTransport struct { @@ -1270,6 +1320,35 @@ func formatRateReset(d time.Duration) string { return fmt.Sprintf("[rate reset in %v]", timeString) } +// When using roundTripWithOptionalFollowRedirect, note that it +// is the responsibility of the caller to close the response body. +func (c *Client) roundTripWithOptionalFollowRedirect(ctx context.Context, u string, followRedirects bool) (*http.Response, error) { + req, err := c.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + var resp *http.Response + // Use http.DefaultTransport if no custom Transport is configured + req = withContext(ctx, req) + if c.client.Transport == nil { + resp, err = http.DefaultTransport.RoundTrip(req) + } else { + resp, err = c.client.Transport.RoundTrip(req) + } + if err != nil { + return nil, err + } + + // If redirect response is returned, follow it + if followRedirects && resp.StatusCode == http.StatusMovedPermanently { + resp.Body.Close() + u = resp.Header.Get("Location") + resp, err = c.roundTripWithOptionalFollowRedirect(ctx, u, false) + } + return resp, err +} + // Bool is a helper routine that allocates a new bool value // to store v and returns a pointer to it. func Bool(v bool) *bool { return &v } diff --git a/vendor/github.com/google/go-github/v42/github/gitignore.go b/vendor/github.com/google/go-github/v45/github/gitignore.go similarity index 81% rename from vendor/github.com/google/go-github/v42/github/gitignore.go rename to vendor/github.com/google/go-github/v45/github/gitignore.go index 2f9d0bcfb5..a20a868b44 100644 --- a/vendor/github.com/google/go-github/v42/github/gitignore.go +++ b/vendor/github.com/google/go-github/v45/github/gitignore.go @@ -13,7 +13,7 @@ import ( // GitignoresService provides access to the gitignore related functions in the // GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gitignore/ +// GitHub API docs: https://docs.github.com/en/rest/gitignore/ type GitignoresService service // Gitignore represents a .gitignore file as returned by the GitHub API. @@ -28,7 +28,7 @@ func (g Gitignore) String() string { // List all available Gitignore templates. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gitignore/#listing-available-templates +// GitHub API docs: https://docs.github.com/en/rest/gitignore/#listing-available-templates func (s *GitignoresService) List(ctx context.Context) ([]string, *Response, error) { req, err := s.client.NewRequest("GET", "gitignore/templates", nil) if err != nil { @@ -46,7 +46,7 @@ func (s *GitignoresService) List(ctx context.Context) ([]string, *Response, erro // Get a Gitignore by name. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/gitignore/#get-a-gitignore-template +// GitHub API docs: https://docs.github.com/en/rest/gitignore#get-a-gitignore-template func (s *GitignoresService) Get(ctx context.Context, name string) (*Gitignore, *Response, error) { u := fmt.Sprintf("gitignore/templates/%v", name) req, err := s.client.NewRequest("GET", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/interactions.go b/vendor/github.com/google/go-github/v45/github/interactions.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/interactions.go rename to vendor/github.com/google/go-github/v45/github/interactions.go index 3b00d3c0d4..a690f61268 100644 --- a/vendor/github.com/google/go-github/v42/github/interactions.go +++ b/vendor/github.com/google/go-github/v45/github/interactions.go @@ -8,7 +8,7 @@ package github // InteractionsService handles communication with the repository and organization related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/ +// GitHub API docs: https://docs.github.com/en/rest/interactions/ type InteractionsService service // InteractionRestriction represents the interaction restrictions for repository and organization. diff --git a/vendor/github.com/google/go-github/v42/github/interactions_orgs.go b/vendor/github.com/google/go-github/v45/github/interactions_orgs.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/interactions_orgs.go rename to vendor/github.com/google/go-github/v45/github/interactions_orgs.go index d22a9e748d..5c7663f583 100644 --- a/vendor/github.com/google/go-github/v42/github/interactions_orgs.go +++ b/vendor/github.com/google/go-github/v45/github/interactions_orgs.go @@ -12,7 +12,7 @@ import ( // GetRestrictionsForOrg fetches the interaction restrictions for an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#get-interaction-restrictions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#get-interaction-restrictions-for-an-organization func (s *InteractionsService) GetRestrictionsForOrg(ctx context.Context, organization string) (*InteractionRestriction, *Response, error) { u := fmt.Sprintf("orgs/%v/interaction-limits", organization) req, err := s.client.NewRequest("GET", u, nil) @@ -39,7 +39,7 @@ func (s *InteractionsService) GetRestrictionsForOrg(ctx context.Context, organiz // in public repositories for the given organization. // Possible values are: "existing_users", "contributors_only", "collaborators_only". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#set-interaction-restrictions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#set-interaction-restrictions-for-an-organization func (s *InteractionsService) UpdateRestrictionsForOrg(ctx context.Context, organization, limit string) (*InteractionRestriction, *Response, error) { u := fmt.Sprintf("orgs/%v/interaction-limits", organization) @@ -65,7 +65,7 @@ func (s *InteractionsService) UpdateRestrictionsForOrg(ctx context.Context, orga // RemoveRestrictionsFromOrg removes the interaction restrictions for an organization. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#remove-interaction-restrictions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/interactions/orgs#remove-interaction-restrictions-for-an-organization func (s *InteractionsService) RemoveRestrictionsFromOrg(ctx context.Context, organization string) (*Response, error) { u := fmt.Sprintf("orgs/%v/interaction-limits", organization) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/interactions_repos.go b/vendor/github.com/google/go-github/v45/github/interactions_repos.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/interactions_repos.go rename to vendor/github.com/google/go-github/v45/github/interactions_repos.go index 13fffd64c0..41e6c5319d 100644 --- a/vendor/github.com/google/go-github/v42/github/interactions_repos.go +++ b/vendor/github.com/google/go-github/v45/github/interactions_repos.go @@ -12,7 +12,7 @@ import ( // GetRestrictionsForRepo fetches the interaction restrictions for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#get-interaction-restrictions-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#get-interaction-restrictions-for-a-repository func (s *InteractionsService) GetRestrictionsForRepo(ctx context.Context, owner, repo string) (*InteractionRestriction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -39,7 +39,7 @@ func (s *InteractionsService) GetRestrictionsForRepo(ctx context.Context, owner, // for the given repository. // Possible values are: "existing_users", "contributors_only", "collaborators_only". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#set-interaction-restrictions-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#set-interaction-restrictions-for-a-repository func (s *InteractionsService) UpdateRestrictionsForRepo(ctx context.Context, owner, repo, limit string) (*InteractionRestriction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) @@ -65,7 +65,7 @@ func (s *InteractionsService) UpdateRestrictionsForRepo(ctx context.Context, own // RemoveRestrictionsFromRepo removes the interaction restrictions for a repository. 
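GetRestrictionsForRepo, UpdateRestrictionsForRepo, and RemoveRestrictionsFromRepo above form a small lifecycle, with the limit argument restricted to the three documented strings. A sketch of setting and then lifting a restriction; owner/repo are placeholders, and the GetLimit/GetExpiresAt getters are assumed from InteractionRestriction's generated accessors:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // real use needs an authenticated client

	// Limit interactions to collaborators; the other documented values are
	// "existing_users" and "contributors_only".
	restriction, _, err := client.Interactions.UpdateRestrictionsForRepo(
		ctx, "octocat", "hello-world", "collaborators_only")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("limit=%s expires=%v\n", restriction.GetLimit(), restriction.GetExpiresAt())

	// Lift the restriction again.
	if _, err := client.Interactions.RemoveRestrictionsFromRepo(ctx, "octocat", "hello-world"); err != nil {
		log.Fatal(err)
	}
}
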
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/interactions/#remove-interaction-restrictions-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/interactions/repos#remove-interaction-restrictions-for-a-repository func (s *InteractionsService) RemoveRestrictionsFromRepo(ctx context.Context, owner, repo string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/interaction-limits", owner, repo) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/issue_import.go b/vendor/github.com/google/go-github/v45/github/issue_import.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/issue_import.go rename to vendor/github.com/google/go-github/v45/github/issue_import.go diff --git a/vendor/github.com/google/go-github/v42/github/issues.go b/vendor/github.com/google/go-github/v45/github/issues.go similarity index 89% rename from vendor/github.com/google/go-github/v42/github/issues.go rename to vendor/github.com/google/go-github/v45/github/issues.go index f35f2b566a..12488f9815 100644 --- a/vendor/github.com/google/go-github/v42/github/issues.go +++ b/vendor/github.com/google/go-github/v45/github/issues.go @@ -14,7 +14,7 @@ import ( // IssuesService handles communication with the issue related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/ +// GitHub API docs: https://docs.github.com/en/rest/issues/ type IssuesService service // Issue represents a GitHub issue on a repository. @@ -54,7 +54,7 @@ type Issue struct { NodeID *string `json:"node_id,omitempty"` // TextMatches is only populated from search results that request text matches - // See: search.go and https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#text-match-metadata + // See: search.go and https://docs.github.com/en/rest/search/#text-match-metadata TextMatches []*TextMatch `json:"text_matches,omitempty"` // ActiveLockReason is populated only when LockReason is provided while locking the issue. @@ -128,8 +128,8 @@ type PullRequestLinks struct { // organization repositories; if false, list only owned and member // repositories. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-user-account-issues-assigned-to-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issues-assigned-to-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-user-account-issues-assigned-to-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-issues-assigned-to-the-authenticated-user func (s *IssuesService) List(ctx context.Context, all bool, opts *IssueListOptions) ([]*Issue, *Response, error) { var u string if all { @@ -143,7 +143,7 @@ func (s *IssuesService) List(ctx context.Context, all bool, opts *IssueListOptio // ListByOrg fetches the issues in the specified organization for the // authenticated user. 
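A minimal sketch of the IssuesService.List call shown above (an authenticated client is required to see anything beyond public data; the option values are illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // pass an oauth2 http.Client to list your own issues

	// all=true spans owned, member, and organization repositories;
	// all=false lists only issues assigned to the authenticated user.
	issues, _, err := client.Issues.List(ctx, true, &github.IssueListOptions{
		State:       "open",
		ListOptions: github.ListOptions{PerPage: 20},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, is := range issues {
		fmt.Printf("#%d %s\n", is.GetNumber(), is.GetTitle())
	}
}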
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-organization-issues-assigned-to-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-organization-issues-assigned-to-the-authenticated-user func (s *IssuesService) ListByOrg(ctx context.Context, org string, opts *IssueListOptions) ([]*Issue, *Response, error) { u := fmt.Sprintf("orgs/%v/issues", org) return s.listIssues(ctx, u, opts) @@ -214,7 +214,7 @@ type IssueListByRepoOptions struct { // ListByRepo lists the issues for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-repository-issues +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#list-repository-issues func (s *IssuesService) ListByRepo(ctx context.Context, owner string, repo string, opts *IssueListByRepoOptions) ([]*Issue, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) u, err := addOptions(u, opts) @@ -241,7 +241,7 @@ func (s *IssuesService) ListByRepo(ctx context.Context, owner string, repo strin // Get a single issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#get-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#get-an-issue func (s *IssuesService) Get(ctx context.Context, owner string, repo string, number int) (*Issue, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) req, err := s.client.NewRequest("GET", u, nil) @@ -263,7 +263,7 @@ func (s *IssuesService) Get(ctx context.Context, owner string, repo string, numb // Create a new issue on the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#create-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#create-an-issue func (s *IssuesService) Create(ctx context.Context, owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues", owner, repo) req, err := s.client.NewRequest("POST", u, issue) @@ -280,9 +280,9 @@ func (s *IssuesService) Create(ctx context.Context, owner string, repo string, i return i, resp, nil } -// Edit an issue. +// Edit (update) an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#update-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#update-an-issue func (s *IssuesService) Edit(ctx context.Context, owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number) req, err := s.client.NewRequest("PATCH", u, issue) @@ -303,7 +303,7 @@ func (s *IssuesService) Edit(ctx context.Context, owner string, repo string, num // // This is a helper method to explicitly update an issue with a `null` milestone, thereby removing it. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#update-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#update-an-issue func (s *IssuesService) RemoveMilestone(ctx context.Context, owner, repo string, issueNumber int) (*Issue, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%v", owner, repo, issueNumber) req, err := s.client.NewRequest("PATCH", u, &struct { @@ -333,7 +333,7 @@ type LockIssueOptions struct { // Lock an issue's conversation. 
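A sketch of the Create and Edit calls above, assuming a personal access token and placeholder org/repo names (IssueRequest fields are pointers, so only set fields are sent):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "GITHUB_TOKEN"}) // placeholder
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	issue, _, err := client.Issues.Create(ctx, "my-org", "my-repo", &github.IssueRequest{
		Title:  github.String("Flaky e2e test"),
		Body:   github.String("Fails roughly 1 in 10 runs."),
		Labels: &[]string{"bug"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Edit issues a PATCH carrying only the fields that are non-nil.
	_, _, err = client.Issues.Edit(ctx, "my-org", "my-repo", issue.GetNumber(), &github.IssueRequest{
		State: github.String("closed"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created and closed", issue.GetHTMLURL())
}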
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#lock-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#lock-an-issue func (s *IssuesService) Lock(ctx context.Context, owner string, repo string, number int, opts *LockIssueOptions) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) req, err := s.client.NewRequest("PUT", u, opts) @@ -346,7 +346,7 @@ func (s *IssuesService) Lock(ctx context.Context, owner string, repo string, num // Unlock an issue's conversation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#unlock-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/issues#unlock-an-issue func (s *IssuesService) Unlock(ctx context.Context, owner string, repo string, number int) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/issues_assignees.go b/vendor/github.com/google/go-github/v45/github/issues_assignees.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/issues_assignees.go rename to vendor/github.com/google/go-github/v45/github/issues_assignees.go index 9f15aea43f..b7f2e80243 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_assignees.go +++ b/vendor/github.com/google/go-github/v45/github/issues_assignees.go @@ -13,7 +13,7 @@ import ( // ListAssignees fetches all available assignees (owners and collaborators) to // which issues may be assigned. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-assignees +// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#list-assignees func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, opts *ListOptions) ([]*User, *Response, error) { u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo) u, err := addOptions(u, opts) @@ -25,6 +25,7 @@ func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, o if err != nil { return nil, nil, err } + var assignees []*User resp, err := s.client.Do(ctx, req, &assignees) if err != nil { @@ -36,13 +37,14 @@ func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, o // IsAssignee checks if a user is an assignee for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#check-if-a-user-can-be-assigned +// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#check-if-a-user-can-be-assigned func (s *IssuesService) IsAssignee(ctx context.Context, owner, repo, user string) (bool, *Response, error) { u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return false, nil, err } + resp, err := s.client.Do(ctx, req, nil) assignee, err := parseBoolResponse(err) return assignee, resp, err @@ -50,7 +52,7 @@ func (s *IssuesService) IsAssignee(ctx context.Context, owner, repo, user string // AddAssignees adds the provided GitHub users as assignees to the issue. 
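A hedged sketch of the Lock/Unlock pair above (repo names and issue number are placeholders; the reason strings are the values the GitHub API documents):

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // push access is required in practice

	// LockReason must be one of "off-topic", "too heated", "resolved", "spam".
	_, err := client.Issues.Lock(ctx, "my-org", "my-repo", 42, &github.LockIssueOptions{
		LockReason: "resolved",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Unlock simply DELETEs the lock again.
	if _, err := client.Issues.Unlock(ctx, "my-org", "my-repo", 42); err != nil {
		log.Fatal(err)
	}
}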
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#add-assignees-to-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#add-assignees-to-an-issue func (s *IssuesService) AddAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { users := &struct { Assignees []string `json:"assignees,omitempty"` @@ -63,12 +65,16 @@ func (s *IssuesService) AddAssignees(ctx context.Context, owner, repo string, nu issue := &Issue{} resp, err := s.client.Do(ctx, req, issue) - return issue, resp, err + if err != nil { + return nil, resp, err + } + + return issue, resp, nil } // RemoveAssignees removes the provided GitHub users as assignees from the issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#remove-assignees-from-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/assignees#remove-assignees-from-an-issue func (s *IssuesService) RemoveAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) { users := &struct { Assignees []string `json:"assignees,omitempty"` @@ -81,5 +87,9 @@ func (s *IssuesService) RemoveAssignees(ctx context.Context, owner, repo string, issue := &Issue{} resp, err := s.client.Do(ctx, req, issue) - return issue, resp, err + if err != nil { + return nil, resp, err + } + + return issue, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/issues_comments.go b/vendor/github.com/google/go-github/v45/github/issues_comments.go similarity index 87% rename from vendor/github.com/google/go-github/v42/github/issues_comments.go rename to vendor/github.com/google/go-github/v45/github/issues_comments.go index 6dd6d13287..361ee49a69 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_comments.go +++ b/vendor/github.com/google/go-github/v45/github/issues_comments.go @@ -50,8 +50,8 @@ type IssueListCommentsOptions struct { // ListComments lists all comments on the specified issue. Specifying an issue // number of 0 will return all comments on all issues for the repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issue-comments -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issue-comments-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#list-issue-comments +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#list-issue-comments-for-a-repository func (s *IssuesService) ListComments(ctx context.Context, owner string, repo string, number int, opts *IssueListCommentsOptions) ([]*IssueComment, *Response, error) { var u string if number == 0 { @@ -83,7 +83,7 @@ func (s *IssuesService) ListComments(ctx context.Context, owner string, repo str // GetComment fetches the specified issue comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#get-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#get-an-issue-comment func (s *IssuesService) GetComment(ctx context.Context, owner string, repo string, commentID int64) (*IssueComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) @@ -106,7 +106,7 @@ func (s *IssuesService) GetComment(ctx context.Context, owner string, repo strin // CreateComment creates a new comment on the specified issue. 
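The assignee endpoints above now propagate errors explicitly instead of returning a possibly half-populated Issue; a sketch with placeholder names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client

	issue, _, err := client.Issues.AddAssignees(ctx, "my-org", "my-repo", 42, []string{"octocat"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assignees now:", len(issue.Assignees))

	// Removal sends the same payload against the DELETE endpoint.
	if _, _, err := client.Issues.RemoveAssignees(ctx, "my-org", "my-repo", 42, []string{"octocat"}); err != nil {
		log.Fatal(err)
	}
}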
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#create-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#create-an-issue-comment func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number) req, err := s.client.NewRequest("POST", u, comment) @@ -125,7 +125,7 @@ func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo st // EditComment updates an issue comment. // A non-nil comment.Body must be provided. Other comment fields should be left nil. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#update-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#update-an-issue-comment func (s *IssuesService) EditComment(ctx context.Context, owner string, repo string, commentID int64, comment *IssueComment) (*IssueComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) req, err := s.client.NewRequest("PATCH", u, comment) @@ -143,7 +143,7 @@ func (s *IssuesService) EditComment(ctx context.Context, owner string, repo stri // DeleteComment deletes an issue comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#delete-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/issues/comments#delete-an-issue-comment func (s *IssuesService) DeleteComment(ctx context.Context, owner string, repo string, commentID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, commentID) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/issues_events.go b/vendor/github.com/google/go-github/v45/github/issues_events.go similarity index 94% rename from vendor/github.com/google/go-github/v42/github/issues_events.go rename to vendor/github.com/google/go-github/v45/github/issues_events.go index 384779cfed..d8ffc0b542 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_events.go +++ b/vendor/github.com/google/go-github/v45/github/issues_events.go @@ -100,7 +100,7 @@ type DismissedReview struct { // ListIssueEvents lists events for the specified issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issue-events +// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events func (s *IssuesService) ListIssueEvents(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*IssueEvent, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number) u, err := addOptions(u, opts) @@ -126,7 +126,7 @@ func (s *IssuesService) ListIssueEvents(ctx context.Context, owner, repo string, // ListRepositoryEvents lists events for the specified repository. 
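A sketch of the comment endpoints above (placeholder names; note the documented constraint that EditComment takes only a non-nil Body):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need an authenticated client

	c, _, err := client.Issues.CreateComment(ctx, "my-org", "my-repo", 42, &github.IssueComment{
		Body: github.String("Reproduced on main as of today."),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("comment:", c.GetHTMLURL())

	// EditComment: only Body is used; leave the other fields nil.
	_, _, err = client.Issues.EditComment(ctx, "my-org", "my-repo", c.GetID(), &github.IssueComment{
		Body: github.String("Reproduced on main; bisected to last week."),
	})
	if err != nil {
		log.Fatal(err)
	}
}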
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-issue-events-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/issues/events#list-issue-events-for-a-repository func (s *IssuesService) ListRepositoryEvents(ctx context.Context, owner, repo string, opts *ListOptions) ([]*IssueEvent, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo) u, err := addOptions(u, opts) @@ -150,7 +150,7 @@ func (s *IssuesService) ListRepositoryEvents(ctx context.Context, owner, repo st // GetEvent returns the specified issue event. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#get-an-issue-event +// GitHub API docs: https://docs.github.com/en/rest/issues/events#get-an-issue-event func (s *IssuesService) GetEvent(ctx context.Context, owner, repo string, id int64) (*IssueEvent, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id) diff --git a/vendor/github.com/google/go-github/v42/github/issues_labels.go b/vendor/github.com/google/go-github/v45/github/issues_labels.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/issues_labels.go rename to vendor/github.com/google/go-github/v45/github/issues_labels.go index 40f069a9d1..d0f865c03f 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_labels.go +++ b/vendor/github.com/google/go-github/v45/github/issues_labels.go @@ -27,7 +27,7 @@ func (l Label) String() string { // ListLabels lists all labels for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-labels-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-a-repository func (s *IssuesService) ListLabels(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) u, err := addOptions(u, opts) @@ -51,7 +51,7 @@ func (s *IssuesService) ListLabels(ctx context.Context, owner string, repo strin // GetLabel gets a single label. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#get-a-label +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#get-a-label func (s *IssuesService) GetLabel(ctx context.Context, owner string, repo string, name string) (*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) req, err := s.client.NewRequest("GET", u, nil) @@ -70,7 +70,7 @@ func (s *IssuesService) GetLabel(ctx context.Context, owner string, repo string, // CreateLabel creates a new label on the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#create-a-label +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#create-a-label func (s *IssuesService) CreateLabel(ctx context.Context, owner string, repo string, label *Label) (*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/labels", owner, repo) req, err := s.client.NewRequest("POST", u, label) @@ -89,7 +89,7 @@ func (s *IssuesService) CreateLabel(ctx context.Context, owner string, repo stri // EditLabel edits a label. 
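The event listings above paginate through the standard ListOptions/Response.NextPage mechanism; a sketch with placeholder names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.ListOptions{PerPage: 50}
	for {
		events, resp, err := client.Issues.ListRepositoryEvents(ctx, "my-org", "my-repo", opts)
		if err != nil {
			log.Fatal(err)
		}
		for _, ev := range events {
			fmt.Println(ev.GetEvent(), "by", ev.GetActor().GetLogin())
		}
		if resp.NextPage == 0 {
			break // last page reached
		}
		opts.Page = resp.NextPage
	}
}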
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#update-a-label +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#update-a-label func (s *IssuesService) EditLabel(ctx context.Context, owner string, repo string, name string, label *Label) (*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) req, err := s.client.NewRequest("PATCH", u, label) @@ -108,7 +108,7 @@ func (s *IssuesService) EditLabel(ctx context.Context, owner string, repo string // DeleteLabel deletes a label. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#delete-a-label +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#delete-a-label func (s *IssuesService) DeleteLabel(ctx context.Context, owner string, repo string, name string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name) req, err := s.client.NewRequest("DELETE", u, nil) @@ -120,7 +120,7 @@ func (s *IssuesService) DeleteLabel(ctx context.Context, owner string, repo stri // ListLabelsByIssue lists all labels for an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-labels-for-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-an-issue func (s *IssuesService) ListLabelsByIssue(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) u, err := addOptions(u, opts) @@ -144,7 +144,7 @@ func (s *IssuesService) ListLabelsByIssue(ctx context.Context, owner string, rep // AddLabelsToIssue adds labels to an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#add-labels-to-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#add-labels-to-an-issue func (s *IssuesService) AddLabelsToIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) req, err := s.client.NewRequest("POST", u, labels) @@ -163,7 +163,7 @@ func (s *IssuesService) AddLabelsToIssue(ctx context.Context, owner string, repo // RemoveLabelForIssue removes a label for an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#remove-a-label-from-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#remove-a-label-from-an-issue func (s *IssuesService) RemoveLabelForIssue(ctx context.Context, owner string, repo string, number int, label string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label) req, err := s.client.NewRequest("DELETE", u, nil) @@ -176,7 +176,7 @@ func (s *IssuesService) RemoveLabelForIssue(ctx context.Context, owner string, r // ReplaceLabelsForIssue replaces all labels for an issue. 
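A sketch of the label endpoints above, with placeholder names and an illustrative label:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // writes need auth

	// Colors are six hex digits without the leading '#'.
	_, _, err := client.Issues.CreateLabel(ctx, "my-org", "my-repo", &github.Label{
		Name:        github.String("needs-triage"),
		Color:       github.String("d93f0b"),
		Description: github.String("Awaiting maintainer triage"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// AddLabelsToIssue creates labels on the fly if they do not exist yet.
	if _, _, err := client.Issues.AddLabelsToIssue(ctx, "my-org", "my-repo", 42, []string{"needs-triage"}); err != nil {
		log.Fatal(err)
	}
}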
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#set-labels-for-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#set-labels-for-an-issue func (s *IssuesService) ReplaceLabelsForIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) req, err := s.client.NewRequest("PUT", u, labels) @@ -195,7 +195,7 @@ func (s *IssuesService) ReplaceLabelsForIssue(ctx context.Context, owner string, // RemoveLabelsForIssue removes all labels for an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#remove-all-labels-from-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#remove-all-labels-from-an-issue func (s *IssuesService) RemoveLabelsForIssue(ctx context.Context, owner string, repo string, number int) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number) req, err := s.client.NewRequest("DELETE", u, nil) @@ -208,7 +208,7 @@ func (s *IssuesService) RemoveLabelsForIssue(ctx context.Context, owner string, // ListLabelsForMilestone lists labels for every issue in a milestone. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-labels-for-issues-in-a-milestone +// GitHub API docs: https://docs.github.com/en/rest/issues/labels#list-labels-for-issues-in-a-milestone func (s *IssuesService) ListLabelsForMilestone(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*Label, *Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/issues_milestones.go b/vendor/github.com/google/go-github/v45/github/issues_milestones.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/issues_milestones.go rename to vendor/github.com/google/go-github/v45/github/issues_milestones.go index f9b1f19335..3c9be2407e 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_milestones.go +++ b/vendor/github.com/google/go-github/v45/github/issues_milestones.go @@ -55,7 +55,7 @@ type MilestoneListOptions struct { // ListMilestones lists all milestones for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-milestones +// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#list-milestones func (s *IssuesService) ListMilestones(ctx context.Context, owner string, repo string, opts *MilestoneListOptions) ([]*Milestone, *Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) u, err := addOptions(u, opts) @@ -79,7 +79,7 @@ func (s *IssuesService) ListMilestones(ctx context.Context, owner string, repo s // GetMilestone gets a single milestone. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#get-a-milestone +// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#get-a-milestone func (s *IssuesService) GetMilestone(ctx context.Context, owner string, repo string, number int) (*Milestone, *Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) req, err := s.client.NewRequest("GET", u, nil) @@ -98,7 +98,7 @@ func (s *IssuesService) GetMilestone(ctx context.Context, owner string, repo str // CreateMilestone creates a new milestone on the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#create-a-milestone +// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#create-a-milestone func (s *IssuesService) CreateMilestone(ctx context.Context, owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo) req, err := s.client.NewRequest("POST", u, milestone) @@ -117,7 +117,7 @@ func (s *IssuesService) CreateMilestone(ctx context.Context, owner string, repo // EditMilestone edits a milestone. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#update-a-milestone +// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#update-a-milestone func (s *IssuesService) EditMilestone(ctx context.Context, owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) req, err := s.client.NewRequest("PATCH", u, milestone) @@ -136,7 +136,7 @@ func (s *IssuesService) EditMilestone(ctx context.Context, owner string, repo st // DeleteMilestone deletes a milestone. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#delete-a-milestone +// GitHub API docs: https://docs.github.com/en/rest/issues/milestones#delete-a-milestone func (s *IssuesService) DeleteMilestone(ctx context.Context, owner string, repo string, number int) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/issues_timeline.go b/vendor/github.com/google/go-github/v45/github/issues_timeline.go similarity index 97% rename from vendor/github.com/google/go-github/v42/github/issues_timeline.go rename to vendor/github.com/google/go-github/v45/github/issues_timeline.go index 845e3f767c..9ec498e45c 100644 --- a/vendor/github.com/google/go-github/v42/github/issues_timeline.go +++ b/vendor/github.com/google/go-github/v45/github/issues_timeline.go @@ -161,7 +161,7 @@ type Source struct { // ListIssueTimeline lists events for the specified issue. 
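A sketch of the milestone endpoints above (placeholder names; only pointer fields that are set get sent):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // creation needs auth

	m, _, err := client.Issues.CreateMilestone(ctx, "my-org", "my-repo", &github.Milestone{
		Title:       github.String("v1.0.0"),
		Description: github.String("First stable release"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("milestone number:", m.GetNumber())

	milestones, _, err := client.Issues.ListMilestones(ctx, "my-org", "my-repo",
		&github.MilestoneListOptions{State: "open"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("open milestones:", len(milestones))
}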
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/issues/#list-timeline-events-for-an-issue +// GitHub API docs: https://docs.github.com/en/rest/issues/timeline#list-timeline-events-for-an-issue func (s *IssuesService) ListIssueTimeline(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*Timeline, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number) u, err := addOptions(u, opts) @@ -180,5 +180,9 @@ func (s *IssuesService) ListIssueTimeline(ctx context.Context, owner, repo strin var events []*Timeline resp, err := s.client.Do(ctx, req, &events) - return events, resp, err + if err != nil { + return nil, resp, err + } + + return events, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/licenses.go b/vendor/github.com/google/go-github/v45/github/licenses.go similarity index 89% rename from vendor/github.com/google/go-github/v42/github/licenses.go rename to vendor/github.com/google/go-github/v45/github/licenses.go index 85e3626617..0877b6d183 100644 --- a/vendor/github.com/google/go-github/v42/github/licenses.go +++ b/vendor/github.com/google/go-github/v45/github/licenses.go @@ -13,7 +13,7 @@ import ( // LicensesService handles communication with the license related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/licenses/ +// GitHub API docs: https://docs.github.com/en/rest/licenses/ type LicensesService service // RepositoryLicense represents the license for a repository. @@ -60,7 +60,7 @@ func (l License) String() string { // List popular open source licenses. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/licenses/#list-all-licenses +// GitHub API docs: https://docs.github.com/en/rest/licenses/#list-all-licenses func (s *LicensesService) List(ctx context.Context) ([]*License, *Response, error) { req, err := s.client.NewRequest("GET", "licenses", nil) if err != nil { @@ -78,7 +78,7 @@ func (s *LicensesService) List(ctx context.Context) ([]*License, *Response, erro // Get extended metadata for one license. 
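ListIssueTimeline above now returns a nil slice alongside any error, so callers no longer see a partially decoded result; a sketch with placeholder names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	timeline, _, err := client.Issues.ListIssueTimeline(ctx, "my-org", "my-repo", 42,
		&github.ListOptions{PerPage: 30})
	if err != nil {
		log.Fatal(err) // timeline is guaranteed nil here in v45
	}
	for _, t := range timeline {
		fmt.Println(t.GetEvent())
	}
}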
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/licenses/#get-a-license +// GitHub API docs: https://docs.github.com/en/rest/licenses#get-a-license func (s *LicensesService) Get(ctx context.Context, licenseName string) (*License, *Response, error) { u := fmt.Sprintf("licenses/%s", licenseName) diff --git a/vendor/github.com/google/go-github/v42/github/messages.go b/vendor/github.com/google/go-github/v45/github/messages.go similarity index 98% rename from vendor/github.com/google/go-github/v42/github/messages.go rename to vendor/github.com/google/go-github/v45/github/messages.go index 2e69c523e9..44477ddb0d 100644 --- a/vendor/github.com/google/go-github/v42/github/messages.go +++ b/vendor/github.com/google/go-github/v45/github/messages.go @@ -81,12 +81,15 @@ var ( "pull_request": "PullRequestEvent", "pull_request_review": "PullRequestReviewEvent", "pull_request_review_comment": "PullRequestReviewCommentEvent", + "pull_request_review_thread": "PullRequestReviewThreadEvent", "pull_request_target": "PullRequestTargetEvent", "push": "PushEvent", "repository": "RepositoryEvent", "repository_dispatch": "RepositoryDispatchEvent", + "repository_import": "RepositoryImportEvent", "repository_vulnerability_alert": "RepositoryVulnerabilityAlertEvent", "release": "ReleaseEvent", + "secret_scanning_alert": "SecretScanningAlertEvent", "star": "StarEvent", "status": "StatusEvent", "team": "TeamEvent", diff --git a/vendor/github.com/google/go-github/v42/github/migrations.go b/vendor/github.com/google/go-github/v45/github/migrations.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/migrations.go rename to vendor/github.com/google/go-github/v45/github/migrations.go index 7694021f1f..67989c0789 100644 --- a/vendor/github.com/google/go-github/v42/github/migrations.go +++ b/vendor/github.com/google/go-github/v45/github/migrations.go @@ -16,7 +16,7 @@ import ( // MigrationService provides access to the migration related functions // in the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migration/ +// GitHub API docs: https://docs.github.com/en/rest/migration/ type MigrationService service // Migration represents a GitHub migration (archival). @@ -74,7 +74,7 @@ type startMigration struct { // StartMigration starts the generation of a migration archive. // repos is a slice of repository names to migrate. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#start-an-organization-migration +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#start-an-organization-migration func (s *MigrationService) StartMigration(ctx context.Context, org string, repos []string, opts *MigrationOptions) (*Migration, *Response, error) { u := fmt.Sprintf("orgs/%v/migrations", org) @@ -103,7 +103,7 @@ func (s *MigrationService) StartMigration(ctx context.Context, org string, repos // ListMigrations lists the most recent migrations. 
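The new eventTypeMapping entries above mean ParseWebHook can now return typed events for pull_request_review_thread, repository_import, and secret_scanning_alert deliveries. A hedged handler sketch (the webhook secret and port are placeholders):

package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/v45/github"
)

func main() {
	http.HandleFunc("/webhook", func(w http.ResponseWriter, r *http.Request) {
		payload, err := github.ValidatePayload(r, []byte("my-webhook-secret")) // placeholder
		if err != nil {
			http.Error(w, "bad signature", http.StatusForbidden)
			return
		}
		event, err := github.ParseWebHook(github.WebHookType(r), payload)
		if err != nil {
			http.Error(w, "unknown event", http.StatusBadRequest)
			return
		}
		switch e := event.(type) {
		case *github.SecretScanningAlertEvent: // newly mapped in v45
			log.Println("secret scanning alert:", e.GetAction())
		case *github.PullRequestReviewThreadEvent: // newly mapped in v45
			log.Println("review thread:", e.GetAction())
		default:
			log.Printf("ignoring %T", e)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}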
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#list-organization-migrations +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#list-organization-migrations func (s *MigrationService) ListMigrations(ctx context.Context, org string, opts *ListOptions) ([]*Migration, *Response, error) { u := fmt.Sprintf("orgs/%v/migrations", org) u, err := addOptions(u, opts) @@ -131,7 +131,7 @@ func (s *MigrationService) ListMigrations(ctx context.Context, org string, opts // MigrationStatus gets the status of a specific migration archive. // id is the migration ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#get-an-organization-migration-status +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#get-an-organization-migration-status func (s *MigrationService) MigrationStatus(ctx context.Context, org string, id int64) (*Migration, *Response, error) { u := fmt.Sprintf("orgs/%v/migrations/%v", org, id) @@ -155,7 +155,7 @@ func (s *MigrationService) MigrationStatus(ctx context.Context, org string, id i // MigrationArchiveURL fetches a migration archive URL. // id is the migration ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#download-an-organization-migration-archive +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#download-an-organization-migration-archive func (s *MigrationService) MigrationArchiveURL(ctx context.Context, org string, id int64) (url string, err error) { u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) @@ -192,7 +192,7 @@ func (s *MigrationService) MigrationArchiveURL(ctx context.Context, org string, // DeleteMigration deletes a previous migration archive. // id is the migration ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#delete-an-organization-migration-archive +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#delete-an-organization-migration-archive func (s *MigrationService) DeleteMigration(ctx context.Context, org string, id int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id) @@ -212,7 +212,7 @@ func (s *MigrationService) DeleteMigration(ctx context.Context, org string, id i // You should unlock each migrated repository and delete them when the migration // is complete and you no longer need the source data. 
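A sketch of the organization-migration flow above (placeholder org/repo; an org admin token is required, and real code would poll MigrationStatus until the state is "exported"):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // org admin token required in practice

	m, _, err := client.Migrations.StartMigration(ctx, "my-org", []string{"my-repo"},
		&github.MigrationOptions{
			LockRepositories: true, // freeze source repos while the archive is generated
		})
	if err != nil {
		log.Fatal(err)
	}

	status, _, err := client.Migrations.MigrationStatus(ctx, "my-org", m.GetID())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("migration state:", status.GetState())
}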
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#unlock-an-organization-repository +// GitHub API docs: https://docs.github.com/en/rest/migrations/orgs#unlock-an-organization-repository func (s *MigrationService) UnlockRepo(ctx context.Context, org string, id int64, repo string) (*Response, error) { u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo) diff --git a/vendor/github.com/google/go-github/v42/github/migrations_source_import.go b/vendor/github.com/google/go-github/v45/github/migrations_source_import.go similarity index 90% rename from vendor/github.com/google/go-github/v42/github/migrations_source_import.go rename to vendor/github.com/google/go-github/v45/github/migrations_source_import.go index e34b3acdac..74a04b22a4 100644 --- a/vendor/github.com/google/go-github/v42/github/migrations_source_import.go +++ b/vendor/github.com/google/go-github/v45/github/migrations_source_import.go @@ -115,7 +115,7 @@ func (i Import) String() string { // SourceImportAuthor identifies an author imported from a source repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migration/source_imports/#get-commit-authors +// GitHub API docs: https://docs.github.com/en/rest/migration/source_imports/#get-commit-authors type SourceImportAuthor struct { ID *int64 `json:"id,omitempty"` RemoteID *string `json:"remote_id,omitempty"` @@ -132,7 +132,7 @@ func (a SourceImportAuthor) String() string { // LargeFile identifies a file larger than 100MB found during a repository import. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migration/source_imports/#get-large-files +// GitHub API docs: https://docs.github.com/en/rest/migration/source_imports/#get-large-files type LargeFile struct { RefName *string `json:"ref_name,omitempty"` Path *string `json:"path,omitempty"` @@ -146,7 +146,7 @@ func (f LargeFile) String() string { // StartImport initiates a repository import. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#start-an-import +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#start-an-import func (s *MigrationService) StartImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import", owner, repo) req, err := s.client.NewRequest("PUT", u, in) @@ -165,7 +165,7 @@ func (s *MigrationService) StartImport(ctx context.Context, owner, repo string, // ImportProgress queries for the status and progress of an ongoing repository import. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#get-an-import-status +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-an-import-status func (s *MigrationService) ImportProgress(ctx context.Context, owner, repo string) (*Import, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -184,7 +184,7 @@ func (s *MigrationService) ImportProgress(ctx context.Context, owner, repo strin // UpdateImport initiates a repository import. 
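A sketch of starting and polling a source import with the endpoints above (the source URL is a placeholder):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // auth required

	imp, _, err := client.Migrations.StartImport(ctx, "my-org", "my-repo", &github.Import{
		VCS:    github.String("git"),
		VCSURL: github.String("https://example.com/old/repo.git"), // placeholder source
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("import status:", imp.GetStatus())

	// ImportProgress polls the same endpoint for status updates.
	progress, _, err := client.Migrations.ImportProgress(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(progress.GetStatusText())
}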
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#update-an-import +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#update-an-import func (s *MigrationService) UpdateImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import", owner, repo) req, err := s.client.NewRequest("PATCH", u, in) @@ -213,7 +213,7 @@ func (s *MigrationService) UpdateImport(ctx context.Context, owner, repo string, // This method and MapCommitAuthor allow you to provide correct Git author // information. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#get-commit-authors +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-commit-authors func (s *MigrationService) CommitAuthors(ctx context.Context, owner, repo string) ([]*SourceImportAuthor, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -234,7 +234,7 @@ func (s *MigrationService) CommitAuthors(ctx context.Context, owner, repo string // application can continue updating authors any time before you push new // commits to the repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#map-a-commit-author +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#map-a-commit-author func (s *MigrationService) MapCommitAuthor(ctx context.Context, owner, repo string, id int64, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id) req, err := s.client.NewRequest("PATCH", u, author) @@ -255,7 +255,7 @@ func (s *MigrationService) MapCommitAuthor(ctx context.Context, owner, repo stri // files larger than 100MB. Only the UseLFS field on the provided Import is // used. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#update-git-lfs-preference +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#update-git-lfs-preference func (s *MigrationService) SetLFSPreference(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo) req, err := s.client.NewRequest("PATCH", u, in) @@ -274,7 +274,7 @@ func (s *MigrationService) SetLFSPreference(ctx context.Context, owner, repo str // LargeFiles lists files larger than 100MB found during the import. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#get-large-files +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#get-large-files func (s *MigrationService) LargeFiles(ctx context.Context, owner, repo string) ([]*LargeFile, *Response, error) { u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -293,7 +293,7 @@ func (s *MigrationService) LargeFiles(ctx context.Context, owner, repo string) ( // CancelImport stops an import for a repository. 
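A hedged sketch of the author-remapping pair above (the identity assigned is a placeholder; real code would pick authors by RemoteName rather than position):

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // auth required

	// List authors detected in the imported history, then remap the first one.
	authors, _, err := client.Migrations.CommitAuthors(ctx, "my-org", "my-repo")
	if err != nil {
		log.Fatal(err)
	}
	if len(authors) > 0 {
		_, _, err = client.Migrations.MapCommitAuthor(ctx, "my-org", "my-repo",
			authors[0].GetID(), &github.SourceImportAuthor{
				Email: github.String("dev@example.com"), // placeholder identity
				Name:  github.String("Dev Eloper"),
			})
		if err != nil {
			log.Fatal(err)
		}
	}
}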
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#cancel-an-import +// GitHub API docs: https://docs.github.com/en/rest/migrations/source-imports#cancel-an-import func (s *MigrationService) CancelImport(ctx context.Context, owner, repo string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/import", owner, repo) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/migrations_user.go b/vendor/github.com/google/go-github/v45/github/migrations_user.go similarity index 89% rename from vendor/github.com/google/go-github/v42/github/migrations_user.go rename to vendor/github.com/google/go-github/v45/github/migrations_user.go index 5e8aaec5aa..b8a0d608d6 100644 --- a/vendor/github.com/google/go-github/v42/github/migrations_user.go +++ b/vendor/github.com/google/go-github/v45/github/migrations_user.go @@ -67,7 +67,7 @@ type startUserMigration struct { // StartUserMigration starts the generation of a migration archive. // repos is a slice of repository names to migrate. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#start-a-user-migration +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#start-a-user-migration func (s *MigrationService) StartUserMigration(ctx context.Context, repos []string, opts *UserMigrationOptions) (*UserMigration, *Response, error) { u := "user/migrations" @@ -96,7 +96,7 @@ func (s *MigrationService) StartUserMigration(ctx context.Context, repos []strin // ListUserMigrations lists the most recent migrations. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#list-user-migrations +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#list-user-migrations func (s *MigrationService) ListUserMigrations(ctx context.Context) ([]*UserMigration, *Response, error) { u := "user/migrations" @@ -120,7 +120,7 @@ func (s *MigrationService) ListUserMigrations(ctx context.Context) ([]*UserMigra // UserMigrationStatus gets the status of a specific migration archive. // id is the migration ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#get-a-user-migration-status +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#get-a-user-migration-status func (s *MigrationService) UserMigrationStatus(ctx context.Context, id int64) (*UserMigration, *Response, error) { u := fmt.Sprintf("user/migrations/%v", id) @@ -144,7 +144,7 @@ func (s *MigrationService) UserMigrationStatus(ctx context.Context, id int64) (* // UserMigrationArchiveURL gets the URL for a specific migration archive. // id is the migration ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#download-a-user-migration-archive +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#download-a-user-migration-archive func (s *MigrationService) UserMigrationArchiveURL(ctx context.Context, id int64) (string, error) { url := fmt.Sprintf("user/migrations/%v/archive", id) @@ -178,7 +178,7 @@ func (s *MigrationService) UserMigrationArchiveURL(ctx context.Context, id int64 // DeleteUserMigration will delete a previous migration archive. // id is the migration ID. 
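A sketch of the user-migration flow above (placeholder repo; in practice the archive URL is only available once the export has finished, so callers poll UserMigrationStatus first):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // user-scoped token required

	m, _, err := client.Migrations.StartUserMigration(ctx, []string{"my-user/my-repo"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// The signed archive URL is short-lived once the export completes.
	url, err := client.Migrations.UserMigrationArchiveURL(ctx, m.GetID())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("download from:", url)
}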
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#delete-a-user-migration-archive +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#delete-a-user-migration-archive func (s *MigrationService) DeleteUserMigration(ctx context.Context, id int64) (*Response, error) { url := fmt.Sprintf("user/migrations/%v/archive", id) @@ -198,7 +198,7 @@ func (s *MigrationService) DeleteUserMigration(ctx context.Context, id int64) (* // You should unlock each migrated repository and delete them when the migration // is complete and you no longer need the source data. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/migrations/#unlock-a-user-repository +// GitHub API docs: https://docs.github.com/en/rest/migrations/users#unlock-a-user-repository func (s *MigrationService) UnlockUserRepo(ctx context.Context, id int64, repo string) (*Response, error) { url := fmt.Sprintf("user/migrations/%v/repos/%v/lock", id, repo) diff --git a/vendor/github.com/google/go-github/v42/github/misc.go b/vendor/github.com/google/go-github/v45/github/misc.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/misc.go rename to vendor/github.com/google/go-github/v45/github/misc.go index 1fa6c4e130..412d1e2b95 100644 --- a/vendor/github.com/google/go-github/v42/github/misc.go +++ b/vendor/github.com/google/go-github/v45/github/misc.go @@ -39,7 +39,7 @@ type markdownRequest struct { // Markdown renders an arbitrary Markdown document. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/markdown/ +// GitHub API docs: https://docs.github.com/en/rest/markdown/ func (c *Client) Markdown(ctx context.Context, text string, opts *MarkdownOptions) (string, *Response, error) { request := &markdownRequest{Text: String(text)} if opts != nil { @@ -67,7 +67,7 @@ func (c *Client) Markdown(ctx context.Context, text string, opts *MarkdownOption // ListEmojis returns the emojis available to use on GitHub. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/emojis/ +// GitHub API docs: https://docs.github.com/en/rest/emojis/ func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response, error) { req, err := c.NewRequest("GET", "emojis", nil) if err != nil { @@ -97,7 +97,7 @@ func (c *CodeOfConduct) String() string { // ListCodesOfConduct returns all codes of conduct. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/codes_of_conduct/#list-all-codes-of-conduct +// GitHub API docs: https://docs.github.com/en/rest/codes_of_conduct/#list-all-codes-of-conduct func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Response, error) { req, err := c.NewRequest("GET", "codes_of_conduct", nil) if err != nil { @@ -118,7 +118,7 @@ func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Res // GetCodeOfConduct returns an individual code of conduct. 
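A sketch of the Markdown endpoint above ("my-org/my-repo" is a placeholder; gfm mode resolves issue references and @mentions against Context):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	html, _, err := client.Markdown(ctx, "Fixes #42, thanks @octocat!", &github.MarkdownOptions{
		Mode:    "gfm",
		Context: "my-org/my-repo", // placeholder repo for reference resolution
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(html)
}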
// -// https://docs.github.com/en/free-pro-team@latest/rest/reference/codes_of_conduct/#get-an-individual-code-of-conduct +// https://docs.github.com/en/rest/codes_of_conduct/#get-an-individual-code-of-conduct func (c *Client) GetCodeOfConduct(ctx context.Context, key string) (*CodeOfConduct, *Response, error) { u := fmt.Sprintf("codes_of_conduct/%s", key) req, err := c.NewRequest("GET", u, nil) @@ -170,13 +170,19 @@ type APIMeta struct { // An array of IP addresses in CIDR format specifying the IP addresses // Dependabot will originate from. Dependabot []string `json:"dependabot,omitempty"` + + // A map of algorithms to SSH key fingerprints. + SSHKeyFingerprints map[string]string `json:"ssh_key_fingerprints,omitempty"` + + // An array of SSH keys. + SSHKeys []string `json:"ssh_keys,omitempty"` } // APIMeta returns information about GitHub.com, the service. Or, if you access // this endpoint on your organization’s GitHub Enterprise installation, this // endpoint provides information about that installation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/meta#get-github-meta-information +// GitHub API docs: https://docs.github.com/en/rest/meta#get-github-meta-information func (c *Client) APIMeta(ctx context.Context) (*APIMeta, *Response, error) { req, err := c.NewRequest("GET", "meta", nil) if err != nil { diff --git a/vendor/github.com/google/go-github/v42/github/orgs.go b/vendor/github.com/google/go-github/v45/github/orgs.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/orgs.go rename to vendor/github.com/google/go-github/v45/github/orgs.go index 62f6ed241c..26b55c62d0 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs.go +++ b/vendor/github.com/google/go-github/v45/github/orgs.go @@ -14,7 +14,7 @@ import ( // OrganizationsService provides access to the organization related functions // in the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/ +// GitHub API docs: https://docs.github.com/en/rest/orgs/ type OrganizationsService service // Organization represents a GitHub organization account. @@ -65,6 +65,9 @@ type Organization struct { MembersCanCreatePrivateRepos *bool `json:"members_can_create_private_repositories,omitempty"` MembersCanCreateInternalRepos *bool `json:"members_can_create_internal_repositories,omitempty"` + // MembersCanForkPrivateRepos toggles whether organization members can fork private organization repositories. + MembersCanForkPrivateRepos *bool `json:"members_can_fork_private_repositories,omitempty"` + // MembersAllowedRepositoryCreationType denotes if organization members can create repositories // and the type of repositories they can create. Possible values are: "all", "private", or "none". // @@ -132,7 +135,7 @@ type OrganizationsListOptions struct { // listing the next set of organizations, use the ID of the last-returned organization // as the opts.Since parameter for the next call. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organizations +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations func (s *OrganizationsService) ListAll(ctx context.Context, opts *OrganizationsListOptions) ([]*Organization, *Response, error) { u, err := addOptions("organizations", opts) if err != nil { @@ -155,8 +158,8 @@ func (s *OrganizationsService) ListAll(ctx context.Context, opts *OrganizationsL // List the organizations for a user. 
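The SSHKeyFingerprints and SSHKeys fields are the additions to APIMeta in this bump; a sketch of reading them (the endpoint is public):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	meta, _, err := client.APIMeta(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// New in v45: algorithm -> fingerprint map for GitHub's SSH host keys.
	for algo, fp := range meta.SSHKeyFingerprints {
		fmt.Printf("%s: %s\n", algo, fp)
	}
	fmt.Println("hooks originate from:", meta.Hooks)
}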
Passing the empty string will list // organizations for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organizations-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organizations-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-organizations-for-a-user func (s *OrganizationsService) List(ctx context.Context, user string, opts *ListOptions) ([]*Organization, *Response, error) { var u string if user != "" { @@ -185,7 +188,7 @@ func (s *OrganizationsService) List(ctx context.Context, user string, opts *List // Get fetches an organization by name. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-an-organization func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organization, *Response, error) { u := fmt.Sprintf("orgs/%v", org) req, err := s.client.NewRequest("GET", u, nil) @@ -226,7 +229,7 @@ func (s *OrganizationsService) GetByID(ctx context.Context, id int64) (*Organiza // Edit an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#update-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#update-an-organization func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organization) (*Organization, *Response, error) { u := fmt.Sprintf("orgs/%v", name) req, err := s.client.NewRequest("PATCH", u, org) @@ -248,7 +251,7 @@ func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organ // ListInstallations lists installations for an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-app-installations-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#list-app-installations-for-an-organization func (s *OrganizationsService) ListInstallations(ctx context.Context, org string, opts *ListOptions) (*OrganizationInstallations, *Response, error) { u := fmt.Sprintf("orgs/%v/installations", org) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v45/github/orgs_actions_allowed.go similarity index 76% rename from vendor/github.com/google/go-github/v42/github/orgs_actions_allowed.go rename to vendor/github.com/google/go-github/v45/github/orgs_actions_allowed.go index 9032d033b6..e3b35b1df1 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_actions_allowed.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_actions_allowed.go @@ -10,9 +10,9 @@ import ( "fmt" ) -// ActionsAllowed represents selected actions that are allowed in an organization. +// ActionsAllowed represents selected actions that are allowed. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#get-allowed-actions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions type ActionsAllowed struct { GithubOwnedAllowed *bool `json:"github_owned_allowed,omitempty"` VerifiedAllowed *bool `json:"verified_allowed,omitempty"` @@ -25,7 +25,7 @@ func (a ActionsAllowed) String() string { // GetActionsAllowed gets the actions that are allowed in an organization. 
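MembersCanForkPrivateRepos is the field added to Organization in this bump; a sketch of toggling it via Edit (placeholder org; an owner-scoped token is assumed):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // owner-scoped token required in practice

	org, _, err := client.Organizations.Edit(ctx, "my-org", &github.Organization{
		MembersCanForkPrivateRepos: github.Bool(false), // new field in v45
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("forking private repos allowed:", org.GetMembersCanForkPrivateRepos())
}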
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#get-allowed-actions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-allowed-actions-and-reusable-workflows-for-an-organization func (s *OrganizationsService) GetActionsAllowed(ctx context.Context, org string) (*ActionsAllowed, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org) @@ -45,14 +45,19 @@ func (s *OrganizationsService) GetActionsAllowed(ctx context.Context, org string // EditActionsAllowed sets the actions that are allowed in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#set-allowed-actions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-allowed-actions-and-reusable-workflows-for-an-organization func (s *OrganizationsService) EditActionsAllowed(ctx context.Context, org string, actionsAllowed ActionsAllowed) (*ActionsAllowed, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/permissions/selected-actions", org) req, err := s.client.NewRequest("PUT", u, actionsAllowed) if err != nil { return nil, nil, err } + p := new(ActionsAllowed) resp, err := s.client.Do(ctx, req, p) - return p, resp, err + if err != nil { + return nil, resp, err + } + + return p, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v45/github/orgs_actions_permissions.go similarity index 81% rename from vendor/github.com/google/go-github/v42/github/orgs_actions_permissions.go rename to vendor/github.com/google/go-github/v45/github/orgs_actions_permissions.go index b8a10b2520..6d1db2ee0a 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_actions_permissions.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_actions_permissions.go @@ -12,7 +12,7 @@ import ( // ActionsPermissions represents a policy for repositories and allowed actions in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#permissions +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions type ActionsPermissions struct { EnabledRepositories *string `json:"enabled_repositories,omitempty"` AllowedActions *string `json:"allowed_actions,omitempty"` @@ -25,7 +25,7 @@ func (a ActionsPermissions) String() string { // GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#get-github-actions-permissions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-an-organization func (s *OrganizationsService) GetActionsPermissions(ctx context.Context, org string) (*ActionsPermissions, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/permissions", org) @@ -45,14 +45,19 @@ func (s *OrganizationsService) GetActionsPermissions(ctx context.Context, org st // EditActionsPermissions sets the permissions policy for repositories and allowed actions in an organization. 
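EditActionsAllowed above now surfaces errors instead of returning a possibly empty ActionsAllowed; a sketch with a placeholder org and pattern:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // org admin token required

	allowed, _, err := client.Organizations.EditActionsAllowed(ctx, "my-org", github.ActionsAllowed{
		GithubOwnedAllowed: github.Bool(true),
		VerifiedAllowed:    github.Bool(true),
		PatternsAllowed:    []string{"tektoncd/*"}, // placeholder allow-list pattern
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(allowed)
}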
// -// GitHub API docs: https://docs.github.com/en/rest/reference/actions#set-github-actions-permissions-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-an-organization func (s *OrganizationsService) EditActionsPermissions(ctx context.Context, org string, actionsPermissions ActionsPermissions) (*ActionsPermissions, *Response, error) { u := fmt.Sprintf("orgs/%v/actions/permissions", org) req, err := s.client.NewRequest("PUT", u, actionsPermissions) if err != nil { return nil, nil, err } + p := new(ActionsPermissions) resp, err := s.client.Do(ctx, req, p) - return p, resp, err + if err != nil { + return nil, resp, err + } + + return p, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v45/github/orgs_audit_log.go similarity index 98% rename from vendor/github.com/google/go-github/v42/github/orgs_audit_log.go rename to vendor/github.com/google/go-github/v45/github/orgs_audit_log.go index ade57f5411..52bacfed9a 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_audit_log.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_audit_log.go @@ -93,7 +93,7 @@ type AuditEntry struct { // GetAuditLog gets the audit-log entries for an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/orgs#get-the-audit-log-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/orgs#get-the-audit-log-for-an-organization func (s *OrganizationsService) GetAuditLog(ctx context.Context, org string, opts *GetAuditLogOptions) ([]*AuditEntry, *Response, error) { u := fmt.Sprintf("orgs/%v/audit-log", org) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go new file mode 100644 index 0000000000..9904685b94 --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/orgs_custom_roles.go @@ -0,0 +1,46 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// OrganizationCustomRepoRoles represents custom repository roles available in specified organization. +type OrganizationCustomRepoRoles struct { + TotalCount *int `json:"total_count,omitempty"` + CustomRepoRoles []*CustomRepoRoles `json:"custom_roles,omitempty"` +} + +// CustomRepoRoles represents custom repository roles for an organization. +// See https://docs.github.com/en/enterprise-cloud@latest/organizations/managing-peoples-access-to-your-organization-with-roles/managing-custom-repository-roles-for-an-organization +// for more information. +type CustomRepoRoles struct { + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ListCustomRepoRoles lists the custom repository roles available in this organization. +// In order to see custom repository roles in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/en/rest/orgs/custom-roles#list-custom-repository-roles-in-an-organization +func (s *OrganizationsService) ListCustomRepoRoles(ctx context.Context, org string) (*OrganizationCustomRepoRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/custom_roles", org) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + customRepoRoles := new(OrganizationCustomRepoRoles) + resp, err := s.client.Do(ctx, req, customRepoRoles) + if err != nil { + return nil, resp, err + } + + return customRepoRoles, resp, nil +} diff --git a/vendor/github.com/google/go-github/v42/github/orgs_hooks.go b/vendor/github.com/google/go-github/v45/github/orgs_hooks.go similarity index 78% rename from vendor/github.com/google/go-github/v42/github/orgs_hooks.go rename to vendor/github.com/google/go-github/v45/github/orgs_hooks.go index dc90656878..c0dd51e24e 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_hooks.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_hooks.go @@ -12,7 +12,7 @@ import ( // ListHooks lists all Hooks for the specified organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-webhooks +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#list-organization-webhooks func (s *OrganizationsService) ListHooks(ctx context.Context, org string, opts *ListOptions) ([]*Hook, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks", org) u, err := addOptions(u, opts) @@ -36,16 +36,21 @@ func (s *OrganizationsService) ListHooks(ctx context.Context, org string, opts * // GetHook returns a single specified Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#get-an-organization-webhook func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64) (*Hook, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } + hook := new(Hook) resp, err := s.client.Do(ctx, req, hook) - return hook, resp, err + if err != nil { + return nil, resp, err + } + + return hook, resp, nil } // CreateHook creates a Hook for the specified org. @@ -54,7 +59,7 @@ func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int64 // Note that only a subset of the hook fields are used and hook must // not be nil. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#create-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#create-an-organization-webhook func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook *Hook) (*Hook, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks", org) @@ -81,38 +86,45 @@ func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook // EditHook updates a specified Hook. 
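orgs_custom_roles.go is new in v45; the hunk at the top of the blob above adds the ListCustomRepoRoles endpoint. A short sketch of calling it, again assuming a configured *github.Client (client, ctx, and "example-org" are hypothetical):

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v45/github"
	)

	func printCustomRoles(ctx context.Context, client *github.Client) error {
		roles, _, err := client.Organizations.ListCustomRepoRoles(ctx, "example-org")
		if err != nil {
			return err
		}
		fmt.Printf("%d custom repository roles\n", roles.GetTotalCount())
		for _, r := range roles.CustomRepoRoles {
			fmt.Printf("  %d: %s\n", r.GetID(), r.GetName())
		}
		return nil
	}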
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#update-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#update-an-organization-webhook func (s *OrganizationsService) EditHook(ctx context.Context, org string, id int64, hook *Hook) (*Hook, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) req, err := s.client.NewRequest("PATCH", u, hook) if err != nil { return nil, nil, err } + h := new(Hook) resp, err := s.client.Do(ctx, req, h) - return h, resp, err + if err != nil { + return nil, resp, err + } + + return h, resp, nil } // PingHook triggers a 'ping' event to be sent to the Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#ping-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#ping-an-organization-webhook func (s *OrganizationsService) PingHook(ctx context.Context, org string, id int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id) req, err := s.client.NewRequest("POST", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } // DeleteHook deletes a specified Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#delete-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#delete-an-organization-webhook func (s *OrganizationsService) DeleteHook(ctx context.Context, org string, id int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%d", org, id) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } diff --git a/vendor/github.com/google/go-github/v42/github/orgs_hooks_deliveries.go b/vendor/github.com/google/go-github/v45/github/orgs_hooks_deliveries.go similarity index 84% rename from vendor/github.com/google/go-github/v42/github/orgs_hooks_deliveries.go rename to vendor/github.com/google/go-github/v45/github/orgs_hooks_deliveries.go index d1fb5c832a..1bfad409ea 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_hooks_deliveries.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_hooks_deliveries.go @@ -12,7 +12,7 @@ import ( // ListHookDeliveries lists webhook deliveries for a webhook configured in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/orgs#list-deliveries-for-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#list-deliveries-for-an-organization-webhook func (s *OrganizationsService) ListHookDeliveries(ctx context.Context, org string, id int64, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries", org, id) u, err := addOptions(u, opts) @@ -36,7 +36,7 @@ func (s *OrganizationsService) ListHookDeliveries(ctx context.Context, org strin // GetHookDelivery returns a delivery for a webhook configured in an organization. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/orgs#get-a-webhook-delivery-for-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#get-a-webhook-delivery-for-an-organization-webhook func (s *OrganizationsService) GetHookDelivery(ctx context.Context, owner string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries/%v", owner, hookID, deliveryID) req, err := s.client.NewRequest("GET", u, nil) @@ -55,7 +55,7 @@ func (s *OrganizationsService) GetHookDelivery(ctx context.Context, owner string // RedeliverHookDelivery redelivers a delivery for a webhook configured in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/orgs#redeliver-a-delivery-for-an-organization-webhook +// GitHub API docs: https://docs.github.com/en/rest/orgs/webhooks#redeliver-a-delivery-for-an-organization-webhook func (s *OrganizationsService) RedeliverHookDelivery(ctx context.Context, owner string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { u := fmt.Sprintf("orgs/%v/hooks/%v/deliveries/%v/attempts", owner, hookID, deliveryID) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_members.go b/vendor/github.com/google/go-github/v45/github/orgs_members.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/orgs_members.go rename to vendor/github.com/google/go-github/v45/github/orgs_members.go index f3a2f17c08..38f43bad5a 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_members.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_members.go @@ -71,8 +71,8 @@ type ListMembersOptions struct { // user is an owner of the organization, this will return both concealed and // public members, otherwise it will only return public members. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-members -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-public-organization-members +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-members +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-public-organization-members func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opts *ListMembersOptions) ([]*User, *Response, error) { var u string if opts != nil && opts.PublicOnly { @@ -101,7 +101,7 @@ func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opts // IsMember checks if a user is a member of an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#check-organization-membership-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#check-organization-membership-for-a-user func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) (bool, *Response, error) { u := fmt.Sprintf("orgs/%v/members/%v", org, user) req, err := s.client.NewRequest("GET", u, nil) @@ -116,7 +116,7 @@ func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) ( // IsPublicMember checks if a user is a public member of an organization. 
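The deliveries endpoints above pair naturally: list a hook's delivery log, then redeliver entries. A sketch under the same assumptions (client is a configured *github.Client; "example-org" and hookID are placeholders):

	import (
		"context"

		"github.com/google/go-github/v45/github"
	)

	// redeliverFirstPage re-sends every delivery on the first page of a hook's log.
	func redeliverFirstPage(ctx context.Context, client *github.Client, hookID int64) error {
		deliveries, _, err := client.Organizations.ListHookDeliveries(ctx, "example-org", hookID, nil)
		if err != nil {
			return err
		}
		for _, d := range deliveries {
			if _, _, err := client.Organizations.RedeliverHookDelivery(ctx, "example-org", hookID, d.GetID()); err != nil {
				return err
			}
		}
		return nil
	}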
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#check-public-organization-membership-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#check-public-organization-membership-for-a-user func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user string) (bool, *Response, error) { u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) req, err := s.client.NewRequest("GET", u, nil) @@ -131,7 +131,7 @@ func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user str // RemoveMember removes a user from all teams of an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-an-organization-member +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-an-organization-member func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/members/%v", org, user) req, err := s.client.NewRequest("DELETE", u, nil) @@ -145,7 +145,7 @@ func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user strin // PublicizeMembership publicizes a user's membership in an organization. (A // user cannot publicize the membership for another user.) // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#set-public-organization-membership-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#set-public-organization-membership-for-the-authenticated-user func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) req, err := s.client.NewRequest("PUT", u, nil) @@ -158,7 +158,7 @@ func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, use // ConcealMembership conceals a user's membership in an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-public-organization-membership-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-public-organization-membership-for-the-authenticated-user func (s *OrganizationsService) ConcealMembership(ctx context.Context, org, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/public_members/%v", org, user) req, err := s.client.NewRequest("DELETE", u, nil) @@ -181,7 +181,7 @@ type ListOrgMembershipsOptions struct { // ListOrgMemberships lists the organization memberships for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-memberships-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-memberships-for-the-authenticated-user func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opts *ListOrgMembershipsOptions) ([]*Membership, *Response, error) { u := "user/memberships/orgs" u, err := addOptions(u, opts) @@ -207,8 +207,8 @@ func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opts *Lis // Passing an empty string for user will get the membership for the // authenticated user. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-an-organization-membership-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#get-organization-membership-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#get-an-organization-membership-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#get-organization-membership-for-a-user func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org string) (*Membership, *Response, error) { var u string if user != "" { @@ -235,8 +235,8 @@ func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org s // Passing an empty string for user will edit the membership for the // authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#update-an-organization-membership-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#set-organization-membership-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#update-an-organization-membership-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#set-organization-membership-for-a-user func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org string, membership *Membership) (*Membership, *Response, error) { var u, method string if user != "" { @@ -264,7 +264,7 @@ func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org // RemoveOrgMembership removes user from the specified organization. If the // user has been invited to the organization, this will cancel their invitation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-organization-membership-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#remove-organization-membership-for-a-user func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, org string) (*Response, error) { u := fmt.Sprintf("orgs/%v/memberships/%v", org, user) req, err := s.client.NewRequest("DELETE", u, nil) @@ -277,7 +277,7 @@ func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, or // ListPendingOrgInvitations returns a list of pending invitations. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-pending-organization-invitations +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-pending-organization-invitations func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opts *ListOptions) ([]*Invitation, *Response, error) { u := fmt.Sprintf("orgs/%v/invitations", org) u, err := addOptions(u, opts) @@ -295,6 +295,7 @@ func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, or if err != nil { return nil, resp, err } + return pendingInvitations, resp, nil } @@ -322,7 +323,7 @@ type CreateOrgInvitationOptions struct { // In order to create invitations in an organization, // the authenticated user must be an organization owner. 
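GetOrgMembership and EditOrgMembership both accept an empty user string to target the authenticated user, as the comments above describe. A sketch of editing another user's role instead (client, ctx, user, and "example-org" are assumptions; the "admin" role value comes from the GitHub membership API):

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v45/github"
	)

	func promoteToAdmin(ctx context.Context, client *github.Client, user string) error {
		// An empty user string would update the authenticated user's membership instead.
		m, _, err := client.Organizations.EditOrgMembership(ctx, user, "example-org",
			&github.Membership{Role: github.String("admin")})
		if err != nil {
			return err
		}
		fmt.Println("new role:", m.GetRole())
		return nil
	}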
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#create-an-organization-invitation +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#create-an-organization-invitation func (s *OrganizationsService) CreateOrgInvitation(ctx context.Context, org string, opts *CreateOrgInvitationOptions) (*Invitation, *Response, error) { u := fmt.Sprintf("orgs/%v/invitations", org) @@ -336,13 +337,14 @@ func (s *OrganizationsService) CreateOrgInvitation(ctx context.Context, org stri if err != nil { return nil, resp, err } + return invitation, resp, nil } // ListOrgInvitationTeams lists all teams associated with an invitation. In order to see invitations in an organization, // the authenticated user must be an organization owner. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-organization-invitation-teams +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-organization-invitation-teams func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, invitationID string, opts *ListOptions) ([]*Team, *Response, error) { u := fmt.Sprintf("orgs/%v/invitations/%v/teams", org, invitationID) u, err := addOptions(u, opts) @@ -360,12 +362,13 @@ func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, if err != nil { return nil, resp, err } + return orgInvitationTeams, resp, nil } // ListFailedOrgInvitations returns a list of failed invitations. // -// GitHub API docs: https://docs.github.com/en/rest/reference/orgs#list-failed-organization-invitations +// GitHub API docs: https://docs.github.com/en/rest/orgs/members#list-failed-organization-invitations func (s *OrganizationsService) ListFailedOrgInvitations(ctx context.Context, org string, opts *ListOptions) ([]*Invitation, *Response, error) { u := fmt.Sprintf("orgs/%v/failed_invitations", org) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/v45/github/orgs_outside_collaborators.go similarity index 86% rename from vendor/github.com/google/go-github/v42/github/orgs_outside_collaborators.go rename to vendor/github.com/google/go-github/v45/github/orgs_outside_collaborators.go index d9ffd25a5f..506a494603 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_outside_collaborators.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_outside_collaborators.go @@ -27,7 +27,7 @@ type ListOutsideCollaboratorsOptions struct { // Warning: The API may change without advance notice during the preview period. // Preview features are not supported for production use. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-outside-collaborators-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#list-outside-collaborators-for-an-organization func (s *OrganizationsService) ListOutsideCollaborators(ctx context.Context, org string, opts *ListOutsideCollaboratorsOptions) ([]*User, *Response, error) { u := fmt.Sprintf("orgs/%v/outside_collaborators", org) u, err := addOptions(u, opts) @@ -52,7 +52,7 @@ func (s *OrganizationsService) ListOutsideCollaborators(ctx context.Context, org // RemoveOutsideCollaborator removes a user from the list of outside collaborators; // consequently, removing them from all the organization's repositories.
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#remove-outside-collaborator-from-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#remove-outside-collaborator-from-an-organization func (s *OrganizationsService) RemoveOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) req, err := s.client.NewRequest("DELETE", u, nil) @@ -69,7 +69,7 @@ func (s *OrganizationsService) RemoveOutsideCollaborator(ctx context.Context, or // Responses for converting a non-member or the last owner to an outside collaborator // are listed in GitHub API docs. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#convert-an-organization-member-to-outside-collaborator +// GitHub API docs: https://docs.github.com/en/rest/orgs/outside-collaborators#convert-an-organization-member-to-outside-collaborator func (s *OrganizationsService) ConvertMemberToOutsideCollaborator(ctx context.Context, org string, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/outside_collaborators/%v", org, user) req, err := s.client.NewRequest("PUT", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_packages.go b/vendor/github.com/google/go-github/v45/github/orgs_packages.go similarity index 81% rename from vendor/github.com/google/go-github/v42/github/orgs_packages.go rename to vendor/github.com/google/go-github/v45/github/orgs_packages.go index 0c36f21cae..9fb11308b8 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_packages.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_packages.go @@ -12,7 +12,7 @@ import ( // List the packages for an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#list-packages-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-an-organization func (s *OrganizationsService) ListPackages(ctx context.Context, org string, opts *PackageListOptions) ([]*Package, *Response, error) { u := fmt.Sprintf("orgs/%v/packages", org) u, err := addOptions(u, opts) @@ -36,7 +36,7 @@ func (s *OrganizationsService) ListPackages(ctx context.Context, org string, opt // Get a package by name from an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-an-organization func (s *OrganizationsService) GetPackage(ctx context.Context, org, packageType, packageName string) (*Package, *Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v", org, packageType, packageName) req, err := s.client.NewRequest("GET", u, nil) @@ -55,7 +55,7 @@ func (s *OrganizationsService) GetPackage(ctx context.Context, org, packageType, // Delete a package from an organization. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-a-package-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-an-organization func (s *OrganizationsService) DeletePackage(ctx context.Context, org, packageType, packageName string) (*Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v", org, packageType, packageName) req, err := s.client.NewRequest("DELETE", u, nil) @@ -68,7 +68,7 @@ func (s *OrganizationsService) DeletePackage(ctx context.Context, org, packageTy // Restore a package to an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-a-package-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-an-organization func (s *OrganizationsService) RestorePackage(ctx context.Context, org, packageType, packageName string) (*Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/restore", org, packageType, packageName) req, err := s.client.NewRequest("POST", u, nil) @@ -81,7 +81,7 @@ func (s *OrganizationsService) RestorePackage(ctx context.Context, org, packageT // Get all versions of a package in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-all-package-versions-for-a-package-owned-by-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-an-organization func (s *OrganizationsService) PackageGetAllVersions(ctx context.Context, org, packageType, packageName string, opts *PackageListOptions) ([]*PackageVersion, *Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions", org, packageType, packageName) u, err := addOptions(u, opts) @@ -105,7 +105,7 @@ func (s *OrganizationsService) PackageGetAllVersions(ctx context.Context, org, p // Get a specific version of a package in an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-version-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-an-organization func (s *OrganizationsService) PackageGetVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*PackageVersion, *Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v", org, packageType, packageName, packageVersionID) req, err := s.client.NewRequest("GET", u, nil) @@ -124,7 +124,7 @@ func (s *OrganizationsService) PackageGetVersion(ctx context.Context, org, packa // Delete a package version from an organization. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-package-version-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-package-version-for-an-organization func (s *OrganizationsService) PackageDeleteVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v", org, packageType, packageName, packageVersionID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -137,7 +137,7 @@ func (s *OrganizationsService) PackageDeleteVersion(ctx context.Context, org, pa // Restore a package version to an organization. 
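The package-version endpoints above take the organization, package type, and package name as strings. A sketch of walking a container package's versions (client and ctx assumed; "example-org", "container", and name are placeholders):

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v45/github"
	)

	func listContainerVersions(ctx context.Context, client *github.Client, name string) error {
		versions, _, err := client.Organizations.PackageGetAllVersions(ctx, "example-org", "container", name, nil)
		if err != nil {
			return err
		}
		for _, v := range versions {
			// Version IDs feed PackageGetVersion, PackageDeleteVersion, and PackageRestoreVersion.
			fmt.Println(v.GetID())
		}
		return nil
	}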
// -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-package-version-for-an-organization +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-package-version-for-an-organization func (s *OrganizationsService) PackageRestoreVersion(ctx context.Context, org, packageType, packageName string, packageVersionID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/packages/%v/%v/versions/%v/restore", org, packageType, packageName, packageVersionID) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_projects.go b/vendor/github.com/google/go-github/v45/github/orgs_projects.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/orgs_projects.go rename to vendor/github.com/google/go-github/v45/github/orgs_projects.go index b0c60ecb9e..d49eae54dc 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_projects.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_projects.go @@ -12,7 +12,7 @@ import ( // ListProjects lists the projects for an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-organization-projects +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-organization-projects func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opts *ProjectListOptions) ([]*Project, *Response, error) { u := fmt.Sprintf("orgs/%v/projects", org) u, err := addOptions(u, opts) @@ -39,7 +39,7 @@ func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opt // CreateProject creates a GitHub Project for the specified organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#create-an-organization-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-an-organization-project func (s *OrganizationsService) CreateProject(ctx context.Context, org string, opts *ProjectOptions) (*Project, *Response, error) { u := fmt.Sprintf("orgs/%v/projects", org) req, err := s.client.NewRequest("POST", u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/v45/github/orgs_users_blocking.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/orgs_users_blocking.go rename to vendor/github.com/google/go-github/v45/github/orgs_users_blocking.go index 2773344c9f..9c6cf60269 100644 --- a/vendor/github.com/google/go-github/v42/github/orgs_users_blocking.go +++ b/vendor/github.com/google/go-github/v45/github/orgs_users_blocking.go @@ -12,7 +12,7 @@ import ( // ListBlockedUsers lists all the users blocked by an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#list-users-blocked-by-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#list-users-blocked-by-an-organization func (s *OrganizationsService) ListBlockedUsers(ctx context.Context, org string, opts *ListOptions) ([]*User, *Response, error) { u := fmt.Sprintf("orgs/%v/blocks", org) u, err := addOptions(u, opts) @@ -39,7 +39,7 @@ func (s *OrganizationsService) ListBlockedUsers(ctx context.Context, org string, // IsBlocked reports whether specified user is blocked from an organization. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#check-if-a-user-is-blocked-by-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#check-if-a-user-is-blocked-by-an-organization func (s *OrganizationsService) IsBlocked(ctx context.Context, org string, user string) (bool, *Response, error) { u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) @@ -58,7 +58,7 @@ func (s *OrganizationsService) IsBlocked(ctx context.Context, org string, user s // BlockUser blocks specified user from an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#block-a-user-from-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#block-a-user-from-an-organization func (s *OrganizationsService) BlockUser(ctx context.Context, org string, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) @@ -75,7 +75,7 @@ func (s *OrganizationsService) BlockUser(ctx context.Context, org string, user s // UnblockUser unblocks specified user from an organization. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/orgs/#unblock-a-user-from-an-organization +// GitHub API docs: https://docs.github.com/en/rest/orgs/blocking#unblock-a-user-from-an-organization func (s *OrganizationsService) UnblockUser(ctx context.Context, org string, user string) (*Response, error) { u := fmt.Sprintf("orgs/%v/blocks/%v", org, user) diff --git a/vendor/github.com/google/go-github/v42/github/packages.go b/vendor/github.com/google/go-github/v45/github/packages.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/packages.go rename to vendor/github.com/google/go-github/v45/github/packages.go diff --git a/vendor/github.com/google/go-github/v42/github/projects.go b/vendor/github.com/google/go-github/v45/github/projects.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/projects.go rename to vendor/github.com/google/go-github/v45/github/projects.go index 2886c3a3b0..df7ad6cd97 100644 --- a/vendor/github.com/google/go-github/v42/github/projects.go +++ b/vendor/github.com/google/go-github/v45/github/projects.go @@ -13,7 +13,7 @@ import ( // ProjectsService provides access to the projects functions in the // GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/ +// GitHub API docs: https://docs.github.com/en/rest/projects type ProjectsService service // Project represents a GitHub Project. @@ -43,7 +43,7 @@ func (p Project) String() string { // GetProject gets a GitHub Project for a repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#get-a-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#get-a-project func (s *ProjectsService) GetProject(ctx context.Context, id int64) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("GET", u, nil) @@ -90,7 +90,7 @@ type ProjectOptions struct { // UpdateProject updates a repository project. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#update-a-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#update-a-project func (s *ProjectsService) UpdateProject(ctx context.Context, id int64, opts *ProjectOptions) (*Project, *Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("PATCH", u, opts) @@ -112,7 +112,7 @@ func (s *ProjectsService) UpdateProject(ctx context.Context, id int64, opts *Pro // DeleteProject deletes a GitHub Project from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#delete-a-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#delete-a-project func (s *ProjectsService) DeleteProject(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("projects/%v", id) req, err := s.client.NewRequest("DELETE", u, nil) @@ -128,7 +128,7 @@ func (s *ProjectsService) DeleteProject(ctx context.Context, id int64) (*Respons // ProjectColumn represents a column of a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/projects/ +// GitHub API docs: https://docs.github.com/en/rest/repos/projects/ type ProjectColumn struct { ID *int64 `json:"id,omitempty"` Name *string `json:"name,omitempty"` @@ -142,7 +142,7 @@ type ProjectColumn struct { // ListProjectColumns lists the columns of a GitHub Project for a repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-project-columns +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#list-project-columns func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int64, opts *ListOptions) ([]*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) u, err := addOptions(u, opts) @@ -169,7 +169,7 @@ func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int6 // GetProjectColumn gets a column of a GitHub Project for a repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#get-a-project-column +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#get-a-project-column func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int64) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", id) req, err := s.client.NewRequest("GET", u, nil) @@ -199,7 +199,7 @@ type ProjectColumnOptions struct { // CreateProjectColumn creates a column for the specified (by number) project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#create-a-project-column +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#create-a-project-column func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/%v/columns", projectID) req, err := s.client.NewRequest("POST", u, opts) @@ -221,7 +221,7 @@ func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int // UpdateProjectColumn updates a column of a GitHub Project. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#update-an-existing-project-column +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#update-an-existing-project-column func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnOptions) (*ProjectColumn, *Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("PATCH", u, opts) @@ -243,7 +243,7 @@ func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int6 // DeleteProjectColumn deletes a column from a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#delete-a-project-column +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#delete-a-project-column func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int64) (*Response, error) { u := fmt.Sprintf("projects/columns/%v", columnID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -267,7 +267,7 @@ type ProjectColumnMoveOptions struct { // MoveProjectColumn moves a column within a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#move-a-project-column +// GitHub API docs: https://docs.github.com/en/rest/projects/columns#move-a-project-column func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int64, opts *ProjectColumnMoveOptions) (*Response, error) { u := fmt.Sprintf("projects/columns/%v/moves", columnID) req, err := s.client.NewRequest("POST", u, opts) @@ -283,7 +283,7 @@ func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int64, // ProjectCard represents a card in a column of a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/cards/#get-a-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards/#get-a-project-card type ProjectCard struct { URL *string `json:"url,omitempty"` ColumnURL *string `json:"column_url,omitempty"` @@ -318,7 +318,7 @@ type ProjectCardListOptions struct { // ListProjectCards lists the cards in a column of a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-project-cards +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#list-project-cards func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int64, opts *ProjectCardListOptions) ([]*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/%v/cards", columnID) u, err := addOptions(u, opts) @@ -345,7 +345,7 @@ func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int64, // GetProjectCard gets a card in a column of a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#get-a-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#get-a-project-card func (s *ProjectsService) GetProjectCard(ctx context.Context, cardID int64) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("GET", u, nil) @@ -383,7 +383,7 @@ type ProjectCardOptions struct { // CreateProjectCard creates a card in the specified column of a GitHub Project. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#create-a-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#create-a-project-card func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/%v/cards", columnID) req, err := s.client.NewRequest("POST", u, opts) @@ -405,7 +405,7 @@ func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int64, // UpdateProjectCard updates a card of a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#update-an-existing-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#update-an-existing-project-card func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int64, opts *ProjectCardOptions) (*ProjectCard, *Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("PATCH", u, opts) @@ -427,7 +427,7 @@ func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int64, o // DeleteProjectCard deletes a card from a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#delete-a-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#delete-a-project-card func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int64) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v", cardID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -455,7 +455,7 @@ type ProjectCardMoveOptions struct { // MoveProjectCard moves a card within a GitHub Project. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#move-a-project-card +// GitHub API docs: https://docs.github.com/en/rest/projects/cards#move-a-project-card func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int64, opts *ProjectCardMoveOptions) (*Response, error) { u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID) req, err := s.client.NewRequest("POST", u, opts) @@ -485,7 +485,7 @@ type ProjectCollaboratorOptions struct { // AddProjectCollaborator adds a collaborator to an organization project and sets // their permission level. You must be an organization owner or a project admin to add a collaborator. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#add-project-collaborator +// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#add-project-collaborator func (s *ProjectsService) AddProjectCollaborator(ctx context.Context, id int64, username string, opts *ProjectCollaboratorOptions) (*Response, error) { u := fmt.Sprintf("projects/%v/collaborators/%v", id, username) req, err := s.client.NewRequest("PUT", u, opts) @@ -502,7 +502,7 @@ func (s *ProjectsService) AddProjectCollaborator(ctx context.Context, id int64, // RemoveProjectCollaborator removes a collaborator from an organization project. // You must be an organization owner or a project admin to remove a collaborator. 
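The column and card endpoints above compose into a basic board-setup flow. A sketch under the same assumptions (client, ctx, projectID, the column name, and the note text are all hypothetical):

	import (
		"context"

		"github.com/google/go-github/v45/github"
	)

	func seedBoard(ctx context.Context, client *github.Client, projectID int64) error {
		// Create a column on the project, then a note card inside it.
		col, _, err := client.Projects.CreateProjectColumn(ctx, projectID,
			&github.ProjectColumnOptions{Name: "To do"})
		if err != nil {
			return err
		}
		_, _, err = client.Projects.CreateProjectCard(ctx, col.GetID(),
			&github.ProjectCardOptions{Note: "Triage incoming issues"})
		return err
	}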
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#remove-user-as-a-collaborator +// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#remove-user-as-a-collaborator func (s *ProjectsService) RemoveProjectCollaborator(ctx context.Context, id int64, username string) (*Response, error) { u := fmt.Sprintf("projects/%v/collaborators/%v", id, username) req, err := s.client.NewRequest("DELETE", u, nil) @@ -538,7 +538,7 @@ type ListCollaboratorOptions struct { // with access through default organization permissions, and organization owners. You must be an // organization owner or a project admin to list collaborators. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-project-collaborators +// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#list-project-collaborators func (s *ProjectsService) ListProjectCollaborators(ctx context.Context, id int64, opts *ListCollaboratorOptions) ([]*User, *Response, error) { u := fmt.Sprintf("projects/%v/collaborators", id) u, err := addOptions(u, opts) @@ -576,7 +576,7 @@ type ProjectPermissionLevel struct { // project. Possible values for the permission key: "admin", "write", "read", "none". // You must be an organization owner or a project admin to review a user's permission level. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#get-project-permission-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/projects/collaborators#get-project-permission-for-a-user func (s *ProjectsService) ReviewProjectCollaboratorPermission(ctx context.Context, id int64, username string) (*ProjectPermissionLevel, *Response, error) { u := fmt.Sprintf("projects/%v/collaborators/%v/permission", id, username) req, err := s.client.NewRequest("GET", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/pulls.go b/vendor/github.com/google/go-github/v45/github/pulls.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/pulls.go rename to vendor/github.com/google/go-github/v45/github/pulls.go index 37fb7413a4..120a1d6f18 100644 --- a/vendor/github.com/google/go-github/v42/github/pulls.go +++ b/vendor/github.com/google/go-github/v45/github/pulls.go @@ -15,7 +15,7 @@ import ( // PullRequestsService handles communication with the pull request related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/ +// GitHub API docs: https://docs.github.com/en/rest/pulls/ type PullRequestsService service // PullRequestAutoMerge represents the "auto_merge" response for a PullRequest. @@ -143,7 +143,7 @@ type PullRequestListOptions struct { // List the pull requests for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-pull-requests +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-pull-requests func (s *PullRequestsService) List(ctx context.Context, owner string, repo string, opts *PullRequestListOptions) ([]*PullRequest, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) u, err := addOptions(u, opts) @@ -170,7 +170,7 @@ func (s *PullRequestsService) List(ctx context.Context, owner string, repo strin // The results may include open and closed pull requests. // By default, the PullRequestListOptions State filters for "open". 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/commits/#list-pull-requests-associated-with-a-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-pull-requests-associated-with-a-commit func (s *PullRequestsService) ListPullRequestsWithCommit(ctx context.Context, owner, repo, sha string, opts *PullRequestListOptions) ([]*PullRequest, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/pulls", owner, repo, sha) u, err := addOptions(u, opts) @@ -196,7 +196,7 @@ func (s *PullRequestsService) ListPullRequestsWithCommit(ctx context.Context, ow // Get a single pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#get-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#get-a-pull-request func (s *PullRequestsService) Get(ctx context.Context, owner string, repo string, number int) (*PullRequest, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) req, err := s.client.NewRequest("GET", u, nil) @@ -215,7 +215,7 @@ func (s *PullRequestsService) Get(ctx context.Context, owner string, repo string // GetRaw gets a single pull request in raw (diff or patch) format. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#get-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#get-a-pull-request func (s *PullRequestsService) GetRaw(ctx context.Context, owner string, repo string, number int, opts RawOptions) (string, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number) req, err := s.client.NewRequest("GET", u, nil) @@ -254,7 +254,7 @@ type NewPullRequest struct { // Create a new pull request on the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#create-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#create-a-pull-request func (s *PullRequestsService) Create(ctx context.Context, owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo) req, err := s.client.NewRequest("POST", u, pull) @@ -293,7 +293,7 @@ type PullRequestBranchUpdateResponse struct { // A follow up request, after a delay of a second or so, should result // in a successful request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#update-a-pull-request-branch +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#update-a-pull-request-branch func (s *PullRequestsService) UpdateBranch(ctx context.Context, owner, repo string, number int, opts *PullRequestBranchUpdateOptions) (*PullRequestBranchUpdateResponse, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/update-branch", owner, repo, number) @@ -328,7 +328,7 @@ type pullRequestUpdate struct { // The following fields are editable: Title, Body, State, Base.Ref and MaintainerCanModify. // Base.Ref updates the base branch of the pull request. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#update-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#update-a-pull-request func (s *PullRequestsService) Edit(ctx context.Context, owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) { if pull == nil { return nil, nil, fmt.Errorf("pull must be provided") @@ -365,7 +365,7 @@ func (s *PullRequestsService) Edit(ctx context.Context, owner string, repo strin // ListCommits lists the commits in a pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-commits-on-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-commits-on-a-pull-request func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*RepositoryCommit, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number) u, err := addOptions(u, opts) @@ -389,7 +389,7 @@ func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, rep // ListFiles lists the files in a pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-pull-requests-files +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#list-pull-requests-files func (s *PullRequestsService) ListFiles(ctx context.Context, owner string, repo string, number int, opts *ListOptions) ([]*CommitFile, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number) u, err := addOptions(u, opts) @@ -413,7 +413,7 @@ func (s *PullRequestsService) ListFiles(ctx context.Context, owner string, repo // IsMerged checks if a pull request has been merged. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#check-if-a-pull-request-has-been-merged +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#check-if-a-pull-request-has-been-merged func (s *PullRequestsService) IsMerged(ctx context.Context, owner string, repo string, number int) (bool, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) req, err := s.client.NewRequest("GET", u, nil) @@ -455,7 +455,7 @@ type pullRequestMergeRequest struct { // Merge a pull request. // commitMessage is an extra detail to append to automatic commit message. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#merge-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/pulls#merge-a-pull-request func (s *PullRequestsService) Merge(ctx context.Context, owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number) diff --git a/vendor/github.com/google/go-github/v42/github/pulls_comments.go b/vendor/github.com/google/go-github/v45/github/pulls_comments.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/pulls_comments.go rename to vendor/github.com/google/go-github/v45/github/pulls_comments.go index 5078bab1d0..83e7881e51 100644 --- a/vendor/github.com/google/go-github/v42/github/pulls_comments.go +++ b/vendor/github.com/google/go-github/v45/github/pulls_comments.go @@ -66,8 +66,8 @@ type PullRequestListCommentsOptions struct { // pull request number of 0 will return all comments on all pull requests for // the repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-review-comments-on-a-pull-request -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-review-comments-in-a-repository +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#list-review-comments-on-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#list-review-comments-in-a-repository func (s *PullRequestsService) ListComments(ctx context.Context, owner, repo string, number int, opts *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) { var u string if number == 0 { @@ -100,7 +100,7 @@ func (s *PullRequestsService) ListComments(ctx context.Context, owner, repo stri // GetComment fetches the specified pull request comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#get-a-review-comment-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#get-a-review-comment-for-a-pull-request func (s *PullRequestsService) GetComment(ctx context.Context, owner, repo string, commentID int64) (*PullRequestComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) req, err := s.client.NewRequest("GET", u, nil) @@ -123,7 +123,7 @@ func (s *PullRequestsService) GetComment(ctx context.Context, owner, repo string // CreateComment creates a new comment on the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#create-a-review-comment-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#create-a-review-comment-for-a-pull-request func (s *PullRequestsService) CreateComment(ctx context.Context, owner, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number) req, err := s.client.NewRequest("POST", u, comment) @@ -145,7 +145,7 @@ func (s *PullRequestsService) CreateComment(ctx context.Context, owner, repo str // CreateCommentInReplyTo creates a new comment as a reply to an existing pull request comment. 
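Merge, whose signature appears at the top of the blob above, takes the extra commit-message detail as a plain string plus a *PullRequestOptions. A sketch (client, ctx, "example-org", "example-repo", number, and the "squash" merge method are assumptions for illustration):

	import (
		"context"
		"fmt"

		"github.com/google/go-github/v45/github"
	)

	func squashMerge(ctx context.Context, client *github.Client, number int) error {
		res, _, err := client.PullRequests.Merge(ctx, "example-org", "example-repo", number,
			"extra detail appended to the merge commit message",
			&github.PullRequestOptions{MergeMethod: "squash"})
		if err != nil {
			return err
		}
		fmt.Println("merged:", res.GetMerged(), res.GetMessage())
		return nil
	}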
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#create-a-review-comment-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#create-a-review-comment-for-a-pull-request func (s *PullRequestsService) CreateCommentInReplyTo(ctx context.Context, owner, repo string, number int, body string, commentID int64) (*PullRequestComment, *Response, error) { comment := &struct { Body string `json:"body,omitempty"` @@ -172,7 +172,7 @@ func (s *PullRequestsService) CreateCommentInReplyTo(ctx context.Context, owner, // EditComment updates a pull request comment. // A non-nil comment.Body must be provided. Other comment fields should be left nil. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#update-a-review-comment-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#update-a-review-comment-for-a-pull-request func (s *PullRequestsService) EditComment(ctx context.Context, owner, repo string, commentID int64, comment *PullRequestComment) (*PullRequestComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) req, err := s.client.NewRequest("PATCH", u, comment) @@ -191,7 +191,7 @@ func (s *PullRequestsService) EditComment(ctx context.Context, owner, repo strin // DeleteComment deletes a pull request comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#delete-a-review-comment-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/comments#delete-a-review-comment-for-a-pull-request func (s *PullRequestsService) DeleteComment(ctx context.Context, owner, repo string, commentID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, commentID) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/pulls_reviewers.go b/vendor/github.com/google/go-github/v45/github/pulls_reviewers.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/pulls_reviewers.go rename to vendor/github.com/google/go-github/v45/github/pulls_reviewers.go index f901c2e8a2..1c336540b8 100644 --- a/vendor/github.com/google/go-github/v42/github/pulls_reviewers.go +++ b/vendor/github.com/google/go-github/v45/github/pulls_reviewers.go @@ -25,7 +25,7 @@ type Reviewers struct { // RequestReviewers creates a review request for the provided reviewers for the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#request-reviewers-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#request-reviewers-for-a-pull-request func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*PullRequest, *Response, error) { u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) req, err := s.client.NewRequest("POST", u, &reviewers) @@ -44,7 +44,7 @@ func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo // ListReviewers lists reviewers whose reviews have been requested on the specified pull request. 
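A sketch of `RequestReviewers`, assuming `ctx` and an authenticated `client` built as in the `Merge` example; the logins and team slug are placeholders:

```go
// Team reviewers are addressed by slug and only apply to org-owned repos.
func requestReviews(ctx context.Context, client *github.Client) error {
	pr, _, err := client.PullRequests.RequestReviewers(ctx, "octocat", "hello-world", 1,
		github.ReviewersRequest{
			Reviewers:     []string{"hubot"},
			TeamReviewers: []string{"justice-league"},
		})
	if err != nil {
		return err
	}
	fmt.Println("pending reviewers:", len(pr.RequestedReviewers))
	return nil
}
```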
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-requested-reviewers-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#list-requested-reviewers-for-a-pull-request func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int, opts *ListOptions) (*Reviewers, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/requested_reviewers", owner, repo, number) u, err := addOptions(u, opts) @@ -68,7 +68,7 @@ func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo str // RemoveReviewers removes the review request for the provided reviewers for the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#remove-requested-reviewers-from-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/review-requests#remove-requested-reviewers-from-a-pull-request func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*Response, error) { u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number) req, err := s.client.NewRequest("DELETE", u, &reviewers) diff --git a/vendor/github.com/google/go-github/v42/github/pulls_reviews.go b/vendor/github.com/google/go-github/v45/github/pulls_reviews.go similarity index 90% rename from vendor/github.com/google/go-github/v42/github/pulls_reviews.go rename to vendor/github.com/google/go-github/v45/github/pulls_reviews.go index 437b0937be..14e20322ae 100644 --- a/vendor/github.com/google/go-github/v42/github/pulls_reviews.go +++ b/vendor/github.com/google/go-github/v45/github/pulls_reviews.go @@ -101,7 +101,7 @@ func (r PullRequestReviewDismissalRequest) String() string { // ListReviews lists all reviews on the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-reviews-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#list-reviews-for-a-pull-request func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number) u, err := addOptions(u, opts) @@ -125,7 +125,7 @@ func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo strin // GetReview fetches the specified pull request review. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#get-a-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#get-a-review-for-a-pull-request func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) @@ -145,7 +145,7 @@ func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string, // DeletePendingReview deletes the specified pull request pending review. 
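A sketch of enumerating a pull request's reviews, assuming the same `ctx` and `client` as above:

```go
func printReviews(ctx context.Context, client *github.Client) error {
	reviews, _, err := client.PullRequests.ListReviews(ctx, "octocat", "hello-world", 1, nil)
	if err != nil {
		return err
	}
	for _, r := range reviews {
		// State is APPROVED, CHANGES_REQUESTED, COMMENTED, DISMISSED, or PENDING.
		fmt.Printf("review %d: %s by %s\n", r.GetID(), r.GetState(), r.GetUser().GetLogin())
	}
	return nil
}
```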
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#delete-a-pending-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#delete-a-pending-review-for-a-pull-request func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, repo string, number int, reviewID int64) (*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID) @@ -165,7 +165,7 @@ func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, re // ListReviewComments lists all the comments for the specified review. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#list-comments-for-a-pull-request-review +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#list-comments-for-a-pull-request-review func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number int, reviewID int64, opts *ListOptions) ([]*PullRequestComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/comments", owner, repo, number, reviewID) u, err := addOptions(u, opts) @@ -189,7 +189,7 @@ func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, rep // CreateReview creates a new review on the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#create-a-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#create-a-review-for-a-pull-request // // In order to use multi-line comments, you must use the "comfort fade" preview. // This replaces the use of the "Position" field in comments with 4 new fields: @@ -250,7 +250,7 @@ func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo stri // UpdateReview updates the review summary on the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#update-a-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#update-a-review-for-a-pull-request func (s *PullRequestsService) UpdateReview(ctx context.Context, owner, repo string, number int, reviewID int64, body string) (*PullRequestReview, *Response, error) { opts := &struct { Body string `json:"body"` @@ -273,7 +273,7 @@ func (s *PullRequestsService) UpdateReview(ctx context.Context, owner, repo stri // SubmitReview submits a specified review on the specified pull request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#submit-a-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#submit-a-review-for-a-pull-request func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/events", owner, repo, number, reviewID) @@ -293,7 +293,7 @@ func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo stri // DismissReview dismisses a specified review on the specified pull request. 
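A sketch of the two-step review flow: omitting `Event` on `CreateReview` leaves the review PENDING, and `SubmitReview` then publishes it. The path and line number are placeholders:

```go
func reviewAndApprove(ctx context.Context, client *github.Client) error {
	review, _, err := client.PullRequests.CreateReview(ctx, "octocat", "hello-world", 1,
		&github.PullRequestReviewRequest{
			Body: github.String("Looks good overall."),
			Comments: []*github.DraftReviewComment{{
				Path: github.String("main.go"),
				Side: github.String("RIGHT"),
				Line: github.Int(12), // comfort-fade style fields, per the note above
				Body: github.String("Consider wrapping this error."),
			}},
		})
	if err != nil {
		return err
	}
	_, _, err = client.PullRequests.SubmitReview(ctx, "octocat", "hello-world", 1,
		review.GetID(), &github.PullRequestReviewRequest{Event: github.String("APPROVE")})
	return err
}
```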
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/pulls/#dismiss-a-review-for-a-pull-request +// GitHub API docs: https://docs.github.com/en/rest/pulls/reviews#dismiss-a-review-for-a-pull-request func (s *PullRequestsService) DismissReview(ctx context.Context, owner, repo string, number int, reviewID int64, review *PullRequestReviewDismissalRequest) (*PullRequestReview, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/dismissals", owner, repo, number, reviewID) diff --git a/vendor/github.com/google/go-github/v45/github/pulls_threads.go b/vendor/github.com/google/go-github/v45/github/pulls_threads.go new file mode 100644 index 0000000000..23e924d88f --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/pulls_threads.go @@ -0,0 +1,17 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +// PullRequestThread represents a thread of comments on a pull request. +type PullRequestThread struct { + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Comments []*PullRequestComment `json:"comments,omitempty"` +} + +func (p PullRequestThread) String() string { + return Stringify(p) +} diff --git a/vendor/github.com/google/go-github/v42/github/reactions.go b/vendor/github.com/google/go-github/v45/github/reactions.go similarity index 83% rename from vendor/github.com/google/go-github/v42/github/reactions.go rename to vendor/github.com/google/go-github/v45/github/reactions.go index ecfcf2e945..14d193ae88 100644 --- a/vendor/github.com/google/go-github/v42/github/reactions.go +++ b/vendor/github.com/google/go-github/v45/github/reactions.go @@ -14,7 +14,7 @@ import ( // ReactionsService provides access to the reactions-related functions in the // GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/ +// GitHub API docs: https://docs.github.com/en/rest/reactions type ReactionsService service // Reaction represents a GitHub reaction. @@ -60,7 +60,7 @@ type ListCommentReactionOptions struct { // ListCommentReactions lists the reactions for a commit comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-commit-comment func (s *ReactionsService) ListCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListCommentReactionOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) u, err := addOptions(u, opts) @@ -90,7 +90,7 @@ func (s *ReactionsService) ListCommentReactions(ctx context.Context, owner, repo // previously created reaction will be returned with Status: 200 OK. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". 
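A sketch of reacting to a commit comment, assuming an authenticated `client`; the comment ID 123456 is a placeholder:

```go
func heartCommitComment(ctx context.Context, client *github.Client) error {
	reaction, resp, err := client.Reactions.CreateCommentReaction(ctx,
		"octocat", "hello-world", 123456, "heart")
	if err != nil {
		return err
	}
	// Per the docs above, a 200 (rather than 201) means the
	// reaction already existed and was returned unchanged.
	fmt.Println(resp.StatusCode, reaction.GetID())
	return nil
}
```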
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-commit-comment func (s *ReactionsService) CreateCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id) @@ -114,7 +114,7 @@ func (s *ReactionsService) CreateCommentReaction(ctx context.Context, owner, rep // DeleteCommentReaction deletes the reaction for a commit comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-a-commit-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-commit-comment-reaction func (s *ReactionsService) DeleteCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions/%v", owner, repo, commentID, reactionID) @@ -123,7 +123,7 @@ func (s *ReactionsService) DeleteCommentReaction(ctx context.Context, owner, rep // DeleteCommentReactionByID deletes the reaction for a commit comment by repository ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-a-commit-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-commit-comment-reaction func (s *ReactionsService) DeleteCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { u := fmt.Sprintf("repositories/%v/comments/%v/reactions/%v", repoID, commentID, reactionID) @@ -132,7 +132,7 @@ func (s *ReactionsService) DeleteCommentReactionByID(ctx context.Context, repoID // ListIssueReactions lists the reactions for an issue. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-an-issue +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-an-issue func (s *ReactionsService) ListIssueReactions(ctx context.Context, owner, repo string, number int, opts *ListOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) u, err := addOptions(u, opts) @@ -162,7 +162,7 @@ func (s *ReactionsService) ListIssueReactions(ctx context.Context, owner, repo s // previously created reaction will be returned with Status: 200 OK. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-an-issue +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-an-issue func (s *ReactionsService) CreateIssueReaction(ctx context.Context, owner, repo string, number int, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number) @@ -186,7 +186,7 @@ func (s *ReactionsService) CreateIssueReaction(ctx context.Context, owner, repo // DeleteIssueReaction deletes the reaction to an issue. 
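A sketch pairing `CreateIssueReaction` with `DeleteIssueReaction`; the delete variant needs the reaction ID returned at creation time:

```go
func toggleIssueReaction(ctx context.Context, client *github.Client) error {
	reaction, _, err := client.Reactions.CreateIssueReaction(ctx, "octocat", "hello-world", 1, "+1")
	if err != nil {
		return err
	}
	_, err = client.Reactions.DeleteIssueReaction(ctx, "octocat", "hello-world", 1, reaction.GetID())
	return err
}
```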
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-an-issue-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-reaction func (s *ReactionsService) DeleteIssueReaction(ctx context.Context, owner, repo string, issueNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/issues/%v/reactions/%v", owner, repo, issueNumber, reactionID) @@ -195,7 +195,7 @@ func (s *ReactionsService) DeleteIssueReaction(ctx context.Context, owner, repo // DeleteIssueReactionByID deletes the reaction to an issue by repository ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-an-issue-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-reaction func (s *ReactionsService) DeleteIssueReactionByID(ctx context.Context, repoID, issueNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("repositories/%v/issues/%v/reactions/%v", repoID, issueNumber, reactionID) @@ -204,7 +204,7 @@ func (s *ReactionsService) DeleteIssueReactionByID(ctx context.Context, repoID, // ListIssueCommentReactions lists the reactions for an issue comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-an-issue-comment func (s *ReactionsService) ListIssueCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) u, err := addOptions(u, opts) @@ -234,7 +234,7 @@ func (s *ReactionsService) ListIssueCommentReactions(ctx context.Context, owner, // previously created reaction will be returned with Status: 200 OK. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-an-issue-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-an-issue-comment func (s *ReactionsService) CreateIssueCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id) @@ -258,7 +258,7 @@ func (s *ReactionsService) CreateIssueCommentReaction(ctx context.Context, owner // DeleteIssueCommentReaction deletes the reaction to an issue comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-an-issue-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-comment-reaction func (s *ReactionsService) DeleteIssueCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions/%v", owner, repo, commentID, reactionID) @@ -267,7 +267,7 @@ func (s *ReactionsService) DeleteIssueCommentReaction(ctx context.Context, owner // DeleteIssueCommentReactionByID deletes the reaction to an issue comment by repository ID. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-an-issue-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-an-issue-comment-reaction func (s *ReactionsService) DeleteIssueCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { url := fmt.Sprintf("repositories/%v/issues/comments/%v/reactions/%v", repoID, commentID, reactionID) @@ -276,7 +276,7 @@ func (s *ReactionsService) DeleteIssueCommentReactionByID(ctx context.Context, r // ListPullRequestCommentReactions lists the reactions for a pull request review comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-a-pull-request-review-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-pull-request-review-comment func (s *ReactionsService) ListPullRequestCommentReactions(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) u, err := addOptions(u, opts) @@ -306,7 +306,7 @@ func (s *ReactionsService) ListPullRequestCommentReactions(ctx context.Context, // previously created reaction will be returned with Status: 200 OK. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-a-pull-request-review-comment +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-pull-request-review-comment func (s *ReactionsService) CreatePullRequestCommentReaction(ctx context.Context, owner, repo string, id int64, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id) @@ -330,7 +330,7 @@ func (s *ReactionsService) CreatePullRequestCommentReaction(ctx context.Context, // DeletePullRequestCommentReaction deletes the reaction to a pull request review comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-a-pull-request-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-pull-request-comment-reaction func (s *ReactionsService) DeletePullRequestCommentReaction(ctx context.Context, owner, repo string, commentID, reactionID int64) (*Response, error) { url := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions/%v", owner, repo, commentID, reactionID) @@ -339,7 +339,7 @@ func (s *ReactionsService) DeletePullRequestCommentReaction(ctx context.Context, // DeletePullRequestCommentReactionByID deletes the reaction to a pull request review comment by repository ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-a-pull-request-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-a-pull-request-comment-reaction func (s *ReactionsService) DeletePullRequestCommentReactionByID(ctx context.Context, repoID, commentID, reactionID int64) (*Response, error) { url := fmt.Sprintf("repositories/%v/pulls/comments/%v/reactions/%v", repoID, commentID, reactionID) @@ -348,7 +348,7 @@ func (s *ReactionsService) DeletePullRequestCommentReactionByID(ctx context.Cont // ListTeamDiscussionReactions lists the reactions for a team discussion. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-a-team-discussion-legacy +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-team-discussion-legacy func (s *ReactionsService) ListTeamDiscussionReactions(ctx context.Context, teamID int64, discussionNumber int, opts *ListOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) u, err := addOptions(u, opts) @@ -375,7 +375,7 @@ func (s *ReactionsService) ListTeamDiscussionReactions(ctx context.Context, team // CreateTeamDiscussionReaction creates a reaction for a team discussion. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-a-team-discussion-legacy +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion-legacy func (s *ReactionsService) CreateTeamDiscussionReaction(ctx context.Context, teamID int64, discussionNumber int, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("teams/%v/discussions/%v/reactions", teamID, discussionNumber) @@ -398,7 +398,7 @@ func (s *ReactionsService) CreateTeamDiscussionReaction(ctx context.Context, tea // DeleteTeamDiscussionReaction deletes the reaction to a team discussion. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-team-discussion-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-reaction func (s *ReactionsService) DeleteTeamDiscussionReaction(ctx context.Context, org, teamSlug string, discussionNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/reactions/%v", org, teamSlug, discussionNumber, reactionID) @@ -407,7 +407,7 @@ func (s *ReactionsService) DeleteTeamDiscussionReaction(ctx context.Context, org // DeleteTeamDiscussionReactionByOrgIDAndTeamID deletes the reaction to a team discussion by organization ID and team ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-team-discussion-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-reaction func (s *ReactionsService) DeleteTeamDiscussionReactionByOrgIDAndTeamID(ctx context.Context, orgID, teamID, discussionNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/reactions/%v", orgID, teamID, discussionNumber, reactionID) @@ -416,7 +416,7 @@ func (s *ReactionsService) DeleteTeamDiscussionReactionByOrgIDAndTeamID(ctx cont // ListTeamDiscussionCommentReactions lists the reactions for a team discussion comment.
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#list-reactions-for-a-team-discussion-comment-legacy +// GitHub API docs: https://docs.github.com/en/rest/reactions#list-reactions-for-a-team-discussion-comment-legacy func (s *ReactionsService) ListTeamDiscussionCommentReactions(ctx context.Context, teamID int64, discussionNumber, commentNumber int, opts *ListOptions) ([]*Reaction, *Response, error) { u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) u, err := addOptions(u, opts) @@ -442,7 +442,7 @@ func (s *ReactionsService) ListTeamDiscussionCommentReactions(ctx context.Contex // CreateTeamDiscussionCommentReaction creates a reaction for a team discussion comment. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-a-team-discussion-comment-legacy +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-team-discussion-comment-legacy func (s *ReactionsService) CreateTeamDiscussionCommentReaction(ctx context.Context, teamID int64, discussionNumber, commentNumber int, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("teams/%v/discussions/%v/comments/%v/reactions", teamID, discussionNumber, commentNumber) @@ -465,7 +465,7 @@ func (s *ReactionsService) CreateTeamDiscussionCommentReaction(ctx context.Conte // DeleteTeamDiscussionCommentReaction deletes the reaction to a team discussion comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-team-discussion-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-comment-reaction func (s *ReactionsService) DeleteTeamDiscussionCommentReaction(ctx context.Context, org, teamSlug string, discussionNumber, commentNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v/reactions/%v", org, teamSlug, discussionNumber, commentNumber, reactionID) @@ -474,7 +474,7 @@ func (s *ReactionsService) DeleteTeamDiscussionCommentReaction(ctx context.Conte // DeleteTeamDiscussionCommentReactionByOrgIDAndTeamID deletes the reaction to a team discussion comment by organization ID and team ID. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#delete-team-discussion-comment-reaction +// GitHub API docs: https://docs.github.com/en/rest/reactions#delete-team-discussion-comment-reaction func (s *ReactionsService) DeleteTeamDiscussionCommentReactionByOrgIDAndTeamID(ctx context.Context, orgID, teamID, discussionNumber, commentNumber int, reactionID int64) (*Response, error) { url := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v/reactions/%v", orgID, teamID, discussionNumber, commentNumber, reactionID) @@ -498,7 +498,7 @@ func (s *ReactionsService) deleteReaction(ctx context.Context, url string) (*Res // added the reaction type to this release. // The content should have one of the following values: "+1", "-1", "laugh", "confused", "heart", "hooray", "rocket", or "eyes".
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/reactions/#create-reaction-for-a-release +// GitHub API docs: https://docs.github.com/en/rest/reactions#create-reaction-for-a-release func (s *ReactionsService) CreateReleaseReaction(ctx context.Context, owner, repo string, releaseID int64, content string) (*Reaction, *Response, error) { u := fmt.Sprintf("repos/%v/%v/releases/%v/reactions", owner, repo, releaseID) diff --git a/vendor/github.com/google/go-github/v42/github/repos.go b/vendor/github.com/google/go-github/v45/github/repos.go similarity index 81% rename from vendor/github.com/google/go-github/v42/github/repos.go rename to vendor/github.com/google/go-github/v45/github/repos.go index 2239c70001..a5f7fc6cf8 100644 --- a/vendor/github.com/google/go-github/v42/github/repos.go +++ b/vendor/github.com/google/go-github/v45/github/repos.go @@ -21,55 +21,58 @@ var ErrBranchNotProtected = errors.New("branch is not protected") // RepositoriesService handles communication with the repository related // methods of the GitHub API. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/ +// GitHub API docs: https://docs.github.com/en/rest/repos/ type RepositoriesService service // Repository represents a GitHub repository. type Repository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - MirrorURL *string `json:"mirror_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Language *string `json:"language,omitempty"` - Fork *bool `json:"fork,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - NetworkCount *int `json:"network_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` // Deprecated: Replaced by OpenIssuesCount. For backward compatibility OpenIssues is still populated. - StargazersCount *int `json:"stargazers_count,omitempty"` - SubscribersCount *int `json:"subscribers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility WatchersCount is still populated. - Watchers *int `json:"watchers,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility Watchers is still populated. 
- Size *int `json:"size,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - Parent *Repository `json:"parent,omitempty"` - Source *Repository `json:"source,omitempty"` - TemplateRepository *Repository `json:"template_repository,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Permissions map[string]bool `json:"permissions,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` - DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` - Topics []string `json:"topics,omitempty"` - Archived *bool `json:"archived,omitempty"` - Disabled *bool `json:"disabled,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Owner *User `json:"owner,omitempty"` + Name *string `json:"name,omitempty"` + FullName *string `json:"full_name,omitempty"` + Description *string `json:"description,omitempty"` + Homepage *string `json:"homepage,omitempty"` + CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` + DefaultBranch *string `json:"default_branch,omitempty"` + MasterBranch *string `json:"master_branch,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + PushedAt *Timestamp `json:"pushed_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + CloneURL *string `json:"clone_url,omitempty"` + GitURL *string `json:"git_url,omitempty"` + MirrorURL *string `json:"mirror_url,omitempty"` + SSHURL *string `json:"ssh_url,omitempty"` + SVNURL *string `json:"svn_url,omitempty"` + Language *string `json:"language,omitempty"` + Fork *bool `json:"fork,omitempty"` + ForksCount *int `json:"forks_count,omitempty"` + NetworkCount *int `json:"network_count,omitempty"` + OpenIssuesCount *int `json:"open_issues_count,omitempty"` + OpenIssues *int `json:"open_issues,omitempty"` // Deprecated: Replaced by OpenIssuesCount. For backward compatibility OpenIssues is still populated. + StargazersCount *int `json:"stargazers_count,omitempty"` + SubscribersCount *int `json:"subscribers_count,omitempty"` + WatchersCount *int `json:"watchers_count,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility WatchersCount is still populated. + Watchers *int `json:"watchers,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility Watchers is still populated. 
+ Size *int `json:"size,omitempty"` + AutoInit *bool `json:"auto_init,omitempty"` + Parent *Repository `json:"parent,omitempty"` + Source *Repository `json:"source,omitempty"` + TemplateRepository *Repository `json:"template_repository,omitempty"` + Organization *Organization `json:"organization,omitempty"` + Permissions map[string]bool `json:"permissions,omitempty"` + AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` + AllowUpdateBranch *bool `json:"allow_update_branch,omitempty"` + AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` + AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` + AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` + AllowForking *bool `json:"allow_forking,omitempty"` + DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` + UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` + Topics []string `json:"topics,omitempty"` + Archived *bool `json:"archived,omitempty"` + Disabled *bool `json:"disabled,omitempty"` // Only provided when using RepositoriesService.Get while in preview License *License `json:"license,omitempty"` @@ -131,13 +134,17 @@ type Repository struct { TeamsURL *string `json:"teams_url,omitempty"` // TextMatches is only populated from search results that request text matches - // See: search.go and https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#text-match-metadata + // See: search.go and https://docs.github.com/en/rest/search/#text-match-metadata TextMatches []*TextMatch `json:"text_matches,omitempty"` // Visibility is only used for Create and Edit endpoints. The visibility field // overrides the field parameter when both are used. // Can be one of public, private or internal. Visibility *string `json:"visibility,omitempty"` + + // RoleName is only returned by the API 'check team permissions for a repository'. + // See: teams.go (IsTeamRepoByID) https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository + RoleName *string `json:"role_name,omitempty"` } func (r Repository) String() string { @@ -227,8 +234,8 @@ func (s SecretScanning) String() string { // List the repositories for a user. Passing the empty string will list // repositories for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repositories-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repositories-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repositories-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repositories-for-a-user func (s *RepositoriesService) List(ctx context.Context, user string, opts *RepositoryListOptions) ([]*Repository, *Response, error) { var u string if user != "" { @@ -279,7 +286,7 @@ type RepositoryListByOrgOptions struct { // ListByOrg lists the repositories for an organization. 
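Because every `Repository` field is a pointer, request literals are built with the `github.String`/`github.Bool`-style helpers and read back through the generated `GetX` accessors. A read-only sketch listing a user's repositories; `octocat` is a placeholder and `github.NewClient(nil)` suffices for public data:

```go
func listRepos(ctx context.Context, client *github.Client) error {
	opts := &github.RepositoryListOptions{Type: "owner", Sort: "updated"}
	repos, _, err := client.Repositories.List(ctx, "octocat", opts)
	if err != nil {
		return err
	}
	for _, r := range repos {
		fmt.Println(r.GetFullName(), r.GetStargazersCount())
	}
	return nil
}
```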
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-organization-repositories +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-organization-repositories func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opts *RepositoryListByOrgOptions) ([]*Repository, *Response, error) { u := fmt.Sprintf("orgs/%v/repos", org) u, err := addOptions(u, opts) @@ -314,7 +321,7 @@ type RepositoryListAllOptions struct { // ListAll lists all GitHub repositories in the order that they were created. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-public-repositories +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-public-repositories func (s *RepositoriesService) ListAll(ctx context.Context, opts *RepositoryListAllOptions) ([]*Repository, *Response, error) { u, err := addOptions("repositories", opts) if err != nil { @@ -356,14 +363,17 @@ type createRepoRequest struct { // Creating an organization repository. Required for non-owners. TeamID *int64 `json:"team_id,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - GitignoreTemplate *string `json:"gitignore_template,omitempty"` - LicenseTemplate *string `json:"license_template,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` - AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` - DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` + AutoInit *bool `json:"auto_init,omitempty"` + GitignoreTemplate *string `json:"gitignore_template,omitempty"` + LicenseTemplate *string `json:"license_template,omitempty"` + AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` + AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` + AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` + AllowUpdateBranch *bool `json:"allow_update_branch,omitempty"` + AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` + AllowForking *bool `json:"allow_forking,omitempty"` + DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` + UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` } // Create a new repository. If an organization is specified, the new @@ -378,8 +388,8 @@ type createRepoRequest struct { // changes propagate throughout its servers. You may set up a loop with // exponential back-off to verify repository's creation. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-repository-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-an-organization-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-an-organization-repository func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repository) (*Repository, *Response, error) { var u string if org != "" { @@ -389,24 +399,27 @@ func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repo } repoReq := &createRepoRequest{ - Name: repo.Name, - Description: repo.Description, - Homepage: repo.Homepage, - Private: repo.Private, - Visibility: repo.Visibility, - HasIssues: repo.HasIssues, - HasProjects: repo.HasProjects, - HasWiki: repo.HasWiki, - IsTemplate: repo.IsTemplate, - TeamID: repo.TeamID, - AutoInit: repo.AutoInit, - GitignoreTemplate: repo.GitignoreTemplate, - LicenseTemplate: repo.LicenseTemplate, - AllowSquashMerge: repo.AllowSquashMerge, - AllowMergeCommit: repo.AllowMergeCommit, - AllowRebaseMerge: repo.AllowRebaseMerge, - AllowAutoMerge: repo.AllowAutoMerge, - DeleteBranchOnMerge: repo.DeleteBranchOnMerge, + Name: repo.Name, + Description: repo.Description, + Homepage: repo.Homepage, + Private: repo.Private, + Visibility: repo.Visibility, + HasIssues: repo.HasIssues, + HasProjects: repo.HasProjects, + HasWiki: repo.HasWiki, + IsTemplate: repo.IsTemplate, + TeamID: repo.TeamID, + AutoInit: repo.AutoInit, + GitignoreTemplate: repo.GitignoreTemplate, + LicenseTemplate: repo.LicenseTemplate, + AllowSquashMerge: repo.AllowSquashMerge, + AllowMergeCommit: repo.AllowMergeCommit, + AllowRebaseMerge: repo.AllowRebaseMerge, + AllowUpdateBranch: repo.AllowUpdateBranch, + AllowAutoMerge: repo.AllowAutoMerge, + AllowForking: repo.AllowForking, + DeleteBranchOnMerge: repo.DeleteBranchOnMerge, + UseSquashPRTitleAsDefault: repo.UseSquashPRTitleAsDefault, } req, err := s.client.NewRequest("POST", u, repoReq) @@ -438,7 +451,7 @@ type TemplateRepoRequest struct { // CreateFromTemplate generates a repository from a template. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-repository-using-a-template +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-using-a-template func (s *RepositoriesService) CreateFromTemplate(ctx context.Context, templateOwner, templateRepo string, templateRepoReq *TemplateRepoRequest) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/generate", templateOwner, templateRepo) @@ -459,7 +472,7 @@ func (s *RepositoriesService) CreateFromTemplate(ctx context.Context, templateOw // Get fetches a repository. 
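A sketch of `Create` exercising the fields newly forwarded by createRepoRequest in this version (`AllowUpdateBranch`, `UseSquashPRTitleAsDefault`); the repository name is a placeholder, and an empty org string targets the authenticated user:

```go
func createRepo(ctx context.Context, client *github.Client) error {
	repo := &github.Repository{
		Name:                      github.String("demo-repo"),
		Private:                   github.Bool(true),
		AutoInit:                  github.Bool(true),
		AllowUpdateBranch:         github.Bool(true),
		UseSquashPRTitleAsDefault: github.Bool(true),
		DeleteBranchOnMerge:       github.Bool(true),
	}
	created, _, err := client.Repositories.Create(ctx, "", repo)
	if err != nil {
		return err
	}
	fmt.Println("created:", created.GetHTMLURL())
	return nil
}
```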
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-a-repository func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -468,7 +481,7 @@ func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Rep } // TODO: remove custom Accept header when the license support fully launches - // https://docs.github.com/en/free-pro-team@latest/rest/reference/licenses/#get-a-repositorys-license + // https://docs.github.com/en/rest/licenses/#get-a-repositorys-license acceptHeaders := []string{ mediaTypeCodesOfConductPreview, mediaTypeTopicsPreview, @@ -487,10 +500,12 @@ func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Rep } // GetCodeOfConduct gets the contents of a repository's code of conduct. +// Note that https://docs.github.com/en/rest/codes-of-conduct#about-the-codes-of-conduct-api +// says to use the GET /repos/{owner}/{repo} endpoint. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/codes-of-conduct/#get-the-code-of-conduct-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-a-repository func (s *RepositoriesService) GetCodeOfConduct(ctx context.Context, owner, repo string) (*CodeOfConduct, *Response, error) { - u := fmt.Sprintf("repos/%v/%v/community/code_of_conduct", owner, repo) + u := fmt.Sprintf("repos/%v/%v", owner, repo) req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err @@ -499,13 +514,13 @@ func (s *RepositoriesService) GetCodeOfConduct(ctx context.Context, owner, repo // TODO: remove custom Accept header when this API fully launches. req.Header.Set("Accept", mediaTypeCodesOfConductPreview) - coc := new(CodeOfConduct) - resp, err := s.client.Do(ctx, req, coc) + r := new(Repository) + resp, err := s.client.Do(ctx, req, r) if err != nil { return nil, resp, err } - return coc, resp, nil + return r.GetCodeOfConduct(), resp, nil } // GetByID fetches a repository. @@ -529,7 +544,7 @@ func (s *RepositoriesService) GetByID(ctx context.Context, id int64) (*Repositor // Edit updates a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#update-a-repository func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repository *Repository) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v", owner, repo) req, err := s.client.NewRequest("PATCH", u, repository) @@ -550,7 +565,7 @@ func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repo // Delete a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#delete-a-repository func (s *RepositoriesService) Delete(ctx context.Context, owner, repo string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v", owner, repo) req, err := s.client.NewRequest("DELETE", u, nil) @@ -597,7 +612,7 @@ type ListContributorsOptions struct { // GetVulnerabilityAlerts checks if vulnerability alerts are enabled for a repository.
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#check-if-vulnerability-alerts-are-enabled-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#check-if-vulnerability-alerts-are-enabled-for-a-repository func (s *RepositoriesService) GetVulnerabilityAlerts(ctx context.Context, owner, repository string) (bool, *Response, error) { u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository) @@ -611,13 +626,12 @@ func (s *RepositoriesService) GetVulnerabilityAlerts(ctx context.Context, owner, resp, err := s.client.Do(ctx, req, nil) vulnerabilityAlertsEnabled, err := parseBoolResponse(err) - return vulnerabilityAlertsEnabled, resp, err } // EnableVulnerabilityAlerts enables vulnerability alerts and the dependency graph for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#enable-vulnerability-alerts +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#enable-vulnerability-alerts func (s *RepositoriesService) EnableVulnerabilityAlerts(ctx context.Context, owner, repository string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository) @@ -634,7 +648,7 @@ func (s *RepositoriesService) EnableVulnerabilityAlerts(ctx context.Context, own // DisableVulnerabilityAlerts disables vulnerability alerts and the dependency graph for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#disable-vulnerability-alerts +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#disable-vulnerability-alerts func (s *RepositoriesService) DisableVulnerabilityAlerts(ctx context.Context, owner, repository string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/vulnerability-alerts", owner, repository) @@ -651,7 +665,7 @@ func (s *RepositoriesService) DisableVulnerabilityAlerts(ctx context.Context, ow // EnableAutomatedSecurityFixes enables the automated security fixes for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#enable-automated-security-fixes +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#enable-automated-security-fixes func (s *RepositoriesService) EnableAutomatedSecurityFixes(ctx context.Context, owner, repository string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/automated-security-fixes", owner, repository) @@ -668,7 +682,7 @@ func (s *RepositoriesService) EnableAutomatedSecurityFixes(ctx context.Context, // DisableAutomatedSecurityFixes disables vulnerability alerts and the dependency graph for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#disable-automated-security-fixes +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#disable-automated-security-fixes func (s *RepositoriesService) DisableAutomatedSecurityFixes(ctx context.Context, owner, repository string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/automated-security-fixes", owner, repository) @@ -685,7 +699,7 @@ func (s *RepositoriesService) DisableAutomatedSecurityFixes(ctx context.Context, // ListContributors lists contributors for a repository. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-contributors +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-contributors func (s *RepositoriesService) ListContributors(ctx context.Context, owner string, repository string, opts *ListContributorsOptions) ([]*Contributor, *Response, error) { u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository) u, err := addOptions(u, opts) @@ -716,7 +730,7 @@ func (s *RepositoriesService) ListContributors(ctx context.Context, owner string // "Python": 7769 // } // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-languages +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-languages func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, repo string) (map[string]int, *Response, error) { u := fmt.Sprintf("repos/%v/%v/languages", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -735,7 +749,7 @@ func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, r // ListTeams lists the teams for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-teams +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-teams func (s *RepositoriesService) ListTeams(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Team, *Response, error) { u := fmt.Sprintf("repos/%v/%v/teams", owner, repo) u, err := addOptions(u, opts) @@ -767,7 +781,7 @@ type RepositoryTag struct { // ListTags lists tags for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-tags +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#list-repository-tags func (s *RepositoriesService) ListTags(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*RepositoryTag, *Response, error) { u := fmt.Sprintf("repos/%v/%v/tags", owner, repo) u, err := addOptions(u, opts) @@ -874,14 +888,33 @@ type RequiredStatusChecks struct { // Require branches to be up to date before merging. (Required.) Strict bool `json:"strict"` // The list of status checks to require in order to merge into this - // branch. (Required; use []string{} instead of nil for empty list.) - Contexts []string `json:"contexts"` + // branch. (Deprecated. Note: only one of Contexts/Checks can be populated, + // but at least one must be populated). + Contexts []string `json:"contexts,omitempty"` + // The list of status checks to require in order to merge into this + // branch. + Checks []*RequiredStatusCheck `json:"checks,omitempty"` } // RequiredStatusChecksRequest represents a request to edit a protected branch's status checks. type RequiredStatusChecksRequest struct { - Strict *bool `json:"strict,omitempty"` - Contexts []string `json:"contexts,omitempty"` + Strict *bool `json:"strict,omitempty"` + // Note: if both Contexts and Checks are populated, + // the GitHub API will only use Checks. + Contexts []string `json:"contexts,omitempty"` + Checks []*RequiredStatusCheck `json:"checks,omitempty"` +} + +// RequiredStatusCheck represents a status check of a protected branch. +type RequiredStatusCheck struct { + // The name of the required check. + Context string `json:"context"` + // The ID of the GitHub App that must provide this check. 
+ // Omit this field to automatically select the GitHub App + // that has recently provided this check, + // or any app if it was not set by a GitHub App. + // Pass -1 to explicitly allow any app to set the status. + AppID *int64 `json:"app_id,omitempty"` } // PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch. @@ -922,10 +955,10 @@ type PullRequestReviewsEnforcementUpdate struct { DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"` // Specifies if approved reviews can be dismissed automatically, when a new commit is pushed. Can be omitted. DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"` - // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner. - RequireCodeOwnerReviews bool `json:"require_code_owner_reviews,omitempty"` + // RequireCodeOwnerReviews specifies if merging pull requests is blocked until code owners have reviewed. + RequireCodeOwnerReviews *bool `json:"require_code_owner_reviews,omitempty"` // RequiredApprovingReviewCount specifies the number of approvals required before the pull request can be merged. - // Valid values are 1 - 6. + // Valid values are 1 - 6 or 0 to not require reviewers. RequiredApprovingReviewCount int `json:"required_approving_review_count"` } @@ -1008,7 +1041,7 @@ type SignaturesProtectedBranch struct { // ListBranches lists branches for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-branches +// GitHub API docs: https://docs.github.com/en/rest/branches/branches#list-branches func (s *RepositoriesService) ListBranches(ctx context.Context, owner string, repo string, opts *BranchListOptions) ([]*Branch, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches", owner, repo) u, err := addOptions(u, opts) @@ -1032,11 +1065,11 @@ func (s *RepositoriesService) ListBranches(ctx context.Context, owner string, re // GetBranch gets the specified branch for a repository. 
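A sketch of building the new `Checks`-based request consumed by `UpdateRequiredStatusChecks` further down; per the comments above, `Checks` supersedes the deprecated `Contexts`, and an `AppID` of -1 explicitly allows any app to set the status. The check context and branch name are placeholders:

```go
func requireCI(ctx context.Context, client *github.Client) error {
	req := &github.RequiredStatusChecksRequest{
		Strict: github.Bool(true),
		Checks: []*github.RequiredStatusCheck{
			{Context: "ci/test", AppID: github.Int64(-1)},
		},
	}
	checks, _, err := client.Repositories.UpdateRequiredStatusChecks(ctx,
		"octocat", "hello-world", "main", req)
	if err != nil {
		return err
	}
	fmt.Println("strict:", checks.Strict)
	return nil
}
```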
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-branch +// GitHub API docs: https://docs.github.com/en/rest/branches/branches#get-a-branch func (s *RepositoriesService) GetBranch(ctx context.Context, owner, repo, branch string, followRedirects bool) (*Branch, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch) - resp, err := s.getBranchFromURL(ctx, u, followRedirects) + resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) if err != nil { return nil, nil, err } @@ -1051,33 +1084,6 @@ func (s *RepositoriesService) GetBranch(ctx context.Context, owner, repo, branch return b, newResponse(resp), err } -func (s *RepositoriesService) getBranchFromURL(ctx context.Context, u string, followRedirects bool) (*http.Response, error) { - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - var resp *http.Response - // Use http.DefaultTransport if no custom Transport is configured - req = withContext(ctx, req) - if s.client.client.Transport == nil { - resp, err = http.DefaultTransport.RoundTrip(req) - } else { - resp, err = s.client.client.Transport.RoundTrip(req) - } - if err != nil { - return nil, err - } - - // If redirect response is returned, follow it - if followRedirects && resp.StatusCode == http.StatusMovedPermanently { - resp.Body.Close() - u = resp.Header.Get("Location") - resp, err = s.getBranchFromURL(ctx, u, false) - } - return resp, err -} - // renameBranchRequest represents a request to rename a branch. type renameBranchRequest struct { NewName string `json:"new_name"` @@ -1088,7 +1094,7 @@ type renameBranchRequest struct { // To rename a non-default branch: Users must have push access. GitHub Apps must have the `contents:write` repository permission. // To rename the default branch: Users must have admin or owner permissions. GitHub Apps must have the `administration:write` repository permission. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#rename-a-branch +// GitHub API docs: https://docs.github.com/en/rest/branches/branches#rename-a-branch func (s *RepositoriesService) RenameBranch(ctx context.Context, owner, repo, branch, newName string) (*Branch, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/rename", owner, repo, branch) r := &renameBranchRequest{NewName: newName} @@ -1108,7 +1114,7 @@ func (s *RepositoriesService) RenameBranch(ctx context.Context, owner, repo, bra // GetBranchProtection gets the protection of a given branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-branch-protection func (s *RepositoriesService) GetBranchProtection(ctx context.Context, owner, repo, branch string) (*Protection, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1133,7 +1139,7 @@ func (s *RepositoriesService) GetBranchProtection(ctx context.Context, owner, re // GetRequiredStatusChecks gets the required status checks for a given protected branch. 
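A sketch of `GetBranch`; passing `followRedirects=true` makes the client chase the 301 GitHub returns when a branch has been renamed:

```go
func headSHA(ctx context.Context, client *github.Client) (string, error) {
	b, _, err := client.Repositories.GetBranch(ctx, "octocat", "hello-world", "main", true)
	if err != nil {
		return "", err
	}
	return b.GetCommit().GetSHA(), nil
}
```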
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-status-checks-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-status-checks-protection func (s *RepositoriesService) GetRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*RequiredStatusChecks, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1155,7 +1161,7 @@ func (s *RepositoriesService) GetRequiredStatusChecks(ctx context.Context, owner // ListRequiredStatusChecksContexts lists the required status checks contexts for a given protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-all-status-check-contexts +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-all-status-check-contexts func (s *RepositoriesService) ListRequiredStatusChecksContexts(ctx context.Context, owner, repo, branch string) (contexts []string, resp *Response, err error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks/contexts", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1176,7 +1182,7 @@ func (s *RepositoriesService) ListRequiredStatusChecksContexts(ctx context.Conte // UpdateBranchProtection updates the protection of a given branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-branch-protection func (s *RepositoriesService) UpdateBranchProtection(ctx context.Context, owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) req, err := s.client.NewRequest("PUT", u, preq) @@ -1198,7 +1204,7 @@ func (s *RepositoriesService) UpdateBranchProtection(ctx context.Context, owner, // RemoveBranchProtection removes the protection of a given branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-branch-protection func (s *RepositoriesService) RemoveBranchProtection(ctx context.Context, owner, repo, branch string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, nil) @@ -1211,7 +1217,7 @@ func (s *RepositoriesService) RemoveBranchProtection(ctx context.Context, owner, // GetSignaturesProtectedBranch gets required signatures of protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-commit-signature-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-commit-signature-protection func (s *RepositoriesService) GetSignaturesProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1234,7 +1240,7 @@ func (s *RepositoriesService) GetSignaturesProtectedBranch(ctx context.Context, // RequireSignaturesOnProtectedBranch makes signed commits required on a protected branch. 
// It requires admin access and branch protection to be enabled. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-commit-signature-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#create-commit-signature-protection func (s *RepositoriesService) RequireSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*SignaturesProtectedBranch, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) req, err := s.client.NewRequest("POST", u, nil) @@ -1251,12 +1257,12 @@ func (s *RepositoriesService) RequireSignaturesOnProtectedBranch(ctx context.Con return nil, resp, err } - return r, resp, err + return r, resp, nil } // OptionalSignaturesOnProtectedBranch removes required signed commits on a given branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-commit-signature-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-commit-signature-protection func (s *RepositoriesService) OptionalSignaturesOnProtectedBranch(ctx context.Context, owner, repo, branch string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_signatures", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, nil) @@ -1272,7 +1278,7 @@ func (s *RepositoriesService) OptionalSignaturesOnProtectedBranch(ctx context.Co // UpdateRequiredStatusChecks updates the required status checks for a given protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-status-check-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-status-check-protection func (s *RepositoriesService) UpdateRequiredStatusChecks(ctx context.Context, owner, repo, branch string, sreq *RequiredStatusChecksRequest) (*RequiredStatusChecks, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) req, err := s.client.NewRequest("PATCH", u, sreq) @@ -1291,7 +1297,7 @@ func (s *RepositoriesService) UpdateRequiredStatusChecks(ctx context.Context, ow // RemoveRequiredStatusChecks removes the required status checks for a given protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos#remove-status-check-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-status-check-protection func (s *RepositoriesService) RemoveRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, nil) @@ -1304,7 +1310,7 @@ func (s *RepositoriesService) RemoveRequiredStatusChecks(ctx context.Context, ow // License gets the contents of a repository's license if one is detected. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/licenses/#get-the-license-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/licenses#get-the-license-for-a-repository func (s *RepositoriesService) License(ctx context.Context, owner, repo string) (*RepositoryLicense, *Response, error) { u := fmt.Sprintf("repos/%v/%v/license", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -1323,7 +1329,7 @@ func (s *RepositoriesService) License(ctx context.Context, owner, repo string) ( // GetPullRequestReviewEnforcement gets pull request review enforcement of a protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-pull-request-review-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-pull-request-review-protection func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1346,7 +1352,7 @@ func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Contex // UpdatePullRequestReviewEnforcement patches pull request review enforcement of a protected branch. // It requires admin access and branch protection to be enabled. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-pull-request-review-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-pull-request-review-protection func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string, patch *PullRequestReviewsEnforcementUpdate) (*PullRequestReviewsEnforcement, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) req, err := s.client.NewRequest("PATCH", u, patch) @@ -1363,13 +1369,13 @@ func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Con return nil, resp, err } - return r, resp, err + return r, resp, nil } // DisableDismissalRestrictions disables dismissal restrictions of a protected branch. // It requires admin access and branch protection to be enabled. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-pull-request-review-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#update-pull-request-review-protection func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) @@ -1391,12 +1397,12 @@ func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, return nil, resp, err } - return r, resp, err + return r, resp, nil } // RemovePullRequestReviewEnforcement removes pull request enforcement of a protected branch. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-pull-request-review-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-pull-request-review-protection func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, nil) @@ -1409,7 +1415,7 @@ func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Con // GetAdminEnforcement gets admin enforcement information of a protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-admin-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-admin-branch-protection func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1429,7 +1435,7 @@ func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, re // AddAdminEnforcement adds admin enforcement to a protected branch. // It requires admin access and branch protection to be enabled. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#set-admin-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-admin-branch-protection func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) req, err := s.client.NewRequest("POST", u, nil) @@ -1443,12 +1449,12 @@ func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, re return nil, resp, err } - return r, resp, err + return r, resp, nil } // RemoveAdminEnforcement removes admin enforcement from a protected branch. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-admin-branch-protection +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#delete-admin-branch-protection func (s *RepositoriesService) RemoveAdminEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, nil) @@ -1466,7 +1472,7 @@ type repositoryTopics struct { // ListAllTopics lists topics for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-all-repository-topics +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#get-all-repository-topics func (s *RepositoriesService) ListAllTopics(ctx context.Context, owner, repo string) ([]string, *Response, error) { u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -1486,9 +1492,9 @@ func (s *RepositoriesService) ListAllTopics(ctx context.Context, owner, repo str return topics.Names, resp, nil } -// ReplaceAllTopics replaces topics for a repository. +// ReplaceAllTopics replaces all repository topics. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#replace-all-repository-topics +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#replace-all-repository-topics func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo string, topics []string) ([]string, *Response, error) { u := fmt.Sprintf("repos/%v/%v/topics", owner, repo) t := &repositoryTopics{ @@ -1517,7 +1523,7 @@ func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo // ListApps lists the GitHub apps that have push access to a given protected branch. // It requires the GitHub apps to have `write` access to the `content` permission. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-apps-with-access-to-the-protected-branch +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#get-apps-with-access-to-the-protected-branch func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) req, err := s.client.NewRequest("GET", u, nil) @@ -1540,7 +1546,7 @@ func (s *RepositoriesService) ListApps(ctx context.Context, owner, repo, branch // // Note: The list of users, apps, and teams in total is limited to 100 items. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#set-app-access-restrictions +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#set-app-access-restrictions func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) req, err := s.client.NewRequest("PUT", u, slug) @@ -1562,7 +1568,7 @@ func (s *RepositoriesService) ReplaceAppRestrictions(ctx context.Context, owner, // // Note: The list of users, apps, and teams in total is limited to 100 items. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#add-app-access-restrictions +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#add-app-access-restrictions func (s *RepositoriesService) AddAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) req, err := s.client.NewRequest("POST", u, slug) @@ -1584,7 +1590,7 @@ func (s *RepositoriesService) AddAppRestrictions(ctx context.Context, owner, rep // // Note: The list of users, apps, and teams in total is limited to 100 items. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#remove-app-access-restrictions +// GitHub API docs: https://docs.github.com/en/rest/branches/branch-protection#remove-app-access-restrictions func (s *RepositoriesService) RemoveAppRestrictions(ctx context.Context, owner, repo, branch string, slug []string) ([]*App, *Response, error) { u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/restrictions/apps", owner, repo, branch) req, err := s.client.NewRequest("DELETE", u, slug) @@ -1615,7 +1621,7 @@ type TransferRequest struct { // A follow up request, after a delay of a second or so, should result // in a successful request. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#transfer-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#transfer-a-repository func (s *RepositoriesService) Transfer(ctx context.Context, owner, repo string, transfer TransferRequest) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/transfer", owner, repo) @@ -1644,7 +1650,7 @@ type DispatchRequestOptions struct { // Dispatch triggers a repository_dispatch event in a GitHub Actions workflow. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-repository-dispatch-event +// GitHub API docs: https://docs.github.com/en/rest/repos/repos#create-a-repository-dispatch-event func (s *RepositoriesService) Dispatch(ctx context.Context, owner, repo string, opts DispatchRequestOptions) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/dispatches", owner, repo) diff --git a/vendor/github.com/google/go-github/v45/github/repos_actions_allowed.go b/vendor/github.com/google/go-github/v45/github/repos_actions_allowed.go new file mode 100644 index 0000000000..25a4690583 --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/repos_actions_allowed.go @@ -0,0 +1,49 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package github + +import ( + "context" + "fmt" +) + +// GetActionsAllowed gets the allowed actions and reusable workflows for a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-allowed-actions-and-reusable-workflows-for-a-repository +func (s *RepositoriesService) GetActionsAllowed(ctx context.Context, org, repo string) (*ActionsAllowed, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions/selected-actions", org, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + actionsAllowed := new(ActionsAllowed) + resp, err := s.client.Do(ctx, req, actionsAllowed) + if err != nil { + return nil, resp, err + } + + return actionsAllowed, resp, nil +} + +// EditActionsAllowed sets the allowed actions and reusable workflows for a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-allowed-actions-and-reusable-workflows-for-a-repository +func (s *RepositoriesService) EditActionsAllowed(ctx context.Context, org, repo string, actionsAllowed ActionsAllowed) (*ActionsAllowed, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions/selected-actions", org, repo) + req, err := s.client.NewRequest("PUT", u, actionsAllowed) + if err != nil { + return nil, nil, err + } + + p := new(ActionsAllowed) + resp, err := s.client.Do(ctx, req, p) + if err != nil { + return nil, resp, err + } + + return p, resp, nil +} diff --git a/vendor/github.com/google/go-github/v45/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v45/github/repos_actions_permissions.go new file mode 100644 index 0000000000..45f844cec0 --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/repos_actions_permissions.go @@ -0,0 +1,62 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
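The new repos_actions_allowed.go above mirrors the existing org-level endpoints at the repository level. A hedged usage sketch, not part of this diff: it assumes the ActionsAllowed type defined in the org-level file (with github_owned_allowed, verified_allowed, and patterns_allowed fields), and the names and patterns are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Restrict the repository to GitHub-owned actions plus explicit patterns.
	allowed, _, err := client.Repositories.EditActionsAllowed(ctx, "octocat", "hello-world",
		github.ActionsAllowed{
			GithubOwnedAllowed: github.Bool(true),
			VerifiedAllowed:    github.Bool(false),
			PatternsAllowed:    []string{"actions/checkout@*", "docker/*"},
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", allowed)
}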
+ +package github + +import ( + "context" + "fmt" +) + +// ActionsPermissionsRepository represents a policy for repositories and allowed actions in a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions +type ActionsPermissionsRepository struct { + Enabled *bool `json:"enabled,omitempty"` + AllowedActions *string `json:"allowed_actions,omitempty"` + SelectedActionsURL *string `json:"selected_actions_url,omitempty"` +} + +func (a ActionsPermissionsRepository) String() string { + return Stringify(a) +} + +// GetActionsPermissions gets the GitHub Actions permissions policy for repositories and allowed actions in a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#get-github-actions-permissions-for-a-repository +func (s *RepositoriesService) GetActionsPermissions(ctx context.Context, owner, repo string) (*ActionsPermissionsRepository, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + permissions := new(ActionsPermissionsRepository) + resp, err := s.client.Do(ctx, req, permissions) + if err != nil { + return nil, resp, err + } + + return permissions, resp, nil +} + +// EditActionsPermissions sets the permissions policy for repositories and allowed actions in a repository. +// +// GitHub API docs: https://docs.github.com/en/rest/actions/permissions#set-github-actions-permissions-for-a-repository +func (s *RepositoriesService) EditActionsPermissions(ctx context.Context, owner, repo string, actionsPermissionsRepository ActionsPermissionsRepository) (*ActionsPermissionsRepository, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/permissions", owner, repo) + req, err := s.client.NewRequest("PUT", u, actionsPermissionsRepository) + if err != nil { + return nil, nil, err + } + + permissions := new(ActionsPermissionsRepository) + resp, err := s.client.Do(ctx, req, permissions) + if err != nil { + return nil, resp, err + } + + return permissions, resp, nil +} diff --git a/vendor/github.com/google/go-github/v42/github/repos_autolinks.go b/vendor/github.com/google/go-github/v45/github/repos_autolinks.go similarity index 87% rename from vendor/github.com/google/go-github/v42/github/repos_autolinks.go rename to vendor/github.com/google/go-github/v45/github/repos_autolinks.go index b6404783eb..8fa916eac2 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_autolinks.go +++ b/vendor/github.com/google/go-github/v45/github/repos_autolinks.go @@ -26,7 +26,7 @@ type Autolink struct { // ListAutolinks returns a list of autolinks configured for the given repository. // Information about autolinks is only available to repository administrators. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#list-all-autolinks-of-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#list-all-autolinks-of-a-repository func (s *RepositoriesService) ListAutolinks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Autolink, *Response, error) { u := fmt.Sprintf("repos/%v/%v/autolinks", owner, repo) u, err := addOptions(u, opts) @@ -51,7 +51,7 @@ func (s *RepositoriesService) ListAutolinks(ctx context.Context, owner, repo str // AddAutolink creates an autolink reference for a repository. // Users with admin access to the repository can create an autolink.
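A companion sketch for the permissions file above (editor's illustration, not part of the diff): reading and then narrowing a repository's Actions policy. The "selected" value follows the REST documentation linked above; owner and repo names are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	perms, _, err := client.Repositories.GetActionsPermissions(ctx, "octocat", "hello-world")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current policy:", perms.GetAllowedActions())

	// Narrow the policy; "selected" defers to the selected-actions endpoint shown earlier.
	perms, _, err = client.Repositories.EditActionsPermissions(ctx, "octocat", "hello-world",
		github.ActionsPermissionsRepository{
			Enabled:        github.Bool(true),
			AllowedActions: github.String("selected"),
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new policy:", perms.GetAllowedActions())
}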
// -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#create-an-autolink-reference-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#create-an-autolink-reference-for-a-repository func (s *RepositoriesService) AddAutolink(ctx context.Context, owner, repo string, opts *AutolinkOptions) (*Autolink, *Response, error) { u := fmt.Sprintf("repos/%v/%v/autolinks", owner, repo) req, err := s.client.NewRequest("POST", u, opts) @@ -70,7 +70,7 @@ func (s *RepositoriesService) AddAutolink(ctx context.Context, owner, repo strin // GetAutolink returns a single autolink reference by ID that was configured for the given repository. // Information about autolinks is only available to repository administrators. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#get-an-autolink-reference-of-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#get-an-autolink-reference-of-a-repository func (s *RepositoriesService) GetAutolink(ctx context.Context, owner, repo string, id int64) (*Autolink, *Response, error) { u := fmt.Sprintf("repos/%v/%v/autolinks/%v", owner, repo, id) @@ -91,7 +91,7 @@ func (s *RepositoriesService) GetAutolink(ctx context.Context, owner, repo strin // DeleteAutolink deletes a single autolink reference by ID that was configured for the given repository. // Information about autolinks is only available to repository administrators. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#delete-an-autolink-reference-from-a-repository +// GitHub API docs: https://docs.github.com/en/rest/repos/autolinks#delete-an-autolink-reference-from-a-repository func (s *RepositoriesService) DeleteAutolink(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/autolinks/%v", owner, repo, id) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/repos_collaborators.go b/vendor/github.com/google/go-github/v45/github/repos_collaborators.go similarity index 87% rename from vendor/github.com/google/go-github/v42/github/repos_collaborators.go rename to vendor/github.com/google/go-github/v45/github/repos_collaborators.go index ccb97a192a..abc4161c3b 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_collaborators.go +++ b/vendor/github.com/google/go-github/v45/github/repos_collaborators.go @@ -27,7 +27,7 @@ type ListCollaboratorsOptions struct { } // CollaboratorInvitation represents an invitation created when adding a collaborator. -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/collaborators/#response-when-a-new-invitation-is-created +// GitHub API docs: https://docs.github.com/en/rest/repos/collaborators/#response-when-a-new-invitation-is-created type CollaboratorInvitation struct { ID *int64 `json:"id,omitempty"` Repo *Repository `json:"repository,omitempty"` @@ -41,7 +41,7 @@ type CollaboratorInvitation struct { // ListCollaborators lists the GitHub users that have access to the repository.
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-collaborators +// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#list-repository-collaborators func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opts *ListCollaboratorsOptions) ([]*User, *Response, error) { u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo) u, err := addOptions(u, opts) @@ -68,7 +68,7 @@ func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo // Note: This will return false if the user is not a collaborator OR the user // is not a GitHub user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#check-if-a-user-is-a-repository-collaborator +// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#check-if-a-user-is-a-repository-collaborator func (s *RepositoriesService) IsCollaborator(ctx context.Context, owner, repo, user string) (bool, *Response, error) { u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) req, err := s.client.NewRequest("GET", u, nil) @@ -91,7 +91,8 @@ type RepositoryPermissionLevel struct { } // GetPermissionLevel retrieves the specific permission level a collaborator has for a given repository. -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-repository-permissions-for-a-user +// +// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#get-repository-permissions-for-a-user func (s *RepositoriesService) GetPermissionLevel(ctx context.Context, owner, repo, user string) (*RepositoryPermissionLevel, *Response, error) { u := fmt.Sprintf("repos/%v/%v/collaborators/%v/permission", owner, repo, user) req, err := s.client.NewRequest("GET", u, nil) @@ -104,6 +105,7 @@ func (s *RepositoriesService) GetPermissionLevel(ctx context.Context, owner, rep if err != nil { return nil, resp, err } + return rpl, resp, nil } @@ -125,30 +127,33 @@ type RepositoryAddCollaboratorOptions struct { // AddCollaborator sends an invitation to the specified GitHub user // to become a collaborator to the given repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#add-a-repository-collaborator +// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#add-a-repository-collaborator func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opts *RepositoryAddCollaboratorOptions) (*CollaboratorInvitation, *Response, error) { u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) req, err := s.client.NewRequest("PUT", u, opts) if err != nil { return nil, nil, err } + acr := new(CollaboratorInvitation) resp, err := s.client.Do(ctx, req, acr) if err != nil { return nil, resp, err } + return acr, resp, nil } // RemoveCollaborator removes the specified GitHub user as collaborator from the given repo. // Note: Does not return error if a valid user that is not a collaborator is removed. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#remove-a-repository-collaborator +// GitHub API docs: https://docs.github.com/en/rest/collaborators/collaborators#remove-a-repository-collaborator func (s *RepositoriesService) RemoveCollaborator(ctx context.Context, owner, repo, user string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user) req, err := s.client.NewRequest("DELETE", u, nil) if err != nil { return nil, err } + return s.client.Do(ctx, req, nil) } diff --git a/vendor/github.com/google/go-github/v42/github/repos_comments.go b/vendor/github.com/google/go-github/v45/github/repos_comments.go similarity index 86% rename from vendor/github.com/google/go-github/v42/github/repos_comments.go rename to vendor/github.com/google/go-github/v45/github/repos_comments.go index 912eeba3fb..55a88d1f5e 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_comments.go +++ b/vendor/github.com/google/go-github/v45/github/repos_comments.go @@ -36,7 +36,7 @@ func (r RepositoryComment) String() string { // ListComments lists all the comments for the repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-commit-comments-for-a-repository +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#list-commit-comments-for-a-repository func (s *RepositoriesService) ListComments(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/comments", owner, repo) u, err := addOptions(u, opts) @@ -63,7 +63,7 @@ func (s *RepositoriesService) ListComments(ctx context.Context, owner, repo stri // ListCommitComments lists all the comments for a given commit SHA. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-commit-comments +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#list-commit-comments func (s *RepositoriesService) ListCommitComments(ctx context.Context, owner, repo, sha string, opts *ListOptions) ([]*RepositoryComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) u, err := addOptions(u, opts) @@ -91,7 +91,7 @@ func (s *RepositoriesService) ListCommitComments(ctx context.Context, owner, rep // CreateComment creates a comment for the given commit. // Note: GitHub allows for comments to be created for non-existing files and positions. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#create-a-commit-comment func (s *RepositoriesService) CreateComment(ctx context.Context, owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha) req, err := s.client.NewRequest("POST", u, comment) @@ -110,7 +110,7 @@ func (s *RepositoriesService) CreateComment(ctx context.Context, owner, repo, sh // GetComment gets a single comment from a repository. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#get-a-commit-comment func (s *RepositoriesService) GetComment(ctx context.Context, owner, repo string, id int64) (*RepositoryComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) req, err := s.client.NewRequest("GET", u, nil) @@ -132,7 +132,7 @@ func (s *RepositoriesService) GetComment(ctx context.Context, owner, repo string // UpdateComment updates the body of a single comment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#update-a-commit-comment func (s *RepositoriesService) UpdateComment(ctx context.Context, owner, repo string, id int64, comment *RepositoryComment) (*RepositoryComment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) req, err := s.client.NewRequest("PATCH", u, comment) @@ -151,7 +151,7 @@ func (s *RepositoriesService) UpdateComment(ctx context.Context, owner, repo str // DeleteComment deletes a single comment from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-commit-comment +// GitHub API docs: https://docs.github.com/en/rest/commits/comments#delete-a-commit-comment func (s *RepositoriesService) DeleteComment(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/repos_commits.go b/vendor/github.com/google/go-github/v45/github/repos_commits.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/repos_commits.go rename to vendor/github.com/google/go-github/v45/github/repos_commits.go index ce3b48e3c5..d1fb577c61 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_commits.go +++ b/vendor/github.com/google/go-github/v45/github/repos_commits.go @@ -124,7 +124,7 @@ type BranchCommit struct { // ListCommits lists the commits of a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-commits +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-commits func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo string, opts *CommitsListOptions) ([]*RepositoryCommit, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits", owner, repo) u, err := addOptions(u, opts) @@ -148,8 +148,8 @@ func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo strin // GetCommit fetches the specified commit, including all details about it. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-single-commit -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-single-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit func (s *RepositoriesService) GetCommit(ctx context.Context, owner, repo, sha string, opts *ListOptions) (*RepositoryCommit, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha) u, err := addOptions(u, opts) @@ -173,7 +173,7 @@ func (s *RepositoriesService) GetCommit(ctx context.Context, owner, repo, sha st // GetCommitRaw fetches the specified commit in raw (diff or patch) format. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit func (s *RepositoriesService) GetCommitRaw(ctx context.Context, owner string, repo string, sha string, opts RawOptions) (string, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha) req, err := s.client.NewRequest("GET", u, nil) @@ -202,7 +202,7 @@ func (s *RepositoriesService) GetCommitRaw(ctx context.Context, owner string, re // GetCommitSHA1 gets the SHA-1 of a commit reference. If a last-known SHA1 is // supplied and no new commits have occurred, a 304 Unmodified response is returned. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#get-a-commit func (s *RepositoriesService) GetCommitSHA1(ctx context.Context, owner, repo, ref, lastSHA string) (string, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, refURLEscape(ref)) @@ -227,7 +227,7 @@ func (s *RepositoriesService) GetCommitSHA1(ctx context.Context, owner, repo, re // CompareCommits compares a range of commits with each other. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#compare-two-commits +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#compare-two-commits func (s *RepositoriesService) CompareCommits(ctx context.Context, owner, repo string, base, head string, opts *ListOptions) (*CommitsComparison, *Response, error) { escapedBase := url.QueryEscape(base) escapedHead := url.QueryEscape(head) @@ -258,7 +258,7 @@ func (s *RepositoriesService) CompareCommits(ctx context.Context, owner, repo st // To compare branches across other repositories in the same network as "repo", // use the format ":branch". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#compare-two-commits +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#compare-two-commits func (s *RepositoriesService) CompareCommitsRaw(ctx context.Context, owner, repo, base, head string, opts RawOptions) (string, *Response, error) { escapedBase := url.QueryEscape(base) escapedHead := url.QueryEscape(head) @@ -291,7 +291,7 @@ func (s *RepositoriesService) CompareCommitsRaw(ctx context.Context, owner, repo // ListBranchesHeadCommit gets all branches where the given commit SHA is the HEAD, // or latest commit for the branch. 
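CompareCommits above builds its URL from query-escaped base and head refs, so callers can pass refs containing slashes or other special characters directly. An illustrative comparison call (editor's sketch, not part of the diff; the refs are hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// base and head may contain characters that need escaping; the method handles that.
	cmp, _, err := client.Repositories.CompareCommits(ctx, "octocat", "hello-world", "main", "feature/new-api", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: ahead %d, behind %d\n", cmp.GetStatus(), cmp.GetAheadBy(), cmp.GetBehindBy())
}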
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-branches-for-head-commit +// GitHub API docs: https://docs.github.com/en/rest/commits/commits#list-branches-for-head-commit func (s *RepositoriesService) ListBranchesHeadCommit(ctx context.Context, owner, repo, sha string) ([]*BranchCommit, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/branches-where-head", owner, repo, sha) diff --git a/vendor/github.com/google/go-github/v42/github/repos_community_health.go b/vendor/github.com/google/go-github/v45/github/repos_community_health.go similarity index 95% rename from vendor/github.com/google/go-github/v42/github/repos_community_health.go rename to vendor/github.com/google/go-github/v45/github/repos_community_health.go index 92e4d082ce..9de438b625 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_community_health.go +++ b/vendor/github.com/google/go-github/v45/github/repos_community_health.go @@ -44,7 +44,7 @@ type CommunityHealthMetrics struct { // GetCommunityHealthMetrics retrieves all the community health metrics for a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#get-community-profile-metrics +// GitHub API docs: https://docs.github.com/en/rest/metrics/community#get-community-profile-metrics func (s *RepositoriesService) GetCommunityHealthMetrics(ctx context.Context, owner, repo string) (*CommunityHealthMetrics, *Response, error) { u := fmt.Sprintf("repos/%v/%v/community/profile", owner, repo) req, err := s.client.NewRequest("GET", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/repos_contents.go b/vendor/github.com/google/go-github/v45/github/repos_contents.go similarity index 86% rename from vendor/github.com/google/go-github/v42/github/repos_contents.go rename to vendor/github.com/google/go-github/v45/github/repos_contents.go index 86e11c0a75..d6f2dd9d74 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_contents.go +++ b/vendor/github.com/google/go-github/v45/github/repos_contents.go @@ -4,7 +4,7 @@ // license that can be found in the LICENSE file. // Repository contents API methods. -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/contents/ +// GitHub API docs: https://docs.github.com/en/rest/repos/contents/ package github @@ -95,22 +95,25 @@ func (r *RepositoryContent) GetContent() (string, error) { // GetReadme gets the Readme file for the repository. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-repository-readme +// GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-a-repository-readme func (s *RepositoriesService) GetReadme(ctx context.Context, owner, repo string, opts *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) { u := fmt.Sprintf("repos/%v/%v/readme", owner, repo) u, err := addOptions(u, opts) if err != nil { return nil, nil, err } + req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, err } + readme := new(RepositoryContent) resp, err := s.client.Do(ctx, req, readme) if err != nil { return nil, resp, err } + return readme, resp, nil } @@ -129,18 +132,22 @@ func (s *RepositoriesService) DownloadContents(ctx context.Context, owner, repo, if err != nil { return nil, resp, err } + for _, contents := range dirContents { if *contents.Name == filename { if contents.DownloadURL == nil || *contents.DownloadURL == "" { return nil, resp, fmt.Errorf("no download link found for %s", filepath) } + dlResp, err := s.client.client.Get(*contents.DownloadURL) if err != nil { return nil, &Response{Response: dlResp}, err } + return dlResp.Body, &Response{Response: dlResp}, nil } } + return nil, resp, fmt.Errorf("no file named %s found in %s", filename, dir) } @@ -159,18 +166,22 @@ func (s *RepositoriesService) DownloadContentsWithMeta(ctx context.Context, owne if err != nil { return nil, nil, resp, err } + for _, contents := range dirContents { if *contents.Name == filename { if contents.DownloadURL == nil || *contents.DownloadURL == "" { return nil, contents, resp, fmt.Errorf("no download link found for %s", filepath) } + dlResp, err := s.client.client.Get(*contents.DownloadURL) if err != nil { return nil, contents, &Response{Response: dlResp}, err } + return dlResp.Body, contents, &Response{Response: dlResp}, nil } } + return nil, nil, resp, fmt.Errorf("no file named %s found in %s", filename, dir) } @@ -181,7 +192,7 @@ func (s *RepositoriesService) DownloadContentsWithMeta(ctx context.Context, owne // as possible, both result types will be returned but only one will contain a // value and the other will be nil. 
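DownloadContents above locates the file by listing its parent directory and then hands back the raw download stream (dlResp.Body per the code), so the caller is responsible for closing it. An illustrative sketch, not part of the diff; the path and repository names are hypothetical:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	rc, _, err := client.Repositories.DownloadContents(ctx, "octocat", "hello-world", "docs/README.md", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // the method returns the raw response body

	data, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes\n", len(data))
}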
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-repository-content +// GitHub API docs: https://docs.github.com/en/rest/repos/contents#get-repository-content func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opts *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) { escapedPath := (&url.URL{Path: strings.TrimSuffix(path, "/")}).String() u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath) @@ -189,77 +200,88 @@ func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path if err != nil { return nil, nil, nil, err } + req, err := s.client.NewRequest("GET", u, nil) if err != nil { return nil, nil, nil, err } + var rawJSON json.RawMessage resp, err = s.client.Do(ctx, req, &rawJSON) if err != nil { return nil, nil, resp, err } + fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent) if fileUnmarshalError == nil { return fileContent, nil, resp, nil } + directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent) if directoryUnmarshalError == nil { return nil, directoryContent, resp, nil } + return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s", fileUnmarshalError, directoryUnmarshalError) } // CreateFile creates a new file in a repository at the given path and returns // the commit and file metadata. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-or-update-file-contents +// GitHub API docs: https://docs.github.com/en/rest/repos/contents#create-or-update-file-contents func (s *RepositoriesService) CreateFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) req, err := s.client.NewRequest("PUT", u, opts) if err != nil { return nil, nil, err } + createResponse := new(RepositoryContentResponse) resp, err := s.client.Do(ctx, req, createResponse) if err != nil { return nil, resp, err } + return createResponse, resp, nil } // UpdateFile updates a file in a repository at the given path and returns the // commit and file metadata. Requires the blob SHA of the file being updated. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-or-update-file-contents +// GitHub API docs: https://docs.github.com/en/rest/repos/contents#create-or-update-file-contents func (s *RepositoriesService) UpdateFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) req, err := s.client.NewRequest("PUT", u, opts) if err != nil { return nil, nil, err } + updateResponse := new(RepositoryContentResponse) resp, err := s.client.Do(ctx, req, updateResponse) if err != nil { return nil, resp, err } + return updateResponse, resp, nil } // DeleteFile deletes a file from a repository and returns the commit. // Requires the blob SHA of the file to be deleted. 
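GetContents above deliberately unmarshals the same payload twice because the endpoint returns a JSON object for a file and a JSON array for a directory; exactly one of the two results is non-nil. A sketch of handling both cases (editor's illustration, not part of the diff):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	file, dir, _, err := client.Repositories.GetContents(ctx, "octocat", "hello-world", "docs", nil)
	if err != nil {
		log.Fatal(err)
	}
	switch {
	case file != nil: // the path named a single file
		text, err := file.GetContent() // decodes the base64 payload
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(text)
	case dir != nil: // the path named a directory
		for _, entry := range dir {
			fmt.Println(entry.GetName())
		}
	}
}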
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-file +// GitHub API docs: https://docs.github.com/en/rest/repos/contents#delete-a-file func (s *RepositoriesService) DeleteFile(ctx context.Context, owner, repo, path string, opts *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) { u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path) req, err := s.client.NewRequest("DELETE", u, opts) if err != nil { return nil, nil, err } + deleteResponse := new(RepositoryContentResponse) resp, err := s.client.Do(ctx, req, deleteResponse) if err != nil { return nil, resp, err } + return deleteResponse, resp, nil } @@ -278,46 +300,22 @@ const ( // repository. The archiveFormat can be specified by either the github.Tarball // or github.Zipball constant. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/contents/#get-archive-link +// GitHub API docs: https://docs.github.com/en/rest/repos/contents/#get-archive-link func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo string, archiveformat ArchiveFormat, opts *RepositoryContentGetOptions, followRedirects bool) (*url.URL, *Response, error) { u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat) if opts != nil && opts.Ref != "" { u += fmt.Sprintf("/%s", opts.Ref) } - resp, err := s.getArchiveLinkFromURL(ctx, u, followRedirects) + resp, err := s.client.roundTripWithOptionalFollowRedirect(ctx, u, followRedirects) if err != nil { return nil, nil, err } + defer resp.Body.Close() + if resp.StatusCode != http.StatusFound { return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) } + parsedURL, err := url.Parse(resp.Header.Get("Location")) return parsedURL, newResponse(resp), err } - -func (s *RepositoriesService) getArchiveLinkFromURL(ctx context.Context, u string, followRedirects bool) (*http.Response, error) { - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - var resp *http.Response - // Use http.DefaultTransport if no custom Transport is configured - req = withContext(ctx, req) - if s.client.client.Transport == nil { - resp, err = http.DefaultTransport.RoundTrip(req) - } else { - resp, err = s.client.client.Transport.RoundTrip(req) - } - if err != nil { - return nil, err - } - resp.Body.Close() - - // If redirect response is returned, follow it - if followRedirects && resp.StatusCode == http.StatusMovedPermanently { - u = resp.Header.Get("Location") - resp, err = s.getArchiveLinkFromURL(ctx, u, false) - } - return resp, err -} diff --git a/vendor/github.com/google/go-github/v42/github/repos_deployments.go b/vendor/github.com/google/go-github/v45/github/repos_deployments.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/repos_deployments.go rename to vendor/github.com/google/go-github/v45/github/repos_deployments.go index 7308bcebe5..36445f895e 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_deployments.go +++ b/vendor/github.com/google/go-github/v45/github/repos_deployments.go @@ -63,7 +63,7 @@ type DeploymentsListOptions struct { // ListDeployments lists the deployments of a repository. 
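GetArchiveLink above now shares the same roundTripWithOptionalFollowRedirect helper as GetBranch and closes the response body via defer instead of by hand. An illustrative call (editor's sketch, not part of the diff; names are hypothetical):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Expect a 302 whose Location header is the archive URL; the final true
	// first follows a 301 for a renamed repository if one is returned.
	u, _, err := client.Repositories.GetArchiveLink(ctx, "octocat", "hello-world",
		github.Tarball, &github.RepositoryContentGetOptions{Ref: "main"}, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("archive at:", u.String())
}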
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-deployments +// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#list-deployments func (s *RepositoriesService) ListDeployments(ctx context.Context, owner, repo string, opts *DeploymentsListOptions) ([]*Deployment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) u, err := addOptions(u, opts) @@ -87,7 +87,7 @@ func (s *RepositoriesService) ListDeployments(ctx context.Context, owner, repo s // GetDeployment returns a single deployment of a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-deployment +// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#get-a-deployment func (s *RepositoriesService) GetDeployment(ctx context.Context, owner, repo string, deploymentID int64) (*Deployment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID) @@ -107,7 +107,7 @@ func (s *RepositoriesService) GetDeployment(ctx context.Context, owner, repo str // CreateDeployment creates a new deployment for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-deployment +// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#create-a-deployment func (s *RepositoriesService) CreateDeployment(ctx context.Context, owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo) @@ -131,7 +131,7 @@ func (s *RepositoriesService) CreateDeployment(ctx context.Context, owner, repo // DeleteDeployment deletes an existing deployment for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-deployment +// GitHub API docs: https://docs.github.com/en/rest/deployments/deployments#delete-a-deployment func (s *RepositoriesService) DeleteDeployment(ctx context.Context, owner, repo string, deploymentID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -175,7 +175,7 @@ type DeploymentStatusRequest struct { // ListDeploymentStatuses lists the statuses of a given deployment of a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-deployment-statuses +// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#list-deployment-statuses func (s *RepositoriesService) ListDeploymentStatuses(ctx context.Context, owner, repo string, deployment int64, opts *ListOptions) ([]*DeploymentStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) u, err := addOptions(u, opts) @@ -203,7 +203,7 @@ func (s *RepositoriesService) ListDeploymentStatuses(ctx context.Context, owner, // GetDeploymentStatus returns a single deployment status of a repository. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-deployment-status +// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#get-a-deployment-status func (s *RepositoriesService) GetDeploymentStatus(ctx context.Context, owner, repo string, deploymentID, deploymentStatusID int64) (*DeploymentStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses/%v", owner, repo, deploymentID, deploymentStatusID) @@ -227,7 +227,7 @@ func (s *RepositoriesService) GetDeploymentStatus(ctx context.Context, owner, re // CreateDeploymentStatus creates a new status for a deployment. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-deployment-status +// GitHub API docs: https://docs.github.com/en/rest/deployments/statuses#create-a-deployment-status func (s *RepositoriesService) CreateDeploymentStatus(ctx context.Context, owner, repo string, deployment int64, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment) diff --git a/vendor/github.com/google/go-github/v42/github/repos_environments.go b/vendor/github.com/google/go-github/v45/github/repos_environments.go similarity index 89% rename from vendor/github.com/google/go-github/v42/github/repos_environments.go rename to vendor/github.com/google/go-github/v45/github/repos_environments.go index 25cb005e8d..365f8d9202 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_environments.go +++ b/vendor/github.com/google/go-github/v45/github/repos_environments.go @@ -63,6 +63,12 @@ type RequiredReviewer struct { Reviewer interface{} `json:"reviewer,omitempty"` } +// EnvironmentListOptions specifies the optional parameters to the +// RepositoriesService.ListEnvironments method. +type EnvironmentListOptions struct { + ListOptions +} + // UnmarshalJSON implements the json.Unmarshaler interface. // This helps us handle the fact that RequiredReviewer can have either a User or Team type reviewer field. func (r *RequiredReviewer) UnmarshalJSON(data []byte) error { @@ -98,9 +104,13 @@ func (r *RequiredReviewer) UnmarshalJSON(data []byte) error { // ListEnvironments lists all environments for a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#get-all-environments -func (s *RepositoriesService) ListEnvironments(ctx context.Context, owner, repo string) (*EnvResponse, *Response, error) { +// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#get-all-environments +func (s *RepositoriesService) ListEnvironments(ctx context.Context, owner, repo string, opts *EnvironmentListOptions) (*EnvResponse, *Response, error) { u := fmt.Sprintf("repos/%s/%s/environments", owner, repo) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } req, err := s.client.NewRequest("GET", u, nil) if err != nil { @@ -117,7 +127,7 @@ func (s *RepositoriesService) ListEnvironments(ctx context.Context, owner, repo // GetEnvironment gets a single environment for a repository.
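The ListEnvironments hunk above is a breaking signature change: the method gains an *EnvironmentListOptions parameter (a plain ListOptions wrapper) so environment listings can be paginated. A sketch of the new call shape (editor's illustration, not part of the diff; EnvResponse's Environments field is assumed from its definition elsewhere in this file):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.EnvironmentListOptions{ListOptions: github.ListOptions{PerPage: 10}}
	envs, _, err := client.Repositories.ListEnvironments(ctx, "octocat", "hello-world", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range envs.Environments {
		fmt.Println(e.GetName())
	}
}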
// -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#get-an-environment +// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#get-an-environment func (s *RepositoriesService) GetEnvironment(ctx context.Context, owner, repo, name string) (*Environment, *Response, error) { u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) @@ -160,7 +170,7 @@ type CreateUpdateEnvironment struct { // CreateUpdateEnvironment create or update a new environment for a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#create-or-update-an-environment +// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#create-or-update-an-environment func (s *RepositoriesService) CreateUpdateEnvironment(ctx context.Context, owner, repo, name string, environment *CreateUpdateEnvironment) (*Environment, *Response, error) { u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) @@ -179,7 +189,7 @@ func (s *RepositoriesService) CreateUpdateEnvironment(ctx context.Context, owner // DeleteEnvironment delete an environment from a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#delete-an-environment +// GitHub API docs: https://docs.github.com/en/rest/deployments/environments#delete-an-environment func (s *RepositoriesService) DeleteEnvironment(ctx context.Context, owner, repo, name string) (*Response, error) { u := fmt.Sprintf("repos/%s/%s/environments/%s", owner, repo, name) diff --git a/vendor/github.com/google/go-github/v42/github/repos_forks.go b/vendor/github.com/google/go-github/v45/github/repos_forks.go similarity index 92% rename from vendor/github.com/google/go-github/v42/github/repos_forks.go rename to vendor/github.com/google/go-github/v45/github/repos_forks.go index 74b9b445ea..97bb328ffb 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_forks.go +++ b/vendor/github.com/google/go-github/v45/github/repos_forks.go @@ -24,7 +24,7 @@ type RepositoryListForksOptions struct { // ListForks lists the forks of the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-forks +// GitHub API docs: https://docs.github.com/en/rest/repos/forks#list-forks func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string, opts *RepositoryListForksOptions) ([]*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) u, err := addOptions(u, opts) @@ -65,7 +65,7 @@ type RepositoryCreateForkOptions struct { // A follow up request, after a delay of a second or so, should result // in a successful request. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-fork +// GitHub API docs: https://docs.github.com/en/rest/repos/forks#create-a-fork func (s *RepositoriesService) CreateFork(ctx context.Context, owner, repo string, opts *RepositoryCreateForkOptions) (*Repository, *Response, error) { u := fmt.Sprintf("repos/%v/%v/forks", owner, repo) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/repos_hooks.go b/vendor/github.com/google/go-github/v45/github/repos_hooks.go similarity index 68% rename from vendor/github.com/google/go-github/v42/github/repos_hooks.go rename to vendor/github.com/google/go-github/v45/github/repos_hooks.go index 6afec86039..4e738cfe8c 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_hooks.go +++ b/vendor/github.com/google/go-github/v45/github/repos_hooks.go @@ -18,57 +18,22 @@ import ( // here to account for these differences. // // GitHub API docs: https://help.github.com/articles/post-receive-hooks -type WebHookPayload struct { - Action *string `json:"action,omitempty"` - After *string `json:"after,omitempty"` - Before *string `json:"before,omitempty"` - Commits []*WebHookCommit `json:"commits,omitempty"` - Compare *string `json:"compare,omitempty"` - Created *bool `json:"created,omitempty"` - Deleted *bool `json:"deleted,omitempty"` - Forced *bool `json:"forced,omitempty"` - HeadCommit *WebHookCommit `json:"head_commit,omitempty"` - Installation *Installation `json:"installation,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Pusher *User `json:"pusher,omitempty"` - Ref *string `json:"ref,omitempty"` - Repo *Repository `json:"repository,omitempty"` - Sender *User `json:"sender,omitempty"` -} - -func (w WebHookPayload) String() string { - return Stringify(w) -} +// +// Deprecated: Please use PushEvent instead. +type WebHookPayload = PushEvent // WebHookCommit represents the commit variant we receive from GitHub in a // WebHookPayload. -type WebHookCommit struct { - Added []string `json:"added,omitempty"` - Author *WebHookAuthor `json:"author,omitempty"` - Committer *WebHookAuthor `json:"committer,omitempty"` - Distinct *bool `json:"distinct,omitempty"` - ID *string `json:"id,omitempty"` - Message *string `json:"message,omitempty"` - Modified []string `json:"modified,omitempty"` - Removed []string `json:"removed,omitempty"` - Timestamp *time.Time `json:"timestamp,omitempty"` -} - -func (w WebHookCommit) String() string { - return Stringify(w) -} +// +// Deprecated: Please use HeadCommit instead. +type WebHookCommit = HeadCommit // WebHookAuthor represents the author or committer of a commit, as specified // in a WebHookCommit. The commit author may not correspond to a GitHub User. -type WebHookAuthor struct { - Email *string `json:"email,omitempty"` - Name *string `json:"name,omitempty"` - Username *string `json:"username,omitempty"` -} - -func (w WebHookAuthor) String() string { - return Stringify(w) -} +// +// Deprecated: Please use CommitAuthor instead. +// NOTE Breaking API change: the `Username` field is now called `Login`. +type WebHookAuthor = CommitAuthor // Hook represents a GitHub (web and service) hook for a repository. type Hook struct { @@ -112,7 +77,7 @@ type createHookRequest struct { // Note that only a subset of the hook fields are used and hook must // not be nil. 
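Because WebHookPayload, WebHookCommit, and WebHookAuthor become type aliases above, existing handlers keep compiling against the PushEvent family; the one behavioral note is the author field rename from Username to Login. A short sketch of the migrated access pattern (the PushEvent/HeadCommit/CommitAuthor field names come from elsewhere in the package):

package main

import (
	"fmt"

	"github.com/google/go-github/v45/github"
)

// The aliases mean the old and new names are literally the same type.
var _ *github.PushEvent = (*github.WebHookPayload)(nil)

func logPush(ev *github.PushEvent) {
	for _, c := range ev.Commits {
		// Pre-alias code read c.Author.Username; CommitAuthor calls it Login.
		fmt.Printf("%s by %s\n", c.GetID(), c.GetAuthor().GetLogin())
	}
}

func main() {
	logPush(&github.PushEvent{}) // placeholder event; real ones come from webhook parsing
}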
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#create-a-repository-webhook func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string, hook *Hook) (*Hook, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) @@ -139,7 +104,7 @@ func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string // ListHooks lists all Hooks for the specified repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-webhooks +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#list-repository-webhooks func (s *RepositoriesService) ListHooks(ctx context.Context, owner, repo string, opts *ListOptions) ([]*Hook, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo) u, err := addOptions(u, opts) @@ -163,7 +128,7 @@ func (s *RepositoriesService) ListHooks(ctx context.Context, owner, repo string, // GetHook returns a single specified Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#get-a-repository-webhook func (s *RepositoriesService) GetHook(ctx context.Context, owner, repo string, id int64) (*Hook, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) req, err := s.client.NewRequest("GET", u, nil) @@ -181,7 +146,7 @@ func (s *RepositoriesService) GetHook(ctx context.Context, owner, repo string, i // EditHook updates a specified Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#update-a-repository-webhook func (s *RepositoriesService) EditHook(ctx context.Context, owner, repo string, id int64, hook *Hook) (*Hook, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) req, err := s.client.NewRequest("PATCH", u, hook) @@ -199,7 +164,7 @@ func (s *RepositoriesService) EditHook(ctx context.Context, owner, repo string, // DeleteHook deletes a specified Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#delete-a-repository-webhook func (s *RepositoriesService) DeleteHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id) req, err := s.client.NewRequest("DELETE", u, nil) @@ -211,7 +176,7 @@ func (s *RepositoriesService) DeleteHook(ctx context.Context, owner, repo string // PingHook triggers a 'ping' event to be sent to the Hook. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#ping-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#ping-a-repository-webhook func (s *RepositoriesService) PingHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id) req, err := s.client.NewRequest("POST", u, nil) @@ -223,7 +188,7 @@ func (s *RepositoriesService) PingHook(ctx context.Context, owner, repo string, // TestHook triggers a test Hook by github. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#test-the-push-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repos#test-the-push-repository-webhook func (s *RepositoriesService) TestHook(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/repos_hooks_deliveries.go b/vendor/github.com/google/go-github/v45/github/repos_hooks_deliveries.go similarity index 75% rename from vendor/github.com/google/go-github/v42/github/repos_hooks_deliveries.go rename to vendor/github.com/google/go-github/v45/github/repos_hooks_deliveries.go index 1713269519..cbd2d3819a 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_hooks_deliveries.go +++ b/vendor/github.com/google/go-github/v45/github/repos_hooks_deliveries.go @@ -14,8 +14,8 @@ import ( // HookDelivery represents the data that is received from GitHub's Webhook Delivery API // // GitHub API docs: -// - https://docs.github.com/en/rest/reference/repos#list-deliveries-for-a-repository-webhook -// - https://docs.github.com/en/rest/reference/repos#get-a-delivery-for-a-repository-webhook +// - https://docs.github.com/en/rest/webhooks/repo-deliveries#list-deliveries-for-a-repository-webhook +// - https://docs.github.com/en/rest/webhooks/repo-deliveries#get-a-delivery-for-a-repository-webhook type HookDelivery struct { ID *int64 `json:"id,omitempty"` GUID *string `json:"guid,omitempty"` @@ -63,7 +63,7 @@ func (r HookResponse) String() string { // ListHookDeliveries lists webhook deliveries for a webhook configured in a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#list-deliveries-for-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#list-deliveries-for-a-repository-webhook func (s *RepositoriesService) ListHookDeliveries(ctx context.Context, owner, repo string, id int64, opts *ListCursorOptions) ([]*HookDelivery, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries", owner, repo, id) u, err := addOptions(u, opts) @@ -87,7 +87,7 @@ func (s *RepositoriesService) ListHookDeliveries(ctx context.Context, owner, rep // GetHookDelivery returns a delivery for a webhook configured in a repository. // -// GitHub API docs: https://docs.github.com/en/rest/reference/repos#get-a-delivery-for-a-repository-webhook +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#get-a-delivery-for-a-repository-webhook func (s *RepositoriesService) GetHookDelivery(ctx context.Context, owner, repo string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries/%v", owner, repo, hookID, deliveryID) req, err := s.client.NewRequest("GET", u, nil) @@ -104,6 +104,25 @@ func (s *RepositoriesService) GetHookDelivery(ctx context.Context, owner, repo s return h, resp, nil } +// RedeliverHookDelivery redelivers a delivery for a webhook configured in a repository. 
+// +// GitHub API docs: https://docs.github.com/en/rest/webhooks/repo-deliveries#redeliver-a-delivery-for-a-repository-webhook +func (s *RepositoriesService) RedeliverHookDelivery(ctx context.Context, owner, repo string, hookID, deliveryID int64) (*HookDelivery, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/hooks/%v/deliveries/%v/attempts", owner, repo, hookID, deliveryID) + req, err := s.client.NewRequest("POST", u, nil) + if err != nil { + return nil, nil, err + } + + h := new(HookDelivery) + resp, err := s.client.Do(ctx, req, h) + if err != nil { + return nil, resp, err + } + + return h, resp, nil +} + // ParseRequestPayload parses the request payload. For recognized event types, // a value of the corresponding struct type will be returned. func (d *HookDelivery) ParseRequestPayload() (interface{}, error) { diff --git a/vendor/github.com/google/go-github/v42/github/repos_invitations.go b/vendor/github.com/google/go-github/v45/github/repos_invitations.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/repos_invitations.go rename to vendor/github.com/google/go-github/v45/github/repos_invitations.go index 79dd57889b..81956cd49c 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_invitations.go +++ b/vendor/github.com/google/go-github/v45/github/repos_invitations.go @@ -27,7 +27,7 @@ type RepositoryInvitation struct { // ListInvitations lists all currently-open repository invitations. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-invitations +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#list-repository-invitations func (s *RepositoriesService) ListInvitations(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryInvitation, *Response, error) { u := fmt.Sprintf("repos/%v/%v/invitations", owner, repo) u, err := addOptions(u, opts) @@ -51,7 +51,7 @@ func (s *RepositoriesService) ListInvitations(ctx context.Context, owner, repo s // DeleteInvitation deletes a repository invitation. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-repository-invitation +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#delete-a-repository-invitation func (s *RepositoriesService) DeleteInvitation(ctx context.Context, owner, repo string, invitationID int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID) req, err := s.client.NewRequest("DELETE", u, nil) @@ -68,7 +68,7 @@ func (s *RepositoriesService) DeleteInvitation(ctx context.Context, owner, repo // permissions represents the permissions that the associated user will have // on the repository. Possible values are: "read", "write", "admin". 
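RedeliverHookDelivery, added above, pairs naturally with ListHookDeliveries: page through recent deliveries and queue a fresh attempt for any of them. A sketch with a hypothetical hook ID; only the ID and GUID fields shown in this diff are touched, and the ListCursorOptions PerPage field is assumed from the rest of the package:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)
	const hookID int64 = 12345 // hypothetical hook ID

	deliveries, _, err := client.Repositories.ListHookDeliveries(ctx, "octocat", "hello-world", hookID,
		&github.ListCursorOptions{PerPage: 50})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range deliveries {
		// POSTing to .../attempts queues a new delivery attempt.
		if _, _, err := client.Repositories.RedeliverHookDelivery(ctx, "octocat", "hello-world", hookID, d.GetID()); err != nil {
			log.Printf("redeliver %d: %v", d.GetID(), err)
			continue
		}
		fmt.Printf("requeued delivery %d (%s)\n", d.GetID(), d.GetGUID())
	}
}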
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-repository-invitation +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#update-a-repository-invitation func (s *RepositoriesService) UpdateInvitation(ctx context.Context, owner, repo string, invitationID int64, permissions string) (*RepositoryInvitation, *Response, error) { opts := &struct { Permissions string `json:"permissions"` diff --git a/vendor/github.com/google/go-github/v42/github/repos_keys.go b/vendor/github.com/google/go-github/v45/github/repos_keys.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/repos_keys.go rename to vendor/github.com/google/go-github/v45/github/repos_keys.go index 3e127ae435..42c5de4970 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_keys.go +++ b/vendor/github.com/google/go-github/v45/github/repos_keys.go @@ -14,7 +14,7 @@ import ( // ListKeys lists the deploy keys for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-deploy-keys +// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#list-deploy-keys func (s *RepositoriesService) ListKeys(ctx context.Context, owner string, repo string, opts *ListOptions) ([]*Key, *Response, error) { u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) u, err := addOptions(u, opts) @@ -38,7 +38,7 @@ func (s *RepositoriesService) ListKeys(ctx context.Context, owner string, repo s // GetKey fetches a single deploy key. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-deploy-key +// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#get-a-deploy-key func (s *RepositoriesService) GetKey(ctx context.Context, owner string, repo string, id int64) (*Key, *Response, error) { u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) @@ -58,7 +58,7 @@ func (s *RepositoriesService) GetKey(ctx context.Context, owner string, repo str // CreateKey adds a deploy key for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-deploy-key +// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#create-a-deploy-key func (s *RepositoriesService) CreateKey(ctx context.Context, owner string, repo string, key *Key) (*Key, *Response, error) { u := fmt.Sprintf("repos/%v/%v/keys", owner, repo) @@ -78,7 +78,7 @@ func (s *RepositoriesService) CreateKey(ctx context.Context, owner string, repo // DeleteKey deletes a deploy key. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-deploy-key +// GitHub API docs: https://docs.github.com/en/rest/deploy-keys#delete-a-deploy-key func (s *RepositoriesService) DeleteKey(ctx context.Context, owner string, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id) diff --git a/vendor/github.com/google/go-github/v45/github/repos_merging.go b/vendor/github.com/google/go-github/v45/github/repos_merging.go new file mode 100644 index 0000000000..66e88452e8 --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/repos_merging.go @@ -0,0 +1,72 @@ +// Copyright 2014 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" +) + +// RepositoryMergeRequest represents a request to merge a branch in a +// repository. +type RepositoryMergeRequest struct { + Base *string `json:"base,omitempty"` + Head *string `json:"head,omitempty"` + CommitMessage *string `json:"commit_message,omitempty"` +} + +// RepoMergeUpstreamRequest represents a request to sync a branch of +// a forked repository to keep it up-to-date with the upstream repository. +type RepoMergeUpstreamRequest struct { + Branch *string `json:"branch,omitempty"` +} + +// RepoMergeUpstreamResult represents the result of syncing a branch of +// a forked repository with the upstream repository. +type RepoMergeUpstreamResult struct { + Message *string `json:"message,omitempty"` + MergeType *string `json:"merge_type,omitempty"` + BaseBranch *string `json:"base_branch,omitempty"` +} + +// Merge a branch in the specified repository. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branches#merge-a-branch +func (s *RepositoriesService) Merge(ctx context.Context, owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/merges", owner, repo) + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, nil, err + } + + commit := new(RepositoryCommit) + resp, err := s.client.Do(ctx, req, commit) + if err != nil { + return nil, resp, err + } + + return commit, resp, nil +} + +// MergeUpstream syncs a branch of a forked repository to keep it up-to-date +// with the upstream repository. +// +// GitHub API docs: https://docs.github.com/en/rest/branches/branches#sync-a-fork-branch-with-the-upstream-repository +func (s *RepositoriesService) MergeUpstream(ctx context.Context, owner, repo string, request *RepoMergeUpstreamRequest) (*RepoMergeUpstreamResult, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/merge-upstream", owner, repo) + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, nil, err + } + + result := new(RepoMergeUpstreamResult) + resp, err := s.client.Do(ctx, req, result) + if err != nil { + return nil, resp, err + } + + return result, resp, nil +} diff --git a/vendor/github.com/google/go-github/v42/github/repos_pages.go b/vendor/github.com/google/go-github/v45/github/repos_pages.go similarity index 86% rename from vendor/github.com/google/go-github/v42/github/repos_pages.go rename to vendor/github.com/google/go-github/v45/github/repos_pages.go index 04825baea1..9b864eb090 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_pages.go +++ b/vendor/github.com/google/go-github/v45/github/repos_pages.go @@ -63,7 +63,7 @@ type createPagesRequest struct { // EnablePages enables GitHub Pages for the named repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-github-pages-site +// GitHub API docs: https://docs.github.com/en/rest/pages#create-a-github-pages-site func (s *RepositoriesService) EnablePages(ctx context.Context, owner, repo string, pages *Pages) (*Pages, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) @@ -95,11 +95,18 @@ type PagesUpdate struct { // Source must include the branch name, and may optionally specify the subdirectory "/docs". // Possible values are: "gh-pages", "master", and "master /docs". Source *string `json:"source,omitempty"` + // Public configures access controls for the site. + // If "true", the site will be accessible to anyone on the internet. 
If "false", + // the site will be accessible to anyone with read access to the repository that + // published the site. + Public *bool `json:"public,omitempty"` + // HTTPSEnforced specifies whether HTTPS should be enforced for the repository. + HTTPSEnforced *bool `json:"https_enforced,omitempty"` } // UpdatePages updates GitHub Pages for the named repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-information-about-a-github-pages-site +// GitHub API docs: https://docs.github.com/en/rest/pages#update-information-about-a-github-pages-site func (s *RepositoriesService) UpdatePages(ctx context.Context, owner, repo string, opts *PagesUpdate) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) @@ -118,7 +125,7 @@ func (s *RepositoriesService) UpdatePages(ctx context.Context, owner, repo strin // DisablePages disables GitHub Pages for the named repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-github-pages-site +// GitHub API docs: https://docs.github.com/en/rest/pages#delete-a-github-pages-site func (s *RepositoriesService) DisablePages(ctx context.Context, owner, repo string) (*Response, error) { u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) req, err := s.client.NewRequest("DELETE", u, nil) @@ -134,7 +141,7 @@ func (s *RepositoriesService) DisablePages(ctx context.Context, owner, repo stri // GetPagesInfo fetches information about a GitHub Pages site. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-github-pages-site +// GitHub API docs: https://docs.github.com/en/rest/pages#get-a-github-pages-site func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo string) (*Pages, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -153,7 +160,7 @@ func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo stri // ListPagesBuilds lists the builds for a GitHub Pages site. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-github-pages-builds +// GitHub API docs: https://docs.github.com/en/rest/pages#list-github-pages-builds func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string, opts *ListOptions) ([]*PagesBuild, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) u, err := addOptions(u, opts) @@ -177,7 +184,7 @@ func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo s // GetLatestPagesBuild fetches the latest build information for a GitHub pages site. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-latest-pages-build +// GitHub API docs: https://docs.github.com/en/rest/pages#get-latest-pages-build func (s *RepositoriesService) GetLatestPagesBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -196,7 +203,7 @@ func (s *RepositoriesService) GetLatestPagesBuild(ctx context.Context, owner, re // GetPageBuild fetches the specific build information for a GitHub pages site. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-github-pages-build +// GitHub API docs: https://docs.github.com/en/rest/pages#get-github-pages-build func (s *RepositoriesService) GetPageBuild(ctx context.Context, owner, repo string, id int64) (*PagesBuild, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id) req, err := s.client.NewRequest("GET", u, nil) @@ -215,7 +222,7 @@ func (s *RepositoriesService) GetPageBuild(ctx context.Context, owner, repo stri // RequestPageBuild requests a build of a GitHub Pages site without needing to push new commit. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#request-a-github-pages-build +// GitHub API docs: https://docs.github.com/en/rest/pages#request-a-github-pages-build func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) { u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo) req, err := s.client.NewRequest("POST", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/v45/github/repos_prereceive_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/repos_prereceive_hooks.go rename to vendor/github.com/google/go-github/v45/github/repos_prereceive_hooks.go diff --git a/vendor/github.com/google/go-github/v42/github/repos_projects.go b/vendor/github.com/google/go-github/v45/github/repos_projects.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/repos_projects.go rename to vendor/github.com/google/go-github/v45/github/repos_projects.go index 1938d51b9b..a3001dee98 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_projects.go +++ b/vendor/github.com/google/go-github/v45/github/repos_projects.go @@ -21,7 +21,7 @@ type ProjectListOptions struct { // ListProjects lists the projects for a repo. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-repository-projects +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-repository-projects func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo string, opts *ProjectListOptions) ([]*Project, *Response, error) { u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) u, err := addOptions(u, opts) @@ -48,7 +48,7 @@ func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo stri // CreateProject creates a GitHub Project for the specified repository. 
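The new repos_merging.go introduced earlier in this diff adds MergeUpstream alongside the classic Merge. A sketch of syncing a fork branch with its upstream, assuming a hypothetical fork and the github.String pointer helper:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Sync the fork's main branch with the upstream repository.
	res, _, err := client.Repositories.MergeUpstream(ctx, "my-user", "my-fork", &github.RepoMergeUpstreamRequest{
		Branch: github.String("main"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.GetMessage(), res.GetMergeType())
}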
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#create-a-repository-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-repository-project func (s *RepositoriesService) CreateProject(ctx context.Context, owner, repo string, opts *ProjectOptions) (*Project, *Response, error) { u := fmt.Sprintf("repos/%v/%v/projects", owner, repo) req, err := s.client.NewRequest("POST", u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/repos_releases.go b/vendor/github.com/google/go-github/v45/github/repos_releases.go similarity index 90% rename from vendor/github.com/google/go-github/v42/github/repos_releases.go rename to vendor/github.com/google/go-github/v45/github/repos_releases.go index 1cd2fae669..f1ab65c185 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_releases.go +++ b/vendor/github.com/google/go-github/v45/github/repos_releases.go @@ -83,7 +83,7 @@ func (r ReleaseAsset) String() string { // ListReleases lists the releases for a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-releases +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#list-releases func (s *RepositoriesService) ListReleases(ctx context.Context, owner, repo string, opts *ListOptions) ([]*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) u, err := addOptions(u, opts) @@ -106,7 +106,7 @@ func (s *RepositoriesService) ListReleases(ctx context.Context, owner, repo stri // GetRelease fetches a single release. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-release +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-a-release func (s *RepositoriesService) GetRelease(ctx context.Context, owner, repo string, id int64) (*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) return s.getSingleRelease(ctx, u) @@ -114,7 +114,7 @@ func (s *RepositoriesService) GetRelease(ctx context.Context, owner, repo string // GetLatestRelease fetches the latest published release for the repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-latest-release +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-the-latest-release func (s *RepositoriesService) GetLatestRelease(ctx context.Context, owner, repo string) (*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo) return s.getSingleRelease(ctx, u) @@ -122,15 +122,15 @@ func (s *RepositoriesService) GetLatestRelease(ctx context.Context, owner, repo // GetReleaseByTag fetches a release with the specified tag. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-release-by-tag-name +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#get-a-release-by-tag-name func (s *RepositoriesService) GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag) return s.getSingleRelease(ctx, u) } // GenerateReleaseNotes generates the release notes for the given tag. 
-// TODO: api docs -// GitHub API docs: +// +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#generate-release-notes-content-for-a-release func (s *RepositoriesService) GenerateReleaseNotes(ctx context.Context, owner, repo string, opts *GenerateNotesOptions) (*RepositoryReleaseNotes, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/generate-notes", owner, repo) req, err := s.client.NewRequest("POST", u, opts) @@ -183,7 +183,7 @@ type repositoryReleaseRequest struct { // Note that only a subset of the release fields are used. // See RepositoryRelease for more information. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-release +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#create-a-release func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases", owner, repo) @@ -216,7 +216,7 @@ func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo str // Note that only a subset of the release fields are used. // See RepositoryRelease for more information. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-release +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#update-a-release func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo string, id int64, release *RepositoryRelease) (*RepositoryRelease, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) @@ -246,7 +246,7 @@ func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo strin // DeleteRelease delete a single release from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-release +// GitHub API docs: https://docs.github.com/en/rest/releases/releases#delete-a-release func (s *RepositoriesService) DeleteRelease(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id) @@ -259,7 +259,7 @@ func (s *RepositoriesService) DeleteRelease(ctx context.Context, owner, repo str // ListReleaseAssets lists the release's assets. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-release-assets +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#list-release-assets func (s *RepositoriesService) ListReleaseAssets(ctx context.Context, owner, repo string, id int64, opts *ListOptions) ([]*ReleaseAsset, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) u, err := addOptions(u, opts) @@ -282,7 +282,7 @@ func (s *RepositoriesService) ListReleaseAssets(ctx context.Context, owner, repo // GetReleaseAsset fetches a single release asset. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-release-asset +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#get-a-release-asset func (s *RepositoriesService) GetReleaseAsset(ctx context.Context, owner, repo string, id int64) (*ReleaseAsset, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) @@ -311,7 +311,7 @@ func (s *RepositoriesService) GetReleaseAsset(ctx context.Context, owner, repo s // exist, but it's possible to pass any http.Client. 
If nil is passed the // redirectURL will be returned instead. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-a-release-asset +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#get-a-release-asset func (s *RepositoriesService) DownloadReleaseAsset(ctx context.Context, owner, repo string, id int64, followRedirectsClient *http.Client) (rc io.ReadCloser, redirectURL string, err error) { u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) @@ -373,7 +373,7 @@ func (s *RepositoriesService) downloadReleaseAssetFromURL(ctx context.Context, f // EditReleaseAsset edits a repository release asset. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#update-a-release-asset +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#update-a-release-asset func (s *RepositoriesService) EditReleaseAsset(ctx context.Context, owner, repo string, id int64, release *ReleaseAsset) (*ReleaseAsset, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) @@ -392,7 +392,7 @@ func (s *RepositoriesService) EditReleaseAsset(ctx context.Context, owner, repo // DeleteReleaseAsset delete a single release asset from a repository. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#delete-a-release-asset +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#delete-a-release-asset func (s *RepositoriesService) DeleteReleaseAsset(ctx context.Context, owner, repo string, id int64) (*Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id) @@ -406,7 +406,7 @@ func (s *RepositoriesService) DeleteReleaseAsset(ctx context.Context, owner, rep // UploadReleaseAsset creates an asset by uploading a file into a release repository. // To upload assets that cannot be represented by an os.File, call NewUploadRequest directly. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#upload-a-release-asset +// GitHub API docs: https://docs.github.com/en/rest/releases/assets#upload-a-release-asset func (s *RepositoriesService) UploadReleaseAsset(ctx context.Context, owner, repo string, id int64, opts *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) { u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/repos_stats.go b/vendor/github.com/google/go-github/v45/github/repos_stats.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/repos_stats.go rename to vendor/github.com/google/go-github/v45/github/repos_stats.go index 73f0a6768a..3df0a8f6de 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_stats.go +++ b/vendor/github.com/google/go-github/v45/github/repos_stats.go @@ -45,7 +45,7 @@ func (w WeeklyStats) String() string { // it is now computing the requested statistics. A follow up request, after a // delay of a second or so, should result in a successful request. 
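Back in the releases hunk, GenerateReleaseNotes finally carries its docs link. A sketch of drafting notes for a tag; GenerateNotesOptions and the RepositoryReleaseNotes Name/Body fields are defined outside this diff, so treat those names as assumptions:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Ask GitHub to draft release notes for a tag (TagName assumed from the
	// library's GenerateNotesOptions type, which sits outside this hunk).
	notes, _, err := client.Repositories.GenerateReleaseNotes(ctx, "octocat", "hello-world",
		&github.GenerateNotesOptions{TagName: "v1.2.0"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(notes.Name) // Name/Body fields assumed from RepositoryReleaseNotes
	fmt.Println(notes.Body)
}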
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-all-contributor-commit-activity +// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-all-contributor-commit-activity func (s *RepositoriesService) ListContributorsStats(ctx context.Context, owner, repo string) ([]*ContributorStats, *Response, error) { u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -84,7 +84,7 @@ func (w WeeklyCommitActivity) String() string { // it is now computing the requested statistics. A follow up request, after a // delay of a second or so, should result in a successful request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-last-year-of-commit-activity +// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-last-year-of-commit-activity func (s *RepositoriesService) ListCommitActivity(ctx context.Context, owner, repo string) ([]*WeeklyCommitActivity, *Response, error) { u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -111,7 +111,7 @@ func (s *RepositoriesService) ListCommitActivity(ctx context.Context, owner, rep // it is now computing the requested statistics. A follow up request, after a // delay of a second or so, should result in a successful request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-weekly-commit-activity +// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-weekly-commit-activity func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo string) ([]*WeeklyStats, *Response, error) { u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -121,6 +121,9 @@ func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo var weeks [][]int resp, err := s.client.Do(ctx, req, &weeks) + if err != nil { + return nil, resp, err + } // convert int slices into WeeklyStats var stats []*WeeklyStats @@ -136,7 +139,7 @@ func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo stats = append(stats, stat) } - return stats, resp, err + return stats, resp, nil } // RepositoryParticipation is the number of commits by everyone @@ -164,7 +167,7 @@ func (r RepositoryParticipation) String() string { // it is now computing the requested statistics. A follow up request, after a // delay of a second or so, should result in a successful request. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-weekly-commit-count +// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-weekly-commit-count func (s *RepositoriesService) ListParticipation(ctx context.Context, owner, repo string) (*RepositoryParticipation, *Response, error) { u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -197,7 +200,7 @@ type PunchCard struct { // it is now computing the requested statistics. A follow up request, after a // delay of a second or so, should result in a successful request. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-hourly-commit-count-for-each-day +// GitHub API docs: https://docs.github.com/en/rest/metrics/statistics#get-the-hourly-commit-count-for-each-day func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo string) ([]*PunchCard, *Response, error) { u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo) req, err := s.client.NewRequest("GET", u, nil) @@ -207,6 +210,9 @@ func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo str var results [][]int resp, err := s.client.Do(ctx, req, &results) + if err != nil { + return nil, resp, err + } // convert int slices into Punchcards var cards []*PunchCard @@ -222,5 +228,5 @@ func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo str cards = append(cards, card) } - return cards, resp, err + return cards, resp, nil } diff --git a/vendor/github.com/google/go-github/v42/github/repos_statuses.go b/vendor/github.com/google/go-github/v45/github/repos_statuses.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/repos_statuses.go rename to vendor/github.com/google/go-github/v45/github/repos_statuses.go index 347d856ae2..42238f3c9d 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_statuses.go +++ b/vendor/github.com/google/go-github/v45/github/repos_statuses.go @@ -46,7 +46,7 @@ func (r RepoStatus) String() string { // ListStatuses lists the statuses of a repository at the specified // reference. ref can be a SHA, a branch name, or a tag name. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-commit-statuses-for-a-reference +// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#list-commit-statuses-for-a-reference func (s *RepositoriesService) ListStatuses(ctx context.Context, owner, repo, ref string, opts *ListOptions) ([]*RepoStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, refURLEscape(ref)) u, err := addOptions(u, opts) @@ -71,7 +71,7 @@ func (s *RepositoriesService) ListStatuses(ctx context.Context, owner, repo, ref // CreateStatus creates a new status for a repository at the specified // reference. Ref can be a SHA, a branch name, or a tag name. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#create-a-commit-status +// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#create-a-commit-status func (s *RepositoriesService) CreateStatus(ctx context.Context, owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, refURLEscape(ref)) req, err := s.client.NewRequest("POST", u, status) @@ -110,7 +110,7 @@ func (s CombinedStatus) String() string { // GetCombinedStatus returns the combined status of a repository at the specified // reference. ref can be a SHA, a branch name, or a tag name. 
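The stats changes above make ListCodeFrequency and ListPunchCard return early on a decode error rather than assembling results from a nil slice. Callers still have to handle the 202 case the doc comments describe; go-github surfaces that as *AcceptedError (defined elsewhere in the package), so a retry loop is the usual shape. A hedged sketch against ListContributorsStats:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	for {
		stats, _, err := client.Repositories.ListContributorsStats(ctx, "octocat", "hello-world")
		// A 202 means GitHub is still computing the statistics; retry shortly.
		var accepted *github.AcceptedError
		if errors.As(err, &accepted) {
			time.Sleep(2 * time.Second)
			continue
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, cs := range stats {
			fmt.Println(cs.GetAuthor().GetLogin(), cs.GetTotal())
		}
		return
	}
}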
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-the-combined-status-for-a-specific-reference +// GitHub API docs: https://docs.github.com/en/rest/commits/statuses#get-the-combined-status-for-a-specific-reference func (s *RepositoriesService) GetCombinedStatus(ctx context.Context, owner, repo, ref string, opts *ListOptions) (*CombinedStatus, *Response, error) { u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, refURLEscape(ref)) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/repos_traffic.go b/vendor/github.com/google/go-github/v45/github/repos_traffic.go similarity index 90% rename from vendor/github.com/google/go-github/v42/github/repos_traffic.go rename to vendor/github.com/google/go-github/v45/github/repos_traffic.go index e372fb5a7f..bf093c03ea 100644 --- a/vendor/github.com/google/go-github/v42/github/repos_traffic.go +++ b/vendor/github.com/google/go-github/v45/github/repos_traffic.go @@ -54,7 +54,7 @@ type TrafficBreakdownOptions struct { // ListTrafficReferrers list the top 10 referrers over the last 14 days. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-top-referral-sources +// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-top-referral-sources func (s *RepositoriesService) ListTrafficReferrers(ctx context.Context, owner, repo string) ([]*TrafficReferrer, *Response, error) { u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo) @@ -74,7 +74,7 @@ func (s *RepositoriesService) ListTrafficReferrers(ctx context.Context, owner, r // ListTrafficPaths list the top 10 popular content over the last 14 days. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-top-referral-paths +// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-top-referral-paths func (s *RepositoriesService) ListTrafficPaths(ctx context.Context, owner, repo string) ([]*TrafficPath, *Response, error) { u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo) @@ -94,7 +94,7 @@ func (s *RepositoriesService) ListTrafficPaths(ctx context.Context, owner, repo // ListTrafficViews get total number of views for the last 14 days and breaks it down either per day or week. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-page-views +// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-page-views func (s *RepositoriesService) ListTrafficViews(ctx context.Context, owner, repo string, opts *TrafficBreakdownOptions) (*TrafficViews, *Response, error) { u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo) u, err := addOptions(u, opts) @@ -118,7 +118,7 @@ func (s *RepositoriesService) ListTrafficViews(ctx context.Context, owner, repo // ListTrafficClones get total number of clones for the last 14 days and breaks it down either per day or week for the last 14 days. 
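For the traffic endpoints above, the breakdown option selects day or week granularity over the 14-day window. A sketch of fetching view counts; the TrafficBreakdownOptions Per field and the TrafficViews accessors come from the rest of the package and are assumptions here:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Break the last 14 days of views down per week ("day" is the other choice).
	views, _, err := client.Repositories.ListTrafficViews(ctx, "octocat", "hello-world",
		&github.TrafficBreakdownOptions{Per: "week"}) // Per field assumed
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d views, %d unique\n", views.GetCount(), views.GetUniques())
}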
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#get-repository-clones +// GitHub API docs: https://docs.github.com/en/rest/metrics/traffic#get-repository-clones func (s *RepositoriesService) ListTrafficClones(ctx context.Context, owner, repo string, opts *TrafficBreakdownOptions) (*TrafficClones, *Response, error) { u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo) u, err := addOptions(u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/scim.go b/vendor/github.com/google/go-github/v45/github/scim.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/scim.go rename to vendor/github.com/google/go-github/v45/github/scim.go index 7a12d85b88..c4abb9ab3e 100644 --- a/vendor/github.com/google/go-github/v42/github/scim.go +++ b/vendor/github.com/google/go-github/v45/github/scim.go @@ -14,12 +14,12 @@ import ( // SCIMService provides access to SCIM related functions in the // GitHub API. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim +// GitHub API docs: https://docs.github.com/en/rest/scim type SCIMService service // SCIMUserAttributes represents supported SCIM User attributes. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#supported-scim-user-attributes +// GitHub API docs: https://docs.github.com/en/rest/scim#supported-scim-user-attributes type SCIMUserAttributes struct { UserName string `json:"userName"` // Configured by the admin. Could be an email, login, or username. (Required.) Name SCIMUserName `json:"name"` // (Required.) @@ -47,7 +47,7 @@ type SCIMUserEmail struct { // ListSCIMProvisionedIdentitiesOptions represents options for ListSCIMProvisionedIdentities. // -// Github API docs: https://docs.github.com/en/rest/reference/scim#list-scim-provisioned-identities--parameters +// Github API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities--parameters type ListSCIMProvisionedIdentitiesOptions struct { StartIndex *int `json:"startIndex,omitempty"` // Used for pagination: the index of the first result to return. (Optional.) Count *int `json:"count,omitempty"` // Used for pagination: the number of results to return. (Optional.) @@ -61,7 +61,7 @@ type ListSCIMProvisionedIdentitiesOptions struct { // ListSCIMProvisionedIdentities lists SCIM provisioned identities. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#list-scim-provisioned-identities +// GitHub API docs: https://docs.github.com/en/rest/scim#list-scim-provisioned-identities func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org string, opts *ListSCIMProvisionedIdentitiesOptions) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) u, err := addOptions(u, opts) @@ -77,7 +77,7 @@ func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org str // ProvisionAndInviteSCIMUser provisions organization membership for a user, and sends an activation email to the email address. 
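Note that ListSCIMProvisionedIdentities in this version returns only *Response, with no typed result, so a caller can really only inspect the HTTP status. A sketch using the StartIndex/Count pagination options shown above; the client's SCIM accessor and the github.Int helper are assumed from the package, and the org name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // SCIM calls need org-owner credentials in practice

	opts := &github.ListSCIMProvisionedIdentitiesOptions{
		StartIndex: github.Int(1),
		Count:      github.Int(10),
	}
	// v45 returns only *Response here; there is no decoded identity list yet.
	resp, err := client.SCIM.ListSCIMProvisionedIdentities(ctx, "my-org", opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status)
}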
// -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#provision-and-invite-a-scim-user +// GitHub API docs: https://docs.github.com/en/rest/scim#provision-and-invite-a-scim-user func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string, opts *SCIMUserAttributes) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) u, err := addOptions(u, opts) @@ -93,7 +93,7 @@ func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string // GetSCIMProvisioningInfoForUser returns SCIM provisioning information for a user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#get-scim-provisioning-information-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/scim#get-scim-provisioning-information-for-a-user func (s *SCIMService) GetSCIMProvisioningInfoForUser(ctx context.Context, org, scimUserID string) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) req, err := s.client.NewRequest("GET", u, nil) @@ -105,7 +105,7 @@ func (s *SCIMService) GetSCIMProvisioningInfoForUser(ctx context.Context, org, s // UpdateProvisionedOrgMembership updates a provisioned organization membership. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#update-a-provisioned-organization-membership +// GitHub API docs: https://docs.github.com/en/rest/scim#update-a-provisioned-organization-membership func (s *SCIMService) UpdateProvisionedOrgMembership(ctx context.Context, org, scimUserID string, opts *SCIMUserAttributes) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) u, err := addOptions(u, opts) @@ -121,7 +121,7 @@ func (s *SCIMService) UpdateProvisionedOrgMembership(ctx context.Context, org, s // UpdateAttributeForSCIMUserOptions represents options for UpdateAttributeForSCIMUser. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#update-an-attribute-for-a-scim-user--parameters +// GitHub API docs: https://docs.github.com/en/rest/scim#update-an-attribute-for-a-scim-user--parameters type UpdateAttributeForSCIMUserOptions struct { Schemas []string `json:"schemas,omitempty"` // (Optional.) Operations UpdateAttributeForSCIMUserOperations `json:"operations"` // Set of operations to be performed. (Required.) @@ -136,7 +136,7 @@ type UpdateAttributeForSCIMUserOperations struct { // UpdateAttributeForSCIMUser updates an attribute for a SCIM user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#update-an-attribute-for-a-scim-user +// GitHub API docs: https://docs.github.com/en/rest/scim#update-an-attribute-for-a-scim-user func (s *SCIMService) UpdateAttributeForSCIMUser(ctx context.Context, org, scimUserID string, opts *UpdateAttributeForSCIMUserOptions) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) u, err := addOptions(u, opts) @@ -152,7 +152,7 @@ func (s *SCIMService) UpdateAttributeForSCIMUser(ctx context.Context, org, scimU // DeleteSCIMUserFromOrg deletes a SCIM user from an organization. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/scim#delete-a-scim-user-from-an-organization +// GitHub API docs: https://docs.github.com/en/rest/scim#delete-a-scim-user-from-an-organization func (s *SCIMService) DeleteSCIMUserFromOrg(ctx context.Context, org, scimUserID string) (*Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users/%v", org, scimUserID) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/search.go b/vendor/github.com/google/go-github/v45/github/search.go similarity index 91% rename from vendor/github.com/google/go-github/v42/github/search.go rename to vendor/github.com/google/go-github/v45/github/search.go index 19aa892798..344f1bb985 100644 --- a/vendor/github.com/google/go-github/v42/github/search.go +++ b/vendor/github.com/google/go-github/v45/github/search.go @@ -29,7 +29,7 @@ import ( // For example, querying with "language:c++" and "leveldb", then query should be // "language:c++ leveldb" but not "language:c+++leveldb". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/ +// GitHub API docs: https://docs.github.com/en/rest/search/ type SearchService service // SearchOptions specifies optional parameters to the SearchService methods. @@ -69,11 +69,15 @@ type RepositoriesSearchResult struct { // Repositories searches repositories via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-repositories +// GitHub API docs: https://docs.github.com/en/rest/search#search-repositories func (s *SearchService) Repositories(ctx context.Context, query string, opts *SearchOptions) (*RepositoriesSearchResult, *Response, error) { result := new(RepositoriesSearchResult) resp, err := s.search(ctx, "repositories", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // TopicsSearchResult represents the result of a topics search. @@ -100,11 +104,15 @@ type TopicResult struct { // Please see https://help.github.com/en/articles/searching-topics for more // information about search qualifiers. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-topics +// GitHub API docs: https://docs.github.com/en/rest/search#search-topics func (s *SearchService) Topics(ctx context.Context, query string, opts *SearchOptions) (*TopicsSearchResult, *Response, error) { result := new(TopicsSearchResult) resp, err := s.search(ctx, "topics", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // CommitsSearchResult represents the result of a commits search. @@ -131,11 +139,15 @@ type CommitResult struct { // Commits searches commits via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-commits +// GitHub API docs: https://docs.github.com/en/rest/search#search-commits func (s *SearchService) Commits(ctx context.Context, query string, opts *SearchOptions) (*CommitsSearchResult, *Response, error) { result := new(CommitsSearchResult) resp, err := s.search(ctx, "commits", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // IssuesSearchResult represents the result of an issues search. 
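The search wrappers above now return a nil result when the call fails instead of a partially built struct, so err must be checked before dereferencing. A sketch that follows the query convention from the service comment (space-separated qualifiers); the Repositories field of RepositoriesSearchResult is assumed from the rest of the package:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opts := &github.SearchOptions{Sort: "stars", ListOptions: github.ListOptions{PerPage: 5}}
	// On error the result is now nil, so check err before touching it.
	result, _, err := client.Search.Repositories(ctx, "language:go leveldb", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range result.Repositories { // field assumed from the search result type
		fmt.Println(r.GetFullName(), r.GetStargazersCount())
	}
}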
@@ -147,11 +159,15 @@ type IssuesSearchResult struct { // Issues searches issues via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-issues-and-pull-requests +// GitHub API docs: https://docs.github.com/en/rest/search#search-issues-and-pull-requests func (s *SearchService) Issues(ctx context.Context, query string, opts *SearchOptions) (*IssuesSearchResult, *Response, error) { result := new(IssuesSearchResult) resp, err := s.search(ctx, "issues", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // UsersSearchResult represents the result of a users search. @@ -163,11 +179,15 @@ type UsersSearchResult struct { // Users searches users via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-users +// GitHub API docs: https://docs.github.com/en/rest/search#search-users func (s *SearchService) Users(ctx context.Context, query string, opts *SearchOptions) (*UsersSearchResult, *Response, error) { result := new(UsersSearchResult) resp, err := s.search(ctx, "users", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // Match represents a single text match. @@ -212,11 +232,15 @@ func (c CodeResult) String() string { // Code searches code via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-code +// GitHub API docs: https://docs.github.com/en/rest/search#search-code func (s *SearchService) Code(ctx context.Context, query string, opts *SearchOptions) (*CodeSearchResult, *Response, error) { result := new(CodeSearchResult) resp, err := s.search(ctx, "code", &searchParameters{Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // LabelsSearchResult represents the result of a code search. @@ -243,11 +267,15 @@ func (l LabelResult) String() string { // Labels searches labels in the repository with ID repoID via various criteria. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/search/#search-labels +// GitHub API docs: https://docs.github.com/en/rest/search#search-labels func (s *SearchService) Labels(ctx context.Context, repoID int64, query string, opts *SearchOptions) (*LabelsSearchResult, *Response, error) { result := new(LabelsSearchResult) resp, err := s.search(ctx, "labels", &searchParameters{RepositoryID: &repoID, Query: query}, opts, result) - return result, resp, err + if err != nil { + return nil, resp, err + } + + return result, resp, nil } // Helper function that executes search queries against different @@ -260,6 +288,7 @@ func (s *SearchService) search(ctx context.Context, searchType string, parameter if err != nil { return nil, err } + if parameters.RepositoryID != nil { params.Set("repository_id", strconv.FormatInt(*parameters.RepositoryID, 10)) } diff --git a/vendor/github.com/google/go-github/v45/github/secret_scanning.go b/vendor/github.com/google/go-github/v45/github/secret_scanning.go new file mode 100644 index 0000000000..ec64950a67 --- /dev/null +++ b/vendor/github.com/google/go-github/v45/github/secret_scanning.go @@ -0,0 +1,232 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. 
diff --git a/vendor/github.com/google/go-github/v45/github/secret_scanning.go b/vendor/github.com/google/go-github/v45/github/secret_scanning.go
new file mode 100644
index 0000000000..ec64950a67
--- /dev/null
+++ b/vendor/github.com/google/go-github/v45/github/secret_scanning.go
@@ -0,0 +1,232 @@
+// Copyright 2022 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+	"context"
+	"fmt"
+)
+
+// SecretScanningService handles communication with the secret scanning related
+// methods of the GitHub API.
+type SecretScanningService service
+
+// SecretScanningAlert represents a GitHub secret scanning alert.
+type SecretScanningAlert struct {
+	Number *int `json:"number,omitempty"`
+	CreatedAt *Timestamp `json:"created_at,omitempty"`
+	URL *string `json:"url,omitempty"`
+	HTMLURL *string `json:"html_url,omitempty"`
+	LocationsURL *string `json:"locations_url,omitempty"`
+	State *string `json:"state,omitempty"`
+	Resolution *string `json:"resolution,omitempty"`
+	ResolvedAt *Timestamp `json:"resolved_at,omitempty"`
+	ResolvedBy *User `json:"resolved_by,omitempty"`
+	SecretType *string `json:"secret_type,omitempty"`
+	Secret *string `json:"secret,omitempty"`
+}
+
+// SecretScanningAlertLocation represents the location for a secret scanning alert.
+type SecretScanningAlertLocation struct {
+	Type *string `json:"type,omitempty"`
+	Details *SecretScanningAlertLocationDetails `json:"details,omitempty"`
+}
+
+// SecretScanningAlertLocationDetails represents the location details for a secret scanning alert.
+type SecretScanningAlertLocationDetails struct {
+	Path *string `json:"path,omitempty"`
+	Startline *int `json:"start_line,omitempty"`
+	EndLine *int `json:"end_line,omitempty"`
+	StartColumn *int `json:"start_column,omitempty"`
+	EndColumn *int `json:"end_column,omitempty"`
+	BlobSHA *string `json:"blob_sha,omitempty"`
+	BlobURL *string `json:"blob_url,omitempty"`
+	CommitSHA *string `json:"commit_sha,omitempty"`
+	CommitURL *string `json:"commit_url,omitempty"`
+}
+
+// SecretScanningAlertListOptions specifies optional parameters to the SecretScanningService.ListAlertsForEnterprise method.
+type SecretScanningAlertListOptions struct {
+	// State of the secret scanning alerts to list. Set to open or resolved to only list secret scanning alerts in a specific state.
+	State string `url:"state,omitempty"`
+
+	// A comma-separated list of secret types to return. By default all secret types are returned.
+	SecretType string `url:"secret_type,omitempty"`
+
+	// A comma-separated list of resolutions. Only secret scanning alerts with one of these resolutions are listed.
+	// Valid resolutions are false_positive, wont_fix, revoked, pattern_edited, pattern_deleted or used_in_tests.
+	Resolution string `url:"resolution,omitempty"`
+
+	ListCursorOptions
+}
+
+// SecretScanningAlertUpdateOptions specifies optional parameters to the SecretScanningService.UpdateAlert method.
+type SecretScanningAlertUpdateOptions struct {
+	// Required. Sets the state of the secret scanning alert. Can be either open or resolved.
+	// You must provide resolution when you set the state to resolved.
+	State *string `json:"state,omitempty"`
+
+	// A comma-separated list of secret types to return. By default all secret types are returned.
+	SecretType *string `json:"secret_type,omitempty"`
+
+	// Required when the state is resolved. The reason for resolving the alert. Can be one of false_positive,
+	// wont_fix, revoked, or used_in_tests.
+	Resolution *string `json:"resolution,omitempty"`
+}
+
+// ListAlertsForEnterprise lists secret scanning alerts for eligible repositories in an enterprise, from newest to oldest.
+//
+// To use this endpoint, you must be a member of the enterprise, and you must use an access token with the repo scope or
+// security_events scope. Alerts are only returned for organizations in the enterprise for which you are an organization owner or a security manager.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-an-enterprise
+func (s *SecretScanningService) ListAlertsForEnterprise(ctx context.Context, enterprise string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) {
+	u := fmt.Sprintf("enterprises/%v/secret-scanning/alerts", enterprise)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var alerts []*SecretScanningAlert
+	resp, err := s.client.Do(ctx, req, &alerts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return alerts, resp, nil
+}
+
+// ListAlertsForOrg lists secret scanning alerts for eligible repositories in an organization, from newest to oldest.
+//
+// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with
+// the repo scope or security_events scope.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-an-organization
+func (s *SecretScanningService) ListAlertsForOrg(ctx context.Context, org string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) {
+	u := fmt.Sprintf("orgs/%v/secret-scanning/alerts", org)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var alerts []*SecretScanningAlert
+	resp, err := s.client.Do(ctx, req, &alerts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return alerts, resp, nil
+}
+
+// ListAlertsForRepo lists secret scanning alerts for a private repository, from newest to oldest.
+//
+// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with
+// the repo scope or security_events scope.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-secret-scanning-alerts-for-a-repository
+func (s *SecretScanningService) ListAlertsForRepo(ctx context.Context, owner, repo string, opts *SecretScanningAlertListOptions) ([]*SecretScanningAlert, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts", owner, repo)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var alerts []*SecretScanningAlert
+	resp, err := s.client.Do(ctx, req, &alerts)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return alerts, resp, nil
+}
+
+// GetAlert gets a single secret scanning alert detected in a private repository.
+//
+// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with
+// the repo scope or security_events scope.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#get-a-secret-scanning-alert
+func (s *SecretScanningService) GetAlert(ctx context.Context, owner, repo string, number int64) (*SecretScanningAlert, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v", owner, repo, number)
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var alert *SecretScanningAlert
+	resp, err := s.client.Do(ctx, req, &alert)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return alert, resp, nil
+}
+
+// UpdateAlert updates the status of a secret scanning alert in a private repository.
+//
+// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with
+// the repo scope or security_events scope.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#update-a-secret-scanning-alert
+func (s *SecretScanningService) UpdateAlert(ctx context.Context, owner, repo string, number int64, opts *SecretScanningAlertUpdateOptions) (*SecretScanningAlert, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v", owner, repo, number)
+
+	req, err := s.client.NewRequest("PATCH", u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var alert *SecretScanningAlert
+	resp, err := s.client.Do(ctx, req, &alert)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return alert, resp, nil
+}
+
+// ListLocationsForAlert lists all locations for a given secret scanning alert for a private repository.
+//
+// To use this endpoint, you must be an administrator for the repository or organization, and you must use an access token with
+// the repo scope or security_events scope.
+//
+// GitHub API docs: https://docs.github.com/en/enterprise-server@3.5/rest/secret-scanning#list-locations-for-a-secret-scanning-alert
+func (s *SecretScanningService) ListLocationsForAlert(ctx context.Context, owner, repo string, number int64, opts *ListOptions) ([]*SecretScanningAlertLocation, *Response, error) {
+	u := fmt.Sprintf("repos/%v/%v/secret-scanning/alerts/%v/locations", owner, repo, number)
+	u, err := addOptions(u, opts)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	req, err := s.client.NewRequest("GET", u, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var locations []*SecretScanningAlertLocation
+	resp, err := s.client.Do(ctx, req, &locations)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return locations, resp, nil
+}
diff --git a/vendor/github.com/google/go-github/v42/github/strings.go b/vendor/github.com/google/go-github/v45/github/strings.go
similarity index 100%
rename from vendor/github.com/google/go-github/v42/github/strings.go
rename to vendor/github.com/google/go-github/v45/github/strings.go
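Note: secret_scanning.go is entirely new in v45, so callers picking up this bump gain a SecretScanningService on the client. A minimal sketch of a list-then-resolve flow, assuming a token with the repo or security_events scope; the token, owner, and repo values are placeholders:

package main

import (
	"context"
	"log"

	"github.com/google/go-github/v45/github"
	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "ghp_example"}) // placeholder token
	client := github.NewClient(oauth2.NewClient(ctx, ts))

	// List open alerts for a placeholder repository.
	alerts, _, err := client.SecretScanning.ListAlertsForRepo(ctx, "octo-org", "octo-repo",
		&github.SecretScanningAlertListOptions{State: "open"})
	if err != nil {
		log.Fatal(err)
	}

	for _, a := range alerts {
		// Resolve each alert; "used_in_tests" is one of the documented resolutions.
		opts := &github.SecretScanningAlertUpdateOptions{
			State:      github.String("resolved"),
			Resolution: github.String("used_in_tests"),
		}
		if _, _, err := client.SecretScanning.UpdateAlert(ctx, "octo-org", "octo-repo", int64(a.GetNumber()), opts); err != nil {
			log.Printf("alert %d: %v", a.GetNumber(), err)
		}
	}
}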
diff --git a/vendor/github.com/google/go-github/v42/github/teams.go b/vendor/github.com/google/go-github/v45/github/teams.go
similarity index 87%
rename from vendor/github.com/google/go-github/v42/github/teams.go
rename to vendor/github.com/google/go-github/v45/github/teams.go
index 82d4093b47..38845e0953 100644
--- a/vendor/github.com/google/go-github/v42/github/teams.go
+++ b/vendor/github.com/google/go-github/v45/github/teams.go
@@ -16,7 +16,7 @@ import (
// TeamsService provides access to the team-related functions
// in the GitHub API.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/
+// GitHub API docs: https://docs.github.com/en/rest/teams/
type TeamsService service

// Team represents a team within a GitHub organization. Teams are used to
@@ -82,7 +82,7 @@ func (i Invitation) String() string {

// ListTeams lists all of the teams for an organization.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-teams
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-teams
func (s *TeamsService) ListTeams(ctx context.Context, org string, opts *ListOptions) ([]*Team, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams", org)
	u, err := addOptions(u, opts)
@@ -106,7 +106,7 @@ func (s *TeamsService) ListTeams(ctx context.Context, org string, opts *ListOpti

// GetTeamByID fetches a team, given a specified organization ID, by ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-team-by-name
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#get-a-team-by-name
func (s *TeamsService) GetTeamByID(ctx context.Context, orgID, teamID int64) (*Team, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -125,7 +125,7 @@ func (s *TeamsService) GetTeamByID(ctx context.Context, orgID, teamID int64) (*T

// GetTeamBySlug fetches a team, given a specified organization name, by slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-team-by-name
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#get-a-team-by-name
func (s *TeamsService) GetTeamBySlug(ctx context.Context, org, slug string) (*Team, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v", org, slug)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -175,7 +175,7 @@ func (s NewTeam) String() string {

// CreateTeam creates a new team within an organization.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#create-a-team
func (s *TeamsService) CreateTeam(ctx context.Context, org string, team NewTeam) (*Team, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams", org)
	req, err := s.client.NewRequest("POST", u, team)
@@ -221,7 +221,7 @@ func copyNewTeamWithoutParent(team *NewTeam) *newTeamNoParent {

// EditTeamByID edits a team, given an organization ID, selected by ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#update-a-team
func (s *TeamsService) EditTeamByID(ctx context.Context, orgID, teamID int64, team NewTeam, removeParent bool) (*Team, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID)
@@ -248,7 +248,7 @@ func (s *TeamsService) EditTeamByID(ctx context.Context, orgID, teamID int64, te

// EditTeamBySlug edits a team, given an organization name, by slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#update-a-team
func (s *TeamsService) EditTeamBySlug(ctx context.Context, org, slug string, team NewTeam, removeParent bool) (*Team, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v", org, slug)
@@ -275,7 +275,7 @@ func (s *TeamsService) EditTeamBySlug(ctx context.Context, org, slug string, tea

// DeleteTeamByID deletes a team referenced by ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#delete-a-team
func (s *TeamsService) DeleteTeamByID(ctx context.Context, orgID, teamID int64) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v", orgID, teamID)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -288,7 +288,7 @@ func (s *TeamsService) DeleteTeamByID(ctx context.Context, orgID, teamID int64)

// DeleteTeamBySlug deletes a team referenced by slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#delete-a-team
func (s *TeamsService) DeleteTeamBySlug(ctx context.Context, org, slug string) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v", org, slug)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -301,7 +301,7 @@ func (s *TeamsService) DeleteTeamBySlug(ctx context.Context, org, slug string) (

// ListChildTeamsByParentID lists child teams for a parent team given parent ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-child-teams
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-child-teams
func (s *TeamsService) ListChildTeamsByParentID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Team, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/teams", orgID, teamID)
	u, err := addOptions(u, opts)
@@ -325,7 +325,7 @@ func (s *TeamsService) ListChildTeamsByParentID(ctx context.Context, orgID, team

// ListChildTeamsByParentSlug lists child teams for a parent team given parent slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-child-teams
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-child-teams
func (s *TeamsService) ListChildTeamsByParentSlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Team, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/teams", org, slug)
	u, err := addOptions(u, opts)
@@ -349,7 +349,7 @@ func (s *TeamsService) ListChildTeamsByParentSlug(ctx context.Context, org, slug

// ListTeamReposByID lists the repositories given a team ID that the specified team has access to.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-repositories
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-repositories
func (s *TeamsService) ListTeamReposByID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Repository, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/repos", orgID, teamID)
	u, err := addOptions(u, opts)
@@ -377,7 +377,7 @@ func (s *TeamsService) ListTeamReposByID(ctx context.Context, orgID, teamID int6

// ListTeamReposBySlug lists the repositories given a team slug that the specified team has access to.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-repositories
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-repositories
func (s *TeamsService) ListTeamReposBySlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Repository, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/repos", org, slug)
	u, err := addOptions(u, opts)
@@ -407,7 +407,7 @@ func (s *TeamsService) ListTeamReposBySlug(ctx context.Context, org, slug string
// repository is managed by team, a Repository is returned which includes the
// permissions team has for that repo.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#check-team-permissions-for-a-repository
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository
func (s *TeamsService) IsTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string) (*Repository, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -431,7 +431,7 @@ func (s *TeamsService) IsTeamRepoByID(ctx context.Context, orgID, teamID int64,
// repository is managed by team, a Repository is returned which includes the
// permissions team has for that repo.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#check-team-permissions-for-a-repository
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-repository
func (s *TeamsService) IsTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string) (*Repository, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -452,7 +452,7 @@ func (s *TeamsService) IsTeamRepoBySlug(ctx context.Context, org, slug, owner, r
}

// TeamAddTeamRepoOptions specifies the optional parameters to the
-// TeamsService.AddTeamRepo method.
+// TeamsService.AddTeamRepoByID and TeamsService.AddTeamRepoBySlug methods.
type TeamAddTeamRepoOptions struct {
	// Permission specifies the permission to grant the team on this repository.
	// Possible values are:
@@ -470,7 +470,7 @@ type TeamAddTeamRepoOptions struct {
// The specified repository must be owned by the organization to which the team
// belongs, or a direct fork of a repository owned by the organization.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-repository-permissions
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-repository-permissions
func (s *TeamsService) AddTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string, opts *TeamAddTeamRepoOptions) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -485,7 +485,7 @@ func (s *TeamsService) AddTeamRepoByID(ctx context.Context, orgID, teamID int64,
// The specified repository must be owned by the organization to which the team
// belongs, or a direct fork of a repository owned by the organization.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-repository-permissions
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-repository-permissions
func (s *TeamsService) AddTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string, opts *TeamAddTeamRepoOptions) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -500,7 +500,7 @@ func (s *TeamsService) AddTeamRepoBySlug(ctx context.Context, org, slug, owner,
// team given the team ID. Note that this does not delete the repository, it
// just removes it from the team.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-a-repository-from-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-repository-from-a-team
func (s *TeamsService) RemoveTeamRepoByID(ctx context.Context, orgID, teamID int64, owner, repo string) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/repos/%v/%v", orgID, teamID, owner, repo)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -515,7 +515,7 @@ func (s *TeamsService) RemoveTeamRepoByID(ctx context.Context, orgID, teamID int
// team given the team slug. Note that this does not delete the repository, it
// just removes it from the team.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-a-repository-from-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-repository-from-a-team
func (s *TeamsService) RemoveTeamRepoBySlug(ctx context.Context, org, slug, owner, repo string) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/repos/%v/%v", org, slug, owner, repo)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -527,7 +527,7 @@ func (s *TeamsService) RemoveTeamRepoBySlug(ctx context.Context, org, slug, owne
}

// ListUserTeams lists a user's teams
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-teams-for-the-authenticated-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-teams-for-the-authenticated-user
func (s *TeamsService) ListUserTeams(ctx context.Context, opts *ListOptions) ([]*Team, *Response, error) {
	u := "user/teams"
	u, err := addOptions(u, opts)
@@ -551,7 +551,7 @@ func (s *TeamsService) ListUserTeams(ctx context.Context, opts *ListOptions) ([]

// ListTeamProjectsByID lists the organization projects for a team given the team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-projects
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-projects
func (s *TeamsService) ListTeamProjectsByID(ctx context.Context, orgID, teamID int64) ([]*Project, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/projects", orgID, teamID)
@@ -575,7 +575,7 @@ func (s *TeamsService) ListTeamProjectsByID(ctx context.Context, orgID, teamID i

// ListTeamProjectsBySlug lists the organization projects for a team given the team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-projects
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#list-team-projects
func (s *TeamsService) ListTeamProjectsBySlug(ctx context.Context, org, slug string) ([]*Project, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/projects", org, slug)
@@ -600,7 +600,7 @@ func (s *TeamsService) ListTeamProjectsBySlug(ctx context.Context, org, slug str
// ReviewTeamProjectsByID checks whether a team, given its ID, has read, write, or admin
// permissions for an organization project.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#check-team-permissions-for-a-project
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-project
func (s *TeamsService) ReviewTeamProjectsByID(ctx context.Context, orgID, teamID, projectID int64) (*Project, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -624,7 +624,7 @@ func (s *TeamsService) ReviewTeamProjectsByID(ctx context.Context, orgID, teamID
// ReviewTeamProjectsBySlug checks whether a team, given its slug, has read, write, or admin
// permissions for an organization project.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#check-team-permissions-for-a-project
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#check-team-permissions-for-a-project
func (s *TeamsService) ReviewTeamProjectsBySlug(ctx context.Context, org, slug string, projectID int64) (*Project, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -661,7 +661,7 @@ type TeamProjectOptions struct {
// To add a project to a team or update the team's permission on a project, the
// authenticated user must have admin permissions for the project.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-project-permissions
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-project-permissions
func (s *TeamsService) AddTeamProjectByID(ctx context.Context, orgID, teamID, projectID int64, opts *TeamProjectOptions) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -680,7 +680,7 @@ func (s *TeamsService) AddTeamProjectByID(ctx context.Context, orgID, teamID, pr
// To add a project to a team or update the team's permission on a project, the
// authenticated user must have admin permissions for the project.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-project-permissions
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#add-or-update-team-project-permissions
func (s *TeamsService) AddTeamProjectBySlug(ctx context.Context, org, slug string, projectID int64, opts *TeamProjectOptions) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -702,7 +702,7 @@ func (s *TeamsService) AddTeamProjectBySlug(ctx context.Context, org, slug strin
// or project.
// Note: This endpoint removes the project from the team, but does not delete it.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-a-project-from-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-project-from-a-team
func (s *TeamsService) RemoveTeamProjectByID(ctx context.Context, orgID, teamID, projectID int64) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/projects/%v", orgID, teamID, projectID)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -724,7 +724,7 @@ func (s *TeamsService) RemoveTeamProjectByID(ctx context.Context, orgID, teamID,
// or project.
// Note: This endpoint removes the project from the team, but does not delete it.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-a-project-from-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/teams#remove-a-project-from-a-team
func (s *TeamsService) RemoveTeamProjectBySlug(ctx context.Context, org, slug string, projectID int64) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/projects/%v", org, slug, projectID)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -753,7 +753,7 @@ type IDPGroup struct {

// ListIDPGroupsInOrganization lists IDP groups available in an organization.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-idp-groups-for-an-organization
+// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-an-organization
func (s *TeamsService) ListIDPGroupsInOrganization(ctx context.Context, org string, opts *ListCursorOptions) (*IDPGroupList, *Response, error) {
	u := fmt.Sprintf("orgs/%v/team-sync/groups", org)
	u, err := addOptions(u, opts)
@@ -771,13 +771,14 @@ func (s *TeamsService) ListIDPGroupsInOrganization(ctx context.Context, org stri
	if err != nil {
		return nil, resp, err
	}
+
	return groups, resp, nil
}

// ListIDPGroupsForTeamByID lists IDP groups connected to a team on GitHub
// given organization and team IDs.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-idp-groups-for-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-a-team
func (s *TeamsService) ListIDPGroupsForTeamByID(ctx context.Context, orgID, teamID int64) (*IDPGroupList, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/team-sync/group-mappings", orgID, teamID)
@@ -791,13 +792,14 @@ func (s *TeamsService) ListIDPGroupsForTeamByID(ctx context.Context, orgID, team
	if err != nil {
		return nil, resp, err
	}
-	return groups, resp, err
+
+	return groups, resp, nil
}

// ListIDPGroupsForTeamBySlug lists IDP groups connected to a team on GitHub
// given organization name and team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-idp-groups-for-a-team
+// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#list-idp-groups-for-a-team
func (s *TeamsService) ListIDPGroupsForTeamBySlug(ctx context.Context, org, slug string) (*IDPGroupList, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/team-sync/group-mappings", org, slug)
@@ -811,13 +813,14 @@ func (s *TeamsService) ListIDPGroupsForTeamBySlug(ctx context.Context, org, slug
	if err != nil {
		return nil, resp, err
	}
-	return groups, resp, err
+
+	return groups, resp, nil
}

// CreateOrUpdateIDPGroupConnectionsByID creates, updates, or removes a connection
// between a team and an IDP group given organization and team IDs.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-or-update-idp-group-connections
+// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#create-or-update-idp-group-connections
func (s *TeamsService) CreateOrUpdateIDPGroupConnectionsByID(ctx context.Context, orgID, teamID int64, opts IDPGroupList) (*IDPGroupList, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/team-sync/group-mappings", orgID, teamID)
@@ -838,7 +841,7 @@ func (s *TeamsService) CreateOrUpdateIDPGroupConnectionsByID(ctx context.Context

// CreateOrUpdateIDPGroupConnectionsBySlug creates, updates, or removes a connection
// between a team and an IDP group given organization name and team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-or-update-idp-group-connections
+// GitHub API docs: https://docs.github.com/en/rest/teams/team-sync#create-or-update-idp-group-connections
func (s *TeamsService) CreateOrUpdateIDPGroupConnectionsBySlug(ctx context.Context, org, slug string, opts IDPGroupList) (*IDPGroupList, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/team-sync/group-mappings", org, slug)
@@ -886,7 +889,7 @@ type ExternalGroupList struct {

// GetExternalGroup fetches an external group.
//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/reference/teams#get-an-external-group
+// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#get-an-external-group
func (s *TeamsService) GetExternalGroup(ctx context.Context, org string, groupID int64) (*ExternalGroup, *Response, error) {
	u := fmt.Sprintf("orgs/%v/external-group/%v", org, groupID)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -913,7 +916,7 @@ type ListExternalGroupsOptions struct {

// ListExternalGroups lists external groups connected to a team on GitHub.
//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/reference/teams#list-external-groups-in-an-organization
+// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#list-external-groups-in-an-organization
func (s *TeamsService) ListExternalGroups(ctx context.Context, org string, opts *ListExternalGroupsOptions) (*ExternalGroupList, *Response, error) {
	u := fmt.Sprintf("orgs/%v/external-groups", org)
	u, err := addOptions(u, opts)
@@ -937,7 +940,7 @@ func (s *TeamsService) ListExternalGroups(ctx context.Context, org string, opts

// UpdateConnectedExternalGroup updates the connection between an external group and a team.
//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/reference/teams#update-the-connection-between-an-external-group-and-a-team
+// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#update-the-connection-between-an-external-group-and-a-team
func (s *TeamsService) UpdateConnectedExternalGroup(ctx context.Context, org, slug string, eg *ExternalGroup) (*ExternalGroup, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug)
@@ -957,7 +960,7 @@ func (s *TeamsService) UpdateConnectedExternalGroup(ctx context.Context, org, sl

// RemoveConnectedExternalGroup removes the connection between an external group and a team.
//
-// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/reference/teams#remove-the-connection-between-an-external-group-and-a-team
+// GitHub API docs: https://docs.github.com/en/enterprise-cloud@latest/rest/teams/external-groups#remove-the-connection-between-an-external-group-and-a-team
func (s *TeamsService) RemoveConnectedExternalGroup(ctx context.Context, org, slug string) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/external-groups", org, slug)
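Note: the teams.go changes above are documentation-link rewrites only, so caller code keeps compiling across the bump. For readers new to this part of the API, a short sketch of granting and then verifying team access to a repository; the org, team, and repository names are placeholders, and the client is assumed to be pre-authenticated:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // assume an authenticated client in real use

	// Grant the team push access to a repository.
	opts := &github.TeamAddTeamRepoOptions{Permission: "push"}
	if _, err := client.Teams.AddTeamRepoBySlug(ctx, "octo-org", "platform", "octo-org", "widgets", opts); err != nil {
		log.Fatal(err)
	}

	// Check the permissions the team now has on that repository.
	repo, _, err := client.Teams.IsTeamRepoBySlug(ctx, "octo-org", "platform", "octo-org", "widgets")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("permissions:", repo.GetPermissions())
}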
diff --git a/vendor/github.com/google/go-github/v42/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/v45/github/teams_discussion_comments.go
similarity index 87%
rename from vendor/github.com/google/go-github/v42/github/teams_discussion_comments.go
rename to vendor/github.com/google/go-github/v45/github/teams_discussion_comments.go
index b6c7e17845..f3a1cc4dc0 100644
--- a/vendor/github.com/google/go-github/v42/github/teams_discussion_comments.go
+++ b/vendor/github.com/google/go-github/v45/github/teams_discussion_comments.go
@@ -43,7 +43,7 @@ type DiscussionCommentListOptions struct {
// ListCommentsByID lists all comments on a team discussion by team ID.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-discussion-comments
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#list-discussion-comments
func (s *TeamsService) ListCommentsByID(ctx context.Context, orgID, teamID int64, discussionNumber int, options *DiscussionCommentListOptions) ([]*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discussionNumber)
	u, err := addOptions(u, options)
@@ -68,7 +68,7 @@ func (s *TeamsService) ListCommentsByID(ctx context.Context, orgID, teamID int64

// ListCommentsBySlug lists all comments on a team discussion by team slug.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-discussion-comments
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#list-discussion-comments
func (s *TeamsService) ListCommentsBySlug(ctx context.Context, org, slug string, discussionNumber int, options *DiscussionCommentListOptions) ([]*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments", org, slug, discussionNumber)
	u, err := addOptions(u, options)
@@ -93,7 +93,7 @@ func (s *TeamsService) ListCommentsBySlug(ctx context.Context, org, slug string,

// GetCommentByID gets a specific comment on a team discussion by team ID.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#get-a-discussion-comment
func (s *TeamsService) GetCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -113,7 +113,7 @@ func (s *TeamsService) GetCommentByID(ctx context.Context, orgID, teamID int64,

// GetCommentBySlug gets a specific comment on a team discussion by team slug.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#get-a-discussion-comment
func (s *TeamsService) GetCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
@@ -134,7 +134,7 @@ func (s *TeamsService) GetCommentBySlug(ctx context.Context, org, slug string, d

// CreateCommentByID creates a new comment on a team discussion by team ID.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#create-a-discussion-comment
func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discussionNumber)
	req, err := s.client.NewRequest("POST", u, comment)
@@ -154,7 +154,7 @@ func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int6

// CreateCommentBySlug creates a new comment on a team discussion by team slug.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#create-a-discussion-comment
func (s *TeamsService) CreateCommentBySlug(ctx context.Context, org, slug string, discussionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments", org, slug, discussionNumber)
	req, err := s.client.NewRequest("POST", u, comment)
@@ -175,7 +175,7 @@ func (s *TeamsService) CreateCommentBySlug(ctx context.Context, org, slug string
// Authenticated user must grant write:discussion scope.
// User is allowed to edit body of a comment only.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#update-a-discussion-comment
func (s *TeamsService) EditCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber)
	req, err := s.client.NewRequest("PATCH", u, comment)
@@ -196,7 +196,7 @@ func (s *TeamsService) EditCommentByID(ctx context.Context, orgID, teamID int64,
// Authenticated user must grant write:discussion scope.
// User is allowed to edit body of a comment only.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#update-a-discussion-comment
func (s *TeamsService) EditCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
	req, err := s.client.NewRequest("PATCH", u, comment)
@@ -216,7 +216,7 @@ func (s *TeamsService) EditCommentBySlug(ctx context.Context, org, slug string,

// DeleteCommentByID deletes a comment on a team discussion by team ID.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#delete-a-discussion-comment
func (s *TeamsService) DeleteCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber, commentNumber int) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments/%v", orgID, teamID, discussionNumber, commentNumber)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -230,7 +230,7 @@ func (s *TeamsService) DeleteCommentByID(ctx context.Context, orgID, teamID int6

// DeleteCommentBySlug deletes a comment on a team discussion by team slug.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-discussion-comment
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussion-comments#delete-a-discussion-comment
func (s *TeamsService) DeleteCommentBySlug(ctx context.Context, org, slug string, discussionNumber, commentNumber int) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v/comments/%v", org, slug, discussionNumber, commentNumber)
	req, err := s.client.NewRequest("DELETE", u, nil)
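Note: the comment methods above take the discussion number produced by the teams_discussions.go API that follows. A sketch that creates a discussion and then comments on it, assuming a client with the write:discussion scope; the org, team, and text bodies are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // assume a client with write:discussion scope in real use

	// Open a discussion on the team's page.
	disc, _, err := client.Teams.CreateDiscussionBySlug(ctx, "octo-org", "platform", github.TeamDiscussion{
		Title: github.String("Deploy window"),
		Body:  github.String("Proposing Friday 10:00 UTC."),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Comment on it using the discussion number from the create call.
	comment, _, err := client.Teams.CreateCommentBySlug(ctx, "octo-org", "platform", disc.GetNumber(), github.DiscussionComment{
		Body: github.String("Works for me."),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created comment", comment.GetNumber())
}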
diff --git a/vendor/github.com/google/go-github/v42/github/teams_discussions.go b/vendor/github.com/google/go-github/v45/github/teams_discussions.go
similarity index 88%
rename from vendor/github.com/google/go-github/v42/github/teams_discussions.go
rename to vendor/github.com/google/go-github/v45/github/teams_discussions.go
index 5678548e9b..69a3ebd51f 100644
--- a/vendor/github.com/google/go-github/v42/github/teams_discussions.go
+++ b/vendor/github.com/google/go-github/v45/github/teams_discussions.go
@@ -49,7 +49,7 @@ type DiscussionListOptions struct {
// ListDiscussionsByID lists all discussions on team's page given Organization and Team ID.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-discussions
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#list-discussions
func (s *TeamsService) ListDiscussionsByID(ctx context.Context, orgID, teamID int64, opts *DiscussionListOptions) ([]*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions", orgID, teamID)
	u, err := addOptions(u, opts)
@@ -74,7 +74,7 @@ func (s *TeamsService) ListDiscussionsByID(ctx context.Context, orgID, teamID in

// ListDiscussionsBySlug lists all discussions on team's page given Organization name and Team's slug.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-discussions
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#list-discussions
func (s *TeamsService) ListDiscussionsBySlug(ctx context.Context, org, slug string, opts *DiscussionListOptions) ([]*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions", org, slug)
	u, err := addOptions(u, opts)
@@ -99,7 +99,7 @@ func (s *TeamsService) ListDiscussionsBySlug(ctx context.Context, org, slug stri

// GetDiscussionByID gets a specific discussion on a team's page given Organization and Team ID.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#get-a-discussion
func (s *TeamsService) GetDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -119,7 +119,7 @@ func (s *TeamsService) GetDiscussionByID(ctx context.Context, orgID, teamID int6

// GetDiscussionBySlug gets a specific discussion on a team's page given Organization name and Team's slug.
// Authenticated user must grant read:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#get-a-discussion
func (s *TeamsService) GetDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -139,7 +139,7 @@ func (s *TeamsService) GetDiscussionBySlug(ctx context.Context, org, slug string

// CreateDiscussionByID creates a new discussion post on a team's page given Organization and Team ID.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#create-a-discussion
func (s *TeamsService) CreateDiscussionByID(ctx context.Context, orgID, teamID int64, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions", orgID, teamID)
	req, err := s.client.NewRequest("POST", u, discussion)
@@ -159,7 +159,7 @@ func (s *TeamsService) CreateDiscussionByID(ctx context.Context, orgID, teamID i

// CreateDiscussionBySlug creates a new discussion post on a team's page given Organization name and Team's slug.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#create-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#create-a-discussion
func (s *TeamsService) CreateDiscussionBySlug(ctx context.Context, org, slug string, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions", org, slug)
	req, err := s.client.NewRequest("POST", u, discussion)
@@ -180,7 +180,7 @@ func (s *TeamsService) CreateDiscussionBySlug(ctx context.Context, org, slug str
// Authenticated user must grant write:discussion scope.
// User is allowed to change Title and Body of a discussion only.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#update-a-discussion
func (s *TeamsService) EditDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber)
	req, err := s.client.NewRequest("PATCH", u, discussion)
@@ -201,7 +201,7 @@ func (s *TeamsService) EditDiscussionByID(ctx context.Context, orgID, teamID int
// Authenticated user must grant write:discussion scope.
// User is allowed to change Title and Body of a discussion only.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#update-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#update-a-discussion
func (s *TeamsService) EditDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int, discussion TeamDiscussion) (*TeamDiscussion, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber)
	req, err := s.client.NewRequest("PATCH", u, discussion)
@@ -221,7 +221,7 @@ func (s *TeamsService) EditDiscussionBySlug(ctx context.Context, org, slug strin

// DeleteDiscussionByID deletes a discussion from team's page given Organization and Team ID.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#delete-a-discussion
func (s *TeamsService) DeleteDiscussionByID(ctx context.Context, orgID, teamID int64, discussionNumber int) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v", orgID, teamID, discussionNumber)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -235,7 +235,7 @@ func (s *TeamsService) DeleteDiscussionByID(ctx context.Context, orgID, teamID i

// DeleteDiscussionBySlug deletes a discussion from team's page given Organization name and Team's slug.
// Authenticated user must grant write:discussion scope.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#delete-a-discussion
+// GitHub API docs: https://docs.github.com/en/rest/teams/discussions#delete-a-discussion
func (s *TeamsService) DeleteDiscussionBySlug(ctx context.Context, org, slug string, discussionNumber int) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/discussions/%v", org, slug, discussionNumber)
	req, err := s.client.NewRequest("DELETE", u, nil)
diff --git a/vendor/github.com/google/go-github/v42/github/teams_members.go b/vendor/github.com/google/go-github/v45/github/teams_members.go
similarity index 85%
rename from vendor/github.com/google/go-github/v42/github/teams_members.go
rename to vendor/github.com/google/go-github/v45/github/teams_members.go
index e6ad448b0e..58cb79744e 100644
--- a/vendor/github.com/google/go-github/v42/github/teams_members.go
+++ b/vendor/github.com/google/go-github/v45/github/teams_members.go
@@ -23,7 +23,7 @@ type TeamListTeamMembersOptions struct {
// ListTeamMembersByID lists all of the users who are members of a team, given a specified
// organization ID, by team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-members
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-team-members
func (s *TeamsService) ListTeamMembersByID(ctx context.Context, orgID, teamID int64, opts *TeamListTeamMembersOptions) ([]*User, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/members", orgID, teamID)
	u, err := addOptions(u, opts)
@@ -48,7 +48,7 @@ func (s *TeamsService) ListTeamMembersByID(ctx context.Context, orgID, teamID in

// ListTeamMembersBySlug lists all of the users who are members of a team, given a specified
// organization name, by team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-team-members
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-team-members
func (s *TeamsService) ListTeamMembersBySlug(ctx context.Context, org, slug string, opts *TeamListTeamMembersOptions) ([]*User, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/members", org, slug)
	u, err := addOptions(u, opts)
@@ -73,7 +73,7 @@ func (s *TeamsService) ListTeamMembersBySlug(ctx context.Context, org, slug stri

// GetTeamMembershipByID returns the membership status for a user in a team, given a specified
// organization ID, by team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#get-team-membership-for-a-user
func (s *TeamsService) GetTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string) (*Membership, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -93,7 +93,7 @@ func (s *TeamsService) GetTeamMembershipByID(ctx context.Context, orgID, teamID

// GetTeamMembershipBySlug returns the membership status for a user in a team, given a specified
// organization name, by team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#get-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#get-team-membership-for-a-user
func (s *TeamsService) GetTeamMembershipBySlug(ctx context.Context, org, slug, user string) (*Membership, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
	req, err := s.client.NewRequest("GET", u, nil)
@@ -127,7 +127,7 @@ type TeamAddTeamMembershipOptions struct {

// AddTeamMembershipByID adds or invites a user to a team, given a specified
// organization ID, by team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#add-or-update-team-membership-for-a-user
func (s *TeamsService) AddTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string, opts *TeamAddTeamMembershipOptions) (*Membership, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -147,7 +147,7 @@ func (s *TeamsService) AddTeamMembershipByID(ctx context.Context, orgID, teamID

// AddTeamMembershipBySlug adds or invites a user to a team, given a specified
// organization name, by team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#add-or-update-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#add-or-update-team-membership-for-a-user
func (s *TeamsService) AddTeamMembershipBySlug(ctx context.Context, org, slug, user string, opts *TeamAddTeamMembershipOptions) (*Membership, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
	req, err := s.client.NewRequest("PUT", u, opts)
@@ -167,7 +167,7 @@ func (s *TeamsService) AddTeamMembershipBySlug(ctx context.Context, org, slug, u

// RemoveTeamMembershipByID removes a user from a team, given a specified
// organization ID, by team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#remove-team-membership-for-a-user
func (s *TeamsService) RemoveTeamMembershipByID(ctx context.Context, orgID, teamID int64, user string) (*Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/memberships/%v", orgID, teamID, user)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -181,7 +181,7 @@ func (s *TeamsService) RemoveTeamMembershipByID(ctx context.Context, orgID, team

// RemoveTeamMembershipBySlug removes a user from a team, given a specified
// organization name, by team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#remove-team-membership-for-a-user
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#remove-team-membership-for-a-user
func (s *TeamsService) RemoveTeamMembershipBySlug(ctx context.Context, org, slug, user string) (*Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/memberships/%v", org, slug, user)
	req, err := s.client.NewRequest("DELETE", u, nil)
@@ -195,7 +195,7 @@ func (s *TeamsService) RemoveTeamMembershipBySlug(ctx context.Context, org, slug

// ListPendingTeamInvitationsByID gets pending invitation list of a team, given a specified
// organization ID, by team ID.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-pending-team-invitations
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-pending-team-invitations
func (s *TeamsService) ListPendingTeamInvitationsByID(ctx context.Context, orgID, teamID int64, opts *ListOptions) ([]*Invitation, *Response, error) {
	u := fmt.Sprintf("organizations/%v/team/%v/invitations", orgID, teamID)
	u, err := addOptions(u, opts)
@@ -220,7 +220,7 @@ func (s *TeamsService) ListPendingTeamInvitationsByID(ctx context.Context, orgID

// ListPendingTeamInvitationsBySlug gets pending invitation list of a team, given a specified
// organization name, by team slug.
//
-// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/teams/#list-pending-team-invitations
+// GitHub API docs: https://docs.github.com/en/rest/teams/members#list-pending-team-invitations
func (s *TeamsService) ListPendingTeamInvitationsBySlug(ctx context.Context, org, slug string, opts *ListOptions) ([]*Invitation, *Response, error) {
	u := fmt.Sprintf("orgs/%v/teams/%v/invitations", org, slug)
	u, err := addOptions(u, opts)
diff --git a/vendor/github.com/google/go-github/v42/github/timestamp.go b/vendor/github.com/google/go-github/v45/github/timestamp.go
similarity index 100%
rename from vendor/github.com/google/go-github/v42/github/timestamp.go
rename to vendor/github.com/google/go-github/v45/github/timestamp.go
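Note: team membership additions act as invitations when the target user is not yet an organization member. A sketch pairing AddTeamMembershipBySlug with GetTeamMembershipBySlug to observe the pending state, assuming an org-admin token; the org, team, and user names are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/v45/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // assume an org-admin token in real use

	// Invite or add a user as a team maintainer.
	opts := &github.TeamAddTeamMembershipOptions{Role: "maintainer"}
	if _, _, err := client.Teams.AddTeamMembershipBySlug(ctx, "octo-org", "platform", "octocat", opts); err != nil {
		log.Fatal(err)
	}

	// The membership state stays "pending" until the user accepts the invitation.
	m, _, err := client.Teams.GetTeamMembershipBySlug(ctx, "octo-org", "platform", "octocat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state:", m.GetState(), "role:", m.GetRole())
}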
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#update-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/users#update-the-authenticated-user func (s *UsersService) Edit(ctx context.Context, user *User) (*User, *Response, error) { u := "user" req, err := s.client.NewRequest("PATCH", u, user) @@ -164,7 +165,7 @@ type UserContext struct { // GetHovercard fetches contextual information about user. It requires authentication // via Basic Auth or via OAuth with the repo scope. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#get-contextual-information-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/users#get-contextual-information-for-a-user func (s *UsersService) GetHovercard(ctx context.Context, user string, opts *HovercardOptions) (*Hovercard, *Response, error) { u := fmt.Sprintf("users/%v/hovercard", user) u, err := addOptions(u, opts) @@ -202,7 +203,7 @@ type UserListOptions struct { // // To paginate through all users, populate 'Since' with the ID of the last user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-users +// GitHub API docs: https://docs.github.com/en/rest/users/users#list-users func (s *UsersService) ListAll(ctx context.Context, opts *UserListOptions) ([]*User, *Response, error) { u, err := addOptions("users", opts) if err != nil { @@ -226,7 +227,7 @@ func (s *UsersService) ListAll(ctx context.Context, opts *UserListOptions) ([]*U // ListInvitations lists all currently-open repository invitations for the // authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#list-repository-invitations-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#list-repository-invitations-for-the-authenticated-user func (s *UsersService) ListInvitations(ctx context.Context, opts *ListOptions) ([]*RepositoryInvitation, *Response, error) { u, err := addOptions("user/repository_invitations", opts) if err != nil { @@ -250,7 +251,7 @@ func (s *UsersService) ListInvitations(ctx context.Context, opts *ListOptions) ( // AcceptInvitation accepts the currently-open repository invitation for the // authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#accept-a-repository-invitation +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#accept-a-repository-invitation func (s *UsersService) AcceptInvitation(ctx context.Context, invitationID int64) (*Response, error) { u := fmt.Sprintf("user/repository_invitations/%v", invitationID) req, err := s.client.NewRequest("PATCH", u, nil) @@ -264,7 +265,7 @@ func (s *UsersService) AcceptInvitation(ctx context.Context, invitationID int64) // DeclineInvitation declines the currently-open repository invitation for the // authenticated user. 
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/repos/#decline-a-repository-invitation +// GitHub API docs: https://docs.github.com/en/rest/collaborators/invitations#decline-a-repository-invitation func (s *UsersService) DeclineInvitation(ctx context.Context, invitationID int64) (*Response, error) { u := fmt.Sprintf("user/repository_invitations/%v", invitationID) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/users_administration.go b/vendor/github.com/google/go-github/v45/github/users_administration.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/users_administration.go rename to vendor/github.com/google/go-github/v45/github/users_administration.go diff --git a/vendor/github.com/google/go-github/v42/github/users_blocking.go b/vendor/github.com/google/go-github/v45/github/users_blocking.go similarity index 82% rename from vendor/github.com/google/go-github/v42/github/users_blocking.go rename to vendor/github.com/google/go-github/v45/github/users_blocking.go index cdbc2c2532..3d38d94789 100644 --- a/vendor/github.com/google/go-github/v42/github/users_blocking.go +++ b/vendor/github.com/google/go-github/v45/github/users_blocking.go @@ -12,7 +12,7 @@ import ( // ListBlockedUsers lists all the blocked users by the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-users-blocked-by-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/blocking#list-users-blocked-by-the-authenticated-user func (s *UsersService) ListBlockedUsers(ctx context.Context, opts *ListOptions) ([]*User, *Response, error) { u := "user/blocks" u, err := addOptions(u, opts) @@ -39,7 +39,7 @@ func (s *UsersService) ListBlockedUsers(ctx context.Context, opts *ListOptions) // IsBlocked reports whether specified user is blocked by the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#check-if-a-user-is-blocked-by-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/blocking#check-if-a-user-is-blocked-by-the-authenticated-user func (s *UsersService) IsBlocked(ctx context.Context, user string) (bool, *Response, error) { u := fmt.Sprintf("user/blocks/%v", user) @@ -58,7 +58,7 @@ func (s *UsersService) IsBlocked(ctx context.Context, user string) (bool, *Respo // BlockUser blocks specified user for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#block-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/blocking#block-a-user func (s *UsersService) BlockUser(ctx context.Context, user string) (*Response, error) { u := fmt.Sprintf("user/blocks/%v", user) @@ -75,7 +75,7 @@ func (s *UsersService) BlockUser(ctx context.Context, user string) (*Response, e // UnblockUser unblocks specified user for the authenticated user. 
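The users_blocking.go hunks above only retarget doc links; the API shape is a boolean existence check plus idempotent PUT/DELETE calls. A hedged sketch of the check-then-block pattern (the username is a placeholder, and the client is assumed authenticated as before):

```go
package examples

import (
	"context"
	"fmt"

	"github.com/google/go-github/v45/github"
)

// BlockIfNeeded blocks "spammer" for the authenticated user unless the
// block already exists.
func BlockIfNeeded(ctx context.Context, client *github.Client) error {
	blocked, _, err := client.Users.IsBlocked(ctx, "spammer")
	if err != nil {
		return err
	}
	if blocked {
		fmt.Println("already blocked")
		return nil
	}
	_, err = client.Users.BlockUser(ctx, "spammer")
	return err
}
```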
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#unblock-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/blocking#unblock-a-user func (s *UsersService) UnblockUser(ctx context.Context, user string) (*Response, error) { u := fmt.Sprintf("user/blocks/%v", user) diff --git a/vendor/github.com/google/go-github/v42/github/users_emails.go b/vendor/github.com/google/go-github/v45/github/users_emails.go similarity index 80% rename from vendor/github.com/google/go-github/v42/github/users_emails.go rename to vendor/github.com/google/go-github/v45/github/users_emails.go index 94e7fb81a6..be7e0f819e 100644 --- a/vendor/github.com/google/go-github/v42/github/users_emails.go +++ b/vendor/github.com/google/go-github/v45/github/users_emails.go @@ -17,7 +17,7 @@ type UserEmail struct { // ListEmails lists all email addresses for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-email-addresses-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/emails#list-email-addresses-for-the-authenticated-user func (s *UsersService) ListEmails(ctx context.Context, opts *ListOptions) ([]*UserEmail, *Response, error) { u := "user/emails" u, err := addOptions(u, opts) @@ -41,7 +41,7 @@ func (s *UsersService) ListEmails(ctx context.Context, opts *ListOptions) ([]*Us // AddEmails adds email addresses of the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#add-an-email-address-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/emails#add-an-email-address-for-the-authenticated-user func (s *UsersService) AddEmails(ctx context.Context, emails []string) ([]*UserEmail, *Response, error) { u := "user/emails" req, err := s.client.NewRequest("POST", u, emails) @@ -60,7 +60,7 @@ func (s *UsersService) AddEmails(ctx context.Context, emails []string) ([]*UserE // DeleteEmails deletes email addresses from authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#delete-an-email-address-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/emails#delete-an-email-address-for-the-authenticated-user func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Response, error) { u := "user/emails" req, err := s.client.NewRequest("DELETE", u, emails) diff --git a/vendor/github.com/google/go-github/v42/github/users_followers.go b/vendor/github.com/google/go-github/v45/github/users_followers.go similarity index 74% rename from vendor/github.com/google/go-github/v42/github/users_followers.go rename to vendor/github.com/google/go-github/v45/github/users_followers.go index f26392b6e2..1266e0e9ee 100644 --- a/vendor/github.com/google/go-github/v42/github/users_followers.go +++ b/vendor/github.com/google/go-github/v45/github/users_followers.go @@ -13,8 +13,8 @@ import ( // ListFollowers lists the followers for a user. Passing the empty string will // fetch followers for the authenticated user. 
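One detail worth noticing in the users_emails.go hunks: `AddEmails` and `DeleteEmails` pass the raw `[]string` straight to `NewRequest` as the JSON body, rather than wrapping it in an options struct. A tiny sketch (the address is a placeholder):

```go
package examples

import (
	"context"

	"github.com/google/go-github/v45/github"
)

// AddBackupEmail adds an address for the authenticated user; the slice
// itself becomes the POST body.
func AddBackupEmail(ctx context.Context, client *github.Client) ([]*github.UserEmail, error) {
	emails, _, err := client.Users.AddEmails(ctx, []string{"octocat@example.com"})
	return emails, err
}
```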
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-followers-of-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-followers-of-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-followers-of-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-followers-of-a-user func (s *UsersService) ListFollowers(ctx context.Context, user string, opts *ListOptions) ([]*User, *Response, error) { var u string if user != "" { @@ -44,8 +44,8 @@ func (s *UsersService) ListFollowers(ctx context.Context, user string, opts *Lis // ListFollowing lists the people that a user is following. Passing the empty // string will list people the authenticated user is following. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-the-people-the-authenticated-user-follows -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-the-people-a-user-follows +// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-the-people-the-authenticated-user-follows +// GitHub API docs: https://docs.github.com/en/rest/users/followers#list-the-people-a-user-follows func (s *UsersService) ListFollowing(ctx context.Context, user string, opts *ListOptions) ([]*User, *Response, error) { var u string if user != "" { @@ -75,8 +75,8 @@ func (s *UsersService) ListFollowing(ctx context.Context, user string, opts *Lis // IsFollowing checks if "user" is following "target". Passing the empty // string for "user" will check if the authenticated user is following "target". // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#check-if-a-person-is-followed-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#check-if-a-user-follows-another-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#check-if-a-person-is-followed-by-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#check-if-a-user-follows-another-user func (s *UsersService) IsFollowing(ctx context.Context, user, target string) (bool, *Response, error) { var u string if user != "" { @@ -97,7 +97,7 @@ func (s *UsersService) IsFollowing(ctx context.Context, user, target string) (bo // Follow will cause the authenticated user to follow the specified user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#follow-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#follow-a-user func (s *UsersService) Follow(ctx context.Context, user string) (*Response, error) { u := fmt.Sprintf("user/following/%v", user) req, err := s.client.NewRequest("PUT", u, nil) @@ -110,7 +110,7 @@ func (s *UsersService) Follow(ctx context.Context, user string) (*Response, erro // Unfollow will cause the authenticated user to unfollow the specified user. 
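The followers methods follow the empty-string convention the comments describe: `""` selects the authenticated user. A sketch combining the check with the idempotent PUT (the target username is a placeholder):

```go
package examples

import (
	"context"

	"github.com/google/go-github/v45/github"
)

// FollowOnce follows "defunkt" only if the authenticated user (selected by
// passing "" as the first argument) is not already following them.
func FollowOnce(ctx context.Context, client *github.Client) error {
	following, _, err := client.Users.IsFollowing(ctx, "", "defunkt")
	if err != nil || following {
		return err
	}
	_, err = client.Users.Follow(ctx, "defunkt")
	return err
}
```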
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#unfollow-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/followers#unfollow-a-user func (s *UsersService) Unfollow(ctx context.Context, user string) (*Response, error) { u := fmt.Sprintf("user/following/%v", user) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/users_gpg_keys.go b/vendor/github.com/google/go-github/v45/github/users_gpg_keys.go similarity index 85% rename from vendor/github.com/google/go-github/v42/github/users_gpg_keys.go rename to vendor/github.com/google/go-github/v45/github/users_gpg_keys.go index 387cc9b038..e9ce62221c 100644 --- a/vendor/github.com/google/go-github/v42/github/users_gpg_keys.go +++ b/vendor/github.com/google/go-github/v45/github/users_gpg_keys.go @@ -45,8 +45,8 @@ type GPGEmail struct { // string will fetch keys for the authenticated user. It requires authentication // via Basic Auth or via OAuth with at least read:gpg_key scope. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-gpg-keys-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-gpg-keys-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#list-gpg-keys-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#list-gpg-keys-for-a-user func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opts *ListOptions) ([]*GPGKey, *Response, error) { var u string if user != "" { @@ -76,7 +76,7 @@ func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opts *ListO // GetGPGKey gets extended details for a single GPG key. It requires authentication // via Basic Auth or via OAuth with at least read:gpg_key scope. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#get-a-gpg-key-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#get-a-gpg-key-for-the-authenticated-user func (s *UsersService) GetGPGKey(ctx context.Context, id int64) (*GPGKey, *Response, error) { u := fmt.Sprintf("user/gpg_keys/%v", id) req, err := s.client.NewRequest("GET", u, nil) @@ -96,7 +96,7 @@ func (s *UsersService) GetGPGKey(ctx context.Context, id int64) (*GPGKey, *Respo // CreateGPGKey creates a GPG key. It requires authentication via Basic Auth // or OAuth with at least write:gpg_key scope. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#create-a-gpg-key +// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#create-a-gpg-key func (s *UsersService) CreateGPGKey(ctx context.Context, armoredPublicKey string) (*GPGKey, *Response, error) { gpgKey := &struct { ArmoredPublicKey string `json:"armored_public_key"` @@ -118,7 +118,7 @@ func (s *UsersService) CreateGPGKey(ctx context.Context, armoredPublicKey string // DeleteGPGKey deletes a GPG key. It requires authentication via Basic Auth or // via OAuth with at least admin:gpg_key scope.
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#delete-a-gpg-key-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/gpg-keys#delete-a-gpg-key-for-the-authenticated-user func (s *UsersService) DeleteGPGKey(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("user/gpg_keys/%v", id) req, err := s.client.NewRequest("DELETE", u, nil) diff --git a/vendor/github.com/google/go-github/v42/github/users_keys.go b/vendor/github.com/google/go-github/v45/github/users_keys.go similarity index 78% rename from vendor/github.com/google/go-github/v42/github/users_keys.go rename to vendor/github.com/google/go-github/v45/github/users_keys.go index b5d4f79dfb..59d26cdefa 100644 --- a/vendor/github.com/google/go-github/v42/github/users_keys.go +++ b/vendor/github.com/google/go-github/v45/github/users_keys.go @@ -28,8 +28,8 @@ func (k Key) String() string { // ListKeys lists the verified public keys for a user. Passing the empty // string will fetch keys for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-public-ssh-keys-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#list-public-keys-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/users/keys#list-public-ssh-keys-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/keys#list-public-keys-for-a-user func (s *UsersService) ListKeys(ctx context.Context, user string, opts *ListOptions) ([]*Key, *Response, error) { var u string if user != "" { @@ -58,7 +58,7 @@ func (s *UsersService) ListKeys(ctx context.Context, user string, opts *ListOpti // GetKey fetches a single public key. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#get-a-public-ssh-key-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/keys#get-a-public-ssh-key-for-the-authenticated-user func (s *UsersService) GetKey(ctx context.Context, id int64) (*Key, *Response, error) { u := fmt.Sprintf("user/keys/%v", id) @@ -78,7 +78,7 @@ func (s *UsersService) GetKey(ctx context.Context, id int64) (*Key, *Response, e // CreateKey adds a public key for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#create-a-public-ssh-key-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/keys#create-a-public-ssh-key-for-the-authenticated-user func (s *UsersService) CreateKey(ctx context.Context, key *Key) (*Key, *Response, error) { u := "user/keys" @@ -98,7 +98,7 @@ func (s *UsersService) CreateKey(ctx context.Context, key *Key) (*Key, *Response // DeleteKey deletes a public key. 
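For the users_keys.go endpoints above, creation takes a populated `*github.Key`. A sketch of registering a key for the authenticated user (the title and key material are placeholders):

```go
package examples

import (
	"context"

	"github.com/google/go-github/v45/github"
)

// UploadKey registers a public SSH key for the authenticated user.
func UploadKey(ctx context.Context, client *github.Client) (*github.Key, error) {
	key, _, err := client.Users.CreateKey(ctx, &github.Key{
		Title: github.String("work-laptop"),
		Key:   github.String("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@host"),
	})
	return key, err
}
```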
// -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/users/#delete-a-public-ssh-key-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/users/keys#delete-a-public-ssh-key-for-the-authenticated-user func (s *UsersService) DeleteKey(ctx context.Context, id int64) (*Response, error) { u := fmt.Sprintf("user/keys/%v", id) diff --git a/vendor/github.com/google/go-github/v42/github/users_packages.go b/vendor/github.com/google/go-github/v45/github/users_packages.go similarity index 75% rename from vendor/github.com/google/go-github/v42/github/users_packages.go rename to vendor/github.com/google/go-github/v45/github/users_packages.go index cd20f8c189..da04919ecc 100644 --- a/vendor/github.com/google/go-github/v42/github/users_packages.go +++ b/vendor/github.com/google/go-github/v45/github/users_packages.go @@ -13,8 +13,8 @@ import ( // List the packages for a user. Passing the empty string for "user" will // list packages for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#list-packages-for-the-authenticated-users-namespace -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#list-packages-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-the-authenticated-users-namespace +// GitHub API docs: https://docs.github.com/en/rest/packages#list-packages-for-a-user func (s *UsersService) ListPackages(ctx context.Context, user string, opts *PackageListOptions) ([]*Package, *Response, error) { var u string if user != "" { @@ -44,8 +44,8 @@ func (s *UsersService) ListPackages(ctx context.Context, user string, opts *Pack // Get a package by name for a user. Passing the empty string for "user" will // get the package for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-for-a-user func (s *UsersService) GetPackage(ctx context.Context, user, packageType, packageName string) (*Package, *Response, error) { var u string if user != "" { @@ -71,8 +71,8 @@ func (s *UsersService) GetPackage(ctx context.Context, user, packageType, packag // Delete a package from a user. Passing the empty string for "user" will // delete the package for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-a-package-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-for-a-user func (s *UsersService) DeletePackage(ctx context.Context, user, packageType, packageName string) (*Response, error) { var u string if user != "" { @@ -92,8 +92,8 @@ func (s *UsersService) DeletePackage(ctx context.Context, user, packageType, pac // Restore a package to a user. Passing the empty string for "user" will // restore the package for the authenticated user. 
// -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-a-package-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-a-package-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-for-a-user func (s *UsersService) RestorePackage(ctx context.Context, user, packageType, packageName string) (*Response, error) { var u string if user != "" { @@ -113,8 +113,8 @@ func (s *UsersService) RestorePackage(ctx context.Context, user, packageType, pa // Get all versions of a package for a user. Passing the empty string for "user" will // get versions for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-all-package-versions-for-a-package-owned-by-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/users#delete-an-email-address-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-all-package-versions-for-a-package-owned-by-a-user func (s *UsersService) PackageGetAllVersions(ctx context.Context, user, packageType, packageName string, opts *PackageListOptions) ([]*PackageVersion, *Response, error) { var u string if user != "" { @@ -144,8 +144,8 @@ func (s *UsersService) PackageGetAllVersions(ctx context.Context, user, packageT // Get a specific version of a package for a user. Passing the empty string for "user" will // get the version for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#get-a-package-version-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#get-a-package-version-for-a-user func (s *UsersService) PackageGetVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*PackageVersion, *Response, error) { var u string if user != "" { @@ -171,8 +171,8 @@ func (s *UsersService) PackageGetVersion(ctx context.Context, user, packageType, // Delete a package version for a user. Passing the empty string for "user" will // delete the version for the authenticated user. // -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#delete-package-version-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-a-package-version-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#delete-package-version-for-a-user func (s *UsersService) PackageDeleteVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*Response, error) { var u string if user != "" { @@ -192,8 +192,8 @@ func (s *UsersService) PackageDeleteVersion(ctx context.Context, user, packageTy // Restore a package version to a user. Passing the empty string for "user" will // restore the version for the authenticated user. 
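The users_packages.go methods all share the same pattern: `""` for the user selects the authenticated account, and every call is parameterized by package type and name. A sketch of listing versions (the package type and name are placeholders):

```go
package examples

import (
	"context"
	"fmt"

	"github.com/google/go-github/v45/github"
)

// ListContainerVersions prints the version IDs of a container package
// owned by the authenticated user.
func ListContainerVersions(ctx context.Context, client *github.Client) error {
	versions, _, err := client.Users.PackageGetAllVersions(ctx, "", "container", "my-image", nil)
	if err != nil {
		return err
	}
	for _, v := range versions {
		fmt.Println(v.GetID())
	}
	return nil
}
```

Note that the replaced `-` line in the PackageGetAllVersions hunk shows the old file pointed at an unrelated users-emails doc anchor; the `+` line corrects it to the package-versions page.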
// -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-a-package-version-for-the-authenticated-user -// GitHub API docs: https://docs.github.com/en/rest/reference/packages#restore-package-version-for-a-user +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-a-package-version-for-the-authenticated-user +// GitHub API docs: https://docs.github.com/en/rest/packages#restore-package-version-for-a-user func (s *UsersService) PackageRestoreVersion(ctx context.Context, user, packageType, packageName string, packageVersionID int64) (*Response, error) { var u string if user != "" { diff --git a/vendor/github.com/google/go-github/v42/github/users_projects.go b/vendor/github.com/google/go-github/v45/github/users_projects.go similarity index 88% rename from vendor/github.com/google/go-github/v42/github/users_projects.go rename to vendor/github.com/google/go-github/v45/github/users_projects.go index dd9ceaf2f8..0cbd61f923 100644 --- a/vendor/github.com/google/go-github/v42/github/users_projects.go +++ b/vendor/github.com/google/go-github/v45/github/users_projects.go @@ -12,7 +12,7 @@ import ( // ListProjects lists the projects for the specified user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#list-user-projects +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#list-user-projects func (s *UsersService) ListProjects(ctx context.Context, user string, opts *ProjectListOptions) ([]*Project, *Response, error) { u := fmt.Sprintf("users/%v/projects", user) u, err := addOptions(u, opts) @@ -47,7 +47,7 @@ type CreateUserProjectOptions struct { // CreateProject creates a GitHub Project for the current user. // -// GitHub API docs: https://docs.github.com/en/free-pro-team@latest/rest/reference/projects/#create-a-user-project +// GitHub API docs: https://docs.github.com/en/rest/projects/projects#create-a-user-project func (s *UsersService) CreateProject(ctx context.Context, opts *CreateUserProjectOptions) (*Project, *Response, error) { u := "user/projects" req, err := s.client.NewRequest("POST", u, opts) diff --git a/vendor/github.com/google/go-github/v42/github/with_appengine.go b/vendor/github.com/google/go-github/v45/github/with_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/with_appengine.go rename to vendor/github.com/google/go-github/v45/github/with_appengine.go diff --git a/vendor/github.com/google/go-github/v42/github/without_appengine.go b/vendor/github.com/google/go-github/v45/github/without_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v42/github/without_appengine.go rename to vendor/github.com/google/go-github/v45/github/without_appengine.go diff --git a/vendor/github.com/google/trillian/CHANGELOG.md b/vendor/github.com/google/trillian/CHANGELOG.md index ca1ffb55c3..7cee37cb91 100644 --- a/vendor/github.com/google/trillian/CHANGELOG.md +++ b/vendor/github.com/google/trillian/CHANGELOG.md @@ -2,6 +2,24 @@ ## HEAD +* `countFromInformationSchema` function to add support for MySQL 8. + +### Removals + + * #2710: Unused `storage/tools/dumplib` was removed. The useful storage format + regression test moved to `integration/format`. + * #2711: Unused `storage/tools/hasher` removed. + * #2715: Packages under `merkle` are deprecated and to be removed. Use + https://github.com/transparency-dev/merkle instead. + +### Misc improvements + + * #2712: Fix MySQL world-writable config warning. 
+ * #2726: Check the tile height invariant stricter. No changes required. + +### Dependency updates + * #2731: Update `protoc` from `v3.12.4` to `v3.20.1` + ## v1.4.0 * Recommended go version for development: 1.17 diff --git a/vendor/github.com/google/trillian/README.md b/vendor/github.com/google/trillian/README.md index 5d79bc16c6..b78162e144 100644 --- a/vendor/github.com/google/trillian/README.md +++ b/vendor/github.com/google/trillian/README.md @@ -181,7 +181,7 @@ the original files; if you do, you'll need to install the prerequisites: - protocol buffer definitions for standard Google APIs: ```bash - git clone https://github.com/googleapis/googleapis.git $GOPATH/src/github.com/googleapis/googleapis + git clone https://github.com/googleapis/googleapis.git $(go env GOPATH)/src/github.com/googleapis/googleapis ``` and run the following: diff --git a/vendor/github.com/google/trillian/client/log_client.go b/vendor/github.com/google/trillian/client/log_client.go index e92bdd5ee8..4083200912 100644 --- a/vendor/github.com/google/trillian/client/log_client.go +++ b/vendor/github.com/google/trillian/client/log_client.go @@ -26,6 +26,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/client/backoff" "github.com/google/trillian/types" + "github.com/transparency-dev/merkle" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -231,7 +232,7 @@ func (c *LogClient) UpdateRoot(ctx context.Context) (*types.LogRootV1, error) { // It is best to call this method with a context that will timeout to avoid // waiting forever. func (c *LogClient) WaitForInclusion(ctx context.Context, data []byte) error { - leaf := c.BuildLeaf(data) + leaf := prepareLeaf(c.hasher, data) // If a minimum merge delay has been configured, wait at least that long before // starting to poll @@ -305,7 +306,7 @@ func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int6 if want := indexes[0] + int64(i); index != want { return fmt.Errorf("missing index in contiguous index range. got: %v, want: %v", index, want) } - leaf := c.BuildLeaf(dataByIndex[index]) + leaf := prepareLeaf(c.hasher, dataByIndex[index]) leaf.LeafIndex = index leaves = append(leaves, leaf) } @@ -319,10 +320,19 @@ func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int6 // QueueLeaf adds a leaf to a Trillian log without blocking. // AlreadyExists is considered a success case by this function. func (c *LogClient) QueueLeaf(ctx context.Context, data []byte) error { - leaf := c.BuildLeaf(data) + leaf := prepareLeaf(c.hasher, data) _, err := c.client.QueueLeaf(ctx, &trillian.QueueLeafRequest{ LogId: c.LogID, Leaf: leaf, }) return err } + +// prepareLeaf returns a trillian.LogLeaf prepopulated with leaf data and hash.
+func prepareLeaf(hasher merkle.LogHasher, data []byte) *trillian.LogLeaf { + leafHash := hasher.HashLeaf(data) + return &trillian.LogLeaf{ + LeafValue: data, + MerkleLeafHash: leafHash, + } +} diff --git a/vendor/github.com/google/trillian/client/log_verifier.go b/vendor/github.com/google/trillian/client/log_verifier.go index b9df35918b..3e8ecfff11 100644 --- a/vendor/github.com/google/trillian/client/log_verifier.go +++ b/vendor/github.com/google/trillian/client/log_verifier.go @@ -19,10 +19,10 @@ import ( "fmt" "github.com/google/trillian" - "github.com/google/trillian/merkle/hashers" - "github.com/google/trillian/merkle/logverifier" - "github.com/google/trillian/merkle/rfc6962" "github.com/google/trillian/types" + "github.com/transparency-dev/merkle" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" ) // LogVerifier allows verification of output from Trillian Logs, both regular @@ -30,16 +30,12 @@ import ( // after construction). type LogVerifier struct { // hasher is the hash strategy used to compute nodes in the Merkle tree. - hasher hashers.LogHasher - v logverifier.LogVerifier + hasher merkle.LogHasher } // NewLogVerifier returns an object that can verify output from Trillian Logs. -func NewLogVerifier(hasher hashers.LogHasher) *LogVerifier { - return &LogVerifier{ - hasher: hasher, - v: logverifier.New(hasher), - } +func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier { + return &LogVerifier{hasher: hasher} } // NewLogVerifierFromTree creates a new LogVerifier using the algorithms @@ -74,7 +70,7 @@ func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.Sig // Implicitly trust the first root we get. if trusted.TreeSize != 0 { // Verify consistency proof. - if err := c.v.VerifyConsistencyProof(int64(trusted.TreeSize), int64(r.TreeSize), trusted.RootHash, r.RootHash, consistency); err != nil { + if err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil { return nil, fmt.Errorf("failed to verify consistency proof from %d->%d %x->%x: %v", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err) } } @@ -83,25 +79,13 @@ func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.Sig // VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash // matches the given trusted root. -func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, proof *trillian.Proof) error { +func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, pf *trillian.Proof) error { if trusted == nil { return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil") } - if proof == nil { + if pf == nil { return fmt.Errorf("VerifyInclusionByHash() error: proof == nil") } - return c.v.VerifyInclusionProof(proof.LeafIndex, int64(trusted.TreeSize), proof.Hashes, - trusted.RootHash, leafHash) -} - -// BuildLeaf runs the leaf hasher over data and builds a leaf. -// TODO(pavelkalinnikov): This can be misleading as it creates a partially -// filled LogLeaf. Consider returning a pair instead, or leafHash only. 
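The log_verifier.go rewrite above drops trillian's internal `merkle/hashers` and `merkle/logverifier` packages in favor of github.com/transparency-dev/merkle, which the CHANGELOG entry earlier in this diff also points to. A sketch of the new verification path, mirroring the rewritten `VerifyInclusionByHash`; the index, size, proof hashes, and root are placeholders that would come from the log:

```go
package examples

import (
	"fmt"

	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

// verifyInclusion hashes the leaf with the RFC 6962 hasher and checks the
// inclusion proof against the signed root hash.
func verifyInclusion(data []byte, index, size uint64, proofHashes [][]byte, root []byte) error {
	hasher := rfc6962.DefaultHasher
	leafHash := hasher.HashLeaf(data)
	if err := proof.VerifyInclusion(hasher, index, size, leafHash, proofHashes, root); err != nil {
		return fmt.Errorf("inclusion check failed: %w", err)
	}
	return nil
}
```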
-func (c *LogVerifier) BuildLeaf(data []byte) *trillian.LogLeaf { - leafHash := c.hasher.HashLeaf(data) - return &trillian.LogLeaf{ - LeafValue: data, - MerkleLeafHash: leafHash, - } + return proof.VerifyInclusion(c.hasher, uint64(pf.LeafIndex), trusted.TreeSize, leafHash, pf.Hashes, trusted.RootHash) } diff --git a/vendor/github.com/google/trillian/client/rpcflags/rpcflags.go b/vendor/github.com/google/trillian/client/rpcflags/rpcflags.go index 964442723e..351a351568 100644 --- a/vendor/github.com/google/trillian/client/rpcflags/rpcflags.go +++ b/vendor/github.com/google/trillian/client/rpcflags/rpcflags.go @@ -20,6 +20,7 @@ import ( "github.com/golang/glog" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) // tlsCertFile is the flag-assigned value for the path to the Trillian server's TLS certificate. @@ -32,7 +33,7 @@ func NewClientDialOptionsFromFlags() ([]grpc.DialOption, error) { if *tlsCertFile == "" { glog.Warning("Using an insecure gRPC connection to Trillian") - dialOpts = append(dialOpts, grpc.WithInsecure()) + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { creds, err := credentials.NewClientTLSFromFile(*tlsCertFile, "") if err != nil { diff --git a/vendor/github.com/google/trillian/cloudbuild.yaml b/vendor/github.com/google/trillian/cloudbuild.yaml index c9685f668e..b1ee8f780f 100644 --- a/vendor/github.com/google/trillian/cloudbuild.yaml +++ b/vendor/github.com/google/trillian/cloudbuild.yaml @@ -62,14 +62,6 @@ steps: waitFor: - prepare -# Run Bazel check. -- id: bazel - name: 'gcr.io/cloud-marketplace-containers/google/bazel:1.1.0' - entrypoint: bazel - args: ['build', '//:*'] - waitFor: - - prepare - # Presubmit - id: presubmit name: 'gcr.io/${PROJECT_ID}/trillian_testbase' diff --git a/vendor/github.com/google/trillian/cloudbuild_master.yaml b/vendor/github.com/google/trillian/cloudbuild_master.yaml index 0d96896ebc..751478890a 100644 --- a/vendor/github.com/google/trillian/cloudbuild_master.yaml +++ b/vendor/github.com/google/trillian/cloudbuild_master.yaml @@ -29,7 +29,7 @@ steps: waitFor: - tag_mysql - id: build_db_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/db_server/Dockerfile - --destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA} @@ -39,7 +39,7 @@ steps: waitFor: - push_mysql - id: build_log_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_server/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA} @@ -48,7 +48,7 @@ steps: - --cache-dir= # Cache is in Google Container Registry waitFor: ["-"] - id: build_log_signer - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_signer/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA} diff --git a/vendor/github.com/google/trillian/cloudbuild_pr.yaml b/vendor/github.com/google/trillian/cloudbuild_pr.yaml index e8cace45bd..2309e5615f 100644 --- a/vendor/github.com/google/trillian/cloudbuild_pr.yaml +++ b/vendor/github.com/google/trillian/cloudbuild_pr.yaml @@ -35,7 +35,7 @@ steps: - tag_mysql - id: build_db_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/db_server/Dockerfile - 
--destination=gcr.io/${PROJECT_ID}/db_server:${COMMIT_SHA} @@ -45,7 +45,7 @@ steps: - push_mysql - id: build_log_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_server/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_server:${COMMIT_SHA} @@ -53,7 +53,7 @@ steps: - --cache-dir= # Cache is in Google Container Registry waitFor: ['-'] - id: build_log_signer - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_signer/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_signer:${COMMIT_SHA} @@ -73,7 +73,7 @@ steps: name: gcr.io/cloud-builders/kubectl args: - apply - - --server-dry-run + - --dry-run=server - -f=examples/deployment/kubernetes/etcd-deployment.yaml env: - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} @@ -106,7 +106,7 @@ steps: name: gcr.io/cloud-builders/kubectl args: - apply - - --server-dry-run + - --dry-run=server - -f=envsubst-spanner/etcd-cluster.yaml - -f=envsubst-spanner/trillian-ci-spanner.yaml - -f=envsubst-spanner/trillian-log-deployment.yaml @@ -153,7 +153,7 @@ steps: name: gcr.io/cloud-builders/kubectl args: - apply - - --server-dry-run + - --dry-run=server - --namespace=mysql - -f=envsubst-mysql/etcd-cluster.yaml - -f=envsubst-mysql/trillian-ci-mysql.yaml diff --git a/vendor/github.com/google/trillian/cloudbuild_tag.yaml b/vendor/github.com/google/trillian/cloudbuild_tag.yaml index 78e6748753..3455a9e4ab 100644 --- a/vendor/github.com/google/trillian/cloudbuild_tag.yaml +++ b/vendor/github.com/google/trillian/cloudbuild_tag.yaml @@ -25,7 +25,7 @@ steps: waitFor: - tag_mysql - id: build_db_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/db_server/Dockerfile - --destination=gcr.io/${PROJECT_ID}/db_server:${TAG_NAME} @@ -34,7 +34,7 @@ steps: waitFor: - push_mysql - id: build_log_server - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_server/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_server:${TAG_NAME} @@ -42,7 +42,7 @@ steps: - --cache-dir= # Cache is in Google Container Registry waitFor: ["-"] - id: build_log_signer - name: gcr.io/kaniko-project/executor + name: gcr.io/kaniko-project/executor:v1.6.0 args: - --dockerfile=examples/deployment/docker/log_signer/Dockerfile - --destination=gcr.io/${PROJECT_ID}/log_signer:${TAG_NAME} diff --git a/vendor/github.com/google/trillian/gen.go b/vendor/github.com/google/trillian/gen.go index b0db0b2cb2..df5b8036ad 100644 --- a/vendor/github.com/google/trillian/gen.go +++ b/vendor/github.com/google/trillian/gen.go @@ -14,7 +14,7 @@ package trillian -//go:generate protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/googleapis/googleapis --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. --go-grpc_opt=require_unimplemented_servers=false trillian_log_api.proto trillian_admin_api.proto trillian.proto --doc_out=markdown,api.md:./docs/ +//go:generate protoc -I=. -I=$GOPATH/src/github.com/googleapis/googleapis --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. --go-grpc_opt=require_unimplemented_servers=false trillian_log_api.proto trillian_admin_api.proto trillian.proto --doc_out=markdown,api.md:./docs/ //go:generate protoc -I=. --go_out=paths=source_relative:. 
crypto/keyspb/keyspb.proto //go:generate mockgen -package tmock -destination testonly/tmock/mock_log_server.go github.com/google/trillian TrillianLogServer diff --git a/vendor/github.com/google/trillian/merkle/logverifier/hash_chainer.go b/vendor/github.com/google/trillian/merkle/logverifier/hash_chainer.go deleted file mode 100644 index be7192e35d..0000000000 --- a/vendor/github.com/google/trillian/merkle/logverifier/hash_chainer.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logverifier - -import "github.com/google/trillian/merkle/hashers" - -// hashChainer provides convenience methods for hashing subranges of Merkle -// Tree proofs to obtain (sub-)tree hashes. Depending on how the path to a tree -// node relates to the query and/or tree borders, different methods are there. -// -// TODO(pavelkalinnikov): Add a Merkle Trees doc with visual explanations. -type hashChainer struct { - hasher hashers.LogHasher -} - -// chainInner computes a subtree hash for a node on or below the tree's right -// border. Assumes |proof| hashes are ordered from lower levels to upper, and -// |seed| is the initial subtree/leaf hash on the path located at the specified -// |index| on its level. -func (c hashChainer) chainInner(seed []byte, proof [][]byte, index int64) []byte { - for i, h := range proof { - if (index>>uint(i))&1 == 0 { - seed = c.hasher.HashChildren(seed, h) - } else { - seed = c.hasher.HashChildren(h, seed) - } - } - return seed -} - -// chainInnerRight computes a subtree hash like chainInner, but only takes -// hashes to the left from the path into consideration, which effectively means -// the result is a hash of the corresponding earlier version of this subtree. -func (c hashChainer) chainInnerRight(seed []byte, proof [][]byte, index int64) []byte { - for i, h := range proof { - if (index>>uint(i))&1 == 1 { - seed = c.hasher.HashChildren(h, seed) - } - } - return seed -} - -// chainBorderRight chains proof hashes along tree borders. This differs from -// inner chaining because |proof| contains only left-side subtree hashes. -func (c hashChainer) chainBorderRight(seed []byte, proof [][]byte) []byte { - for _, h := range proof { - seed = c.hasher.HashChildren(h, seed) - } - return seed -} diff --git a/vendor/github.com/google/trillian/merkle/logverifier/log_verifier.go b/vendor/github.com/google/trillian/merkle/logverifier/log_verifier.go deleted file mode 100644 index 3e023aa8d4..0000000000 --- a/vendor/github.com/google/trillian/merkle/logverifier/log_verifier.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logverifier - -import ( - "bytes" - "errors" - "fmt" - "math/bits" - - "github.com/google/trillian/merkle/hashers" -) - -// RootMismatchError occurs when an inclusion proof fails. -type RootMismatchError struct { - ExpectedRoot []byte - CalculatedRoot []byte -} - -func (e RootMismatchError) Error() string { - return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot) -} - -// LogVerifier verifies inclusion and consistency proofs for append only logs. -type LogVerifier struct { - hasher hashers.LogHasher -} - -// New returns a new LogVerifier for a tree. -func New(hasher hashers.LogHasher) LogVerifier { - return LogVerifier{hasher} -} - -// VerifyInclusionProof verifies the correctness of the proof given the passed -// in information about the tree and leaf. -func (v LogVerifier) VerifyInclusionProof(leafIndex, treeSize int64, proof [][]byte, root []byte, leafHash []byte) error { - calcRoot, err := v.RootFromInclusionProof(leafIndex, treeSize, proof, leafHash) - if err != nil { - return err - } - if !bytes.Equal(calcRoot, root) { - return RootMismatchError{ - CalculatedRoot: calcRoot, - ExpectedRoot: root, - } - } - return nil -} - -// RootFromInclusionProof calculates the expected tree root given the proof and leaf. -// leafIndex starts at 0. treeSize is the number of nodes in the tree. -// proof is an array of neighbor nodes from the bottom to the root. -func (v LogVerifier) RootFromInclusionProof(leafIndex, treeSize int64, proof [][]byte, leafHash []byte) ([]byte, error) { - switch { - case leafIndex < 0: - return nil, fmt.Errorf("leafIndex %d < 0", leafIndex) - case treeSize < 0: - return nil, fmt.Errorf("treeSize %d < 0", treeSize) - case leafIndex >= treeSize: - return nil, fmt.Errorf("leafIndex is beyond treeSize: %d >= %d", leafIndex, treeSize) - } - if got, want := len(leafHash), v.hasher.Size(); got != want { - return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want) - } - - inner, border := decompInclProof(leafIndex, treeSize) - if got, want := len(proof), inner+border; got != want { - return nil, fmt.Errorf("wrong proof size %d, want %d", got, want) - } - - ch := hashChainer(v) - res := ch.chainInner(leafHash, proof[:inner], leafIndex) - res = ch.chainBorderRight(res, proof[inner:]) - return res, nil -} - -// VerifyConsistencyProof checks that the passed in consistency proof is valid -// between the passed in tree snapshots. Snapshots are the respective tree -// sizes. Accepts shapshot2 >= snapshot1 >= 0. 
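The deleted `VerifyConsistencyProof` is likewise superseded by the external module, as the `VerifyRoot` hunk earlier in this diff shows: sizes become `uint64` and the proof argument moves ahead of the two roots. A mapping sketch (all argument values are placeholders):

```go
package examples

import (
	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

// checkConsistency replaces the old call shape
//   v.VerifyConsistencyProof(size1, size2, root1, root2, hashes)
// with the transparency-dev equivalent; note the reordered arguments.
func checkConsistency(size1, size2 uint64, root1, root2 []byte, hashes [][]byte) error {
	return proof.VerifyConsistency(rfc6962.DefaultHasher, size1, size2, hashes, root1, root2)
}
```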
-func (v LogVerifier) VerifyConsistencyProof(snapshot1, snapshot2 int64, root1, root2 []byte, proof [][]byte) error { - switch { - case snapshot1 < 0: - return fmt.Errorf("snapshot1 (%d) < 0 ", snapshot1) - case snapshot2 < snapshot1: - return fmt.Errorf("snapshot2 (%d) < snapshot1 (%d)", snapshot1, snapshot2) - case snapshot1 == snapshot2: - if !bytes.Equal(root1, root2) { - return RootMismatchError{ - CalculatedRoot: root1, - ExpectedRoot: root2, - } - } else if len(proof) > 0 { - return errors.New("root1 and root2 match, but proof is non-empty") - } - return nil // Proof OK. - case snapshot1 == 0: - // Any snapshot greater than 0 is consistent with snapshot 0. - if len(proof) > 0 { - return fmt.Errorf("expected empty proof, but got %d components", len(proof)) - } - return nil // Proof OK. - case len(proof) == 0: - return errors.New("empty proof") - } - - inner, border := decompInclProof(snapshot1-1, snapshot2) - shift := bits.TrailingZeros64(uint64(snapshot1)) - inner -= shift // Note: shift < inner if snapshot1 < snapshot2. - - // The proof includes the root hash for the sub-tree of size 2^shift. - seed, start := proof[0], 1 - if snapshot1 == 1<> uint(shift) // Start chaining from level |shift|. - hash1 := ch.chainInnerRight(seed, proof[:inner], mask) - hash1 = ch.chainBorderRight(hash1, proof[inner:]) - if !bytes.Equal(hash1, root1) { - return RootMismatchError{ - CalculatedRoot: hash1, - ExpectedRoot: root1, - } - } - - // Verify the second root. - hash2 := ch.chainInner(seed, proof[:inner], mask) - hash2 = ch.chainBorderRight(hash2, proof[inner:]) - if !bytes.Equal(hash2, root2) { - return RootMismatchError{ - CalculatedRoot: hash2, - ExpectedRoot: root2, - } - } - - return nil // Proof OK. -} - -// VerifiedPrefixHashFromInclusionProof calculates a root hash over leaves -// [0..subSize), based on the inclusion |proof| and |leafHash| for a leaf at -// index |subSize-1| in a tree of the specified |size| with the passed in -// |root| hash. -// Returns an error if the |proof| verification fails. The resulting smaller -// tree's root hash is trusted iff the bigger tree's |root| hash is trusted. -func (v LogVerifier) VerifiedPrefixHashFromInclusionProof( - subSize, size int64, - proof [][]byte, root []byte, leafHash []byte, -) ([]byte, error) { - if subSize <= 0 { - return nil, fmt.Errorf("subtree size is %d, want > 0", subSize) - } - leaf := subSize - 1 - if err := v.VerifyInclusionProof(leaf, size, proof, root, leafHash); err != nil { - return nil, err - } - - inner := innerProofSize(leaf, size) - ch := hashChainer(v) - res := ch.chainInnerRight(leafHash, proof[:inner], leaf) - res = ch.chainBorderRight(res, proof[inner:]) - return res, nil -} - -// decompInclProof breaks down inclusion proof for a leaf at the specified -// |index| in a tree of the specified |size| into 2 components. The splitting -// point between them is where paths to leaves |index| and |size-1| diverge. -// Returns lengths of the bottom and upper proof parts correspondingly. The sum -// of the two determines the correct length of the inclusion proof. 
-func decompInclProof(index, size int64) (int, int) { - inner := innerProofSize(index, size) - border := bits.OnesCount64(uint64(index) >> uint(inner)) - return inner, border -} - -func innerProofSize(index, size int64) int { - return bits.Len64(uint64(index ^ (size - 1))) -} diff --git a/vendor/github.com/google/trillian/trillian.pb.go b/vendor/github.com/google/trillian/trillian.pb.go index 50d58e099f..e127dd0a6b 100644 --- a/vendor/github.com/google/trillian/trillian.pb.go +++ b/vendor/github.com/google/trillian/trillian.pb.go @@ -14,18 +14,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.28.0 +// protoc v3.20.1 // source: trillian.proto package trillian import ( - any "github.com/golang/protobuf/ptypes/any" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -316,16 +316,16 @@ type Tree struct { Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"` // Storage-specific settings. // Varies according to the storage implementation backing Trillian. - StorageSettings *any.Any `protobuf:"bytes,13,opt,name=storage_settings,json=storageSettings,proto3" json:"storage_settings,omitempty"` + StorageSettings *anypb.Any `protobuf:"bytes,13,opt,name=storage_settings,json=storageSettings,proto3" json:"storage_settings,omitempty"` // Interval after which a new signed root is produced even if there have been // no submission. If zero, this behavior is disabled. - MaxRootDuration *duration.Duration `protobuf:"bytes,15,opt,name=max_root_duration,json=maxRootDuration,proto3" json:"max_root_duration,omitempty"` + MaxRootDuration *durationpb.Duration `protobuf:"bytes,15,opt,name=max_root_duration,json=maxRootDuration,proto3" json:"max_root_duration,omitempty"` // Time of tree creation. // Readonly. - CreateTime *timestamp.Timestamp `protobuf:"bytes,16,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Time of last tree update. // Readonly (automatically assigned on updates). - UpdateTime *timestamp.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // If true, the tree has been deleted. // Deleted trees may be undeleted during a certain time window, after which // they're permanently deleted (and unrecoverable). @@ -333,7 +333,7 @@ type Tree struct { Deleted bool `protobuf:"varint,19,opt,name=deleted,proto3" json:"deleted,omitempty"` // Time of tree deletion, if any. // Readonly. 
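The regenerated trillian.pb.go swaps the deprecated `github.com/golang/protobuf/ptypes/*` imports for the canonical `google.golang.org/protobuf/types/known/*` packages. Recent golang/protobuf releases define the old names as type aliases of the new ones, so for callers this is essentially an import rename. A sketch of constructing a `Tree` with the new imports (field values are placeholders):

```go
package examples

import (
	"time"

	"github.com/google/trillian"
	"google.golang.org/protobuf/types/known/durationpb"
)

// newLogTree builds a Tree using the well-known-type packages the
// regenerated code now references directly.
func newLogTree() *trillian.Tree {
	return &trillian.Tree{
		TreeState:       trillian.TreeState_ACTIVE,
		TreeType:        trillian.TreeType_LOG,
		DisplayName:     "example-log",
		MaxRootDuration: durationpb.New(time.Hour),
	}
}
```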
- DeleteTime *timestamp.Timestamp `protobuf:"bytes,20,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` } func (x *Tree) Reset() { @@ -403,28 +403,28 @@ func (x *Tree) GetDescription() string { return "" } -func (x *Tree) GetStorageSettings() *any.Any { +func (x *Tree) GetStorageSettings() *anypb.Any { if x != nil { return x.StorageSettings } return nil } -func (x *Tree) GetMaxRootDuration() *duration.Duration { +func (x *Tree) GetMaxRootDuration() *durationpb.Duration { if x != nil { return x.MaxRootDuration } return nil } -func (x *Tree) GetCreateTime() *timestamp.Timestamp { +func (x *Tree) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } return nil } -func (x *Tree) GetUpdateTime() *timestamp.Timestamp { +func (x *Tree) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -438,7 +438,7 @@ func (x *Tree) GetDeleted() bool { return false } -func (x *Tree) GetDeleteTime() *timestamp.Timestamp { +func (x *Tree) GetDeleteTime() *timestamppb.Timestamp { if x != nil { return x.DeleteTime } @@ -709,16 +709,16 @@ func file_trillian_proto_rawDescGZIP() []byte { var file_trillian_proto_enumTypes = make([]protoimpl.EnumInfo, 4) var file_trillian_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_trillian_proto_goTypes = []interface{}{ - (LogRootFormat)(0), // 0: trillian.LogRootFormat - (HashStrategy)(0), // 1: trillian.HashStrategy - (TreeState)(0), // 2: trillian.TreeState - (TreeType)(0), // 3: trillian.TreeType - (*Tree)(nil), // 4: trillian.Tree - (*SignedLogRoot)(nil), // 5: trillian.SignedLogRoot - (*Proof)(nil), // 6: trillian.Proof - (*any.Any)(nil), // 7: google.protobuf.Any - (*duration.Duration)(nil), // 8: google.protobuf.Duration - (*timestamp.Timestamp)(nil), // 9: google.protobuf.Timestamp + (LogRootFormat)(0), // 0: trillian.LogRootFormat + (HashStrategy)(0), // 1: trillian.HashStrategy + (TreeState)(0), // 2: trillian.TreeState + (TreeType)(0), // 3: trillian.TreeType + (*Tree)(nil), // 4: trillian.Tree + (*SignedLogRoot)(nil), // 5: trillian.SignedLogRoot + (*Proof)(nil), // 6: trillian.Proof + (*anypb.Any)(nil), // 7: google.protobuf.Any + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp } var file_trillian_proto_depIdxs = []int32{ 2, // 0: trillian.Tree.tree_state:type_name -> trillian.TreeState diff --git a/vendor/github.com/google/trillian/trillian_admin_api.pb.go b/vendor/github.com/google/trillian/trillian_admin_api.pb.go index 1aff3a4ec6..8e05f5bb1f 100644 --- a/vendor/github.com/google/trillian/trillian_admin_api.pb.go +++ b/vendor/github.com/google/trillian/trillian_admin_api.pb.go @@ -14,16 +14,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.28.0 +// protoc v3.20.1 // source: trillian_admin_api.proto package trillian import ( - field_mask "google.golang.org/genproto/protobuf/field_mask" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" reflect "reflect" sync "sync" ) @@ -244,7 +244,7 @@ type UpdateTreeRequest struct { Tree *Tree `protobuf:"bytes,1,opt,name=tree,proto3" json:"tree,omitempty"` // Fields modified by the update request. 
// For example: "tree_state", "display_name", "description". - UpdateMask *field_mask.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateTreeRequest) Reset() { @@ -286,7 +286,7 @@ func (x *UpdateTreeRequest) GetTree() *Tree { return nil } -func (x *UpdateTreeRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateTreeRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -474,15 +474,15 @@ func file_trillian_admin_api_proto_rawDescGZIP() []byte { var file_trillian_admin_api_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_trillian_admin_api_proto_goTypes = []interface{}{ - (*ListTreesRequest)(nil), // 0: trillian.ListTreesRequest - (*ListTreesResponse)(nil), // 1: trillian.ListTreesResponse - (*GetTreeRequest)(nil), // 2: trillian.GetTreeRequest - (*CreateTreeRequest)(nil), // 3: trillian.CreateTreeRequest - (*UpdateTreeRequest)(nil), // 4: trillian.UpdateTreeRequest - (*DeleteTreeRequest)(nil), // 5: trillian.DeleteTreeRequest - (*UndeleteTreeRequest)(nil), // 6: trillian.UndeleteTreeRequest - (*Tree)(nil), // 7: trillian.Tree - (*field_mask.FieldMask)(nil), // 8: google.protobuf.FieldMask + (*ListTreesRequest)(nil), // 0: trillian.ListTreesRequest + (*ListTreesResponse)(nil), // 1: trillian.ListTreesResponse + (*GetTreeRequest)(nil), // 2: trillian.GetTreeRequest + (*CreateTreeRequest)(nil), // 3: trillian.CreateTreeRequest + (*UpdateTreeRequest)(nil), // 4: trillian.UpdateTreeRequest + (*DeleteTreeRequest)(nil), // 5: trillian.DeleteTreeRequest + (*UndeleteTreeRequest)(nil), // 6: trillian.UndeleteTreeRequest + (*Tree)(nil), // 7: trillian.Tree + (*fieldmaskpb.FieldMask)(nil), // 8: google.protobuf.FieldMask } var file_trillian_admin_api_proto_depIdxs = []int32{ 7, // 0: trillian.ListTreesResponse.tree:type_name -> trillian.Tree diff --git a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go index 766bc38f4e..6253c03093 100644 --- a/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go +++ b/vendor/github.com/google/trillian/trillian_admin_api_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: trillian_admin_api.proto package trillian diff --git a/vendor/github.com/google/trillian/trillian_log_api.pb.go b/vendor/github.com/google/trillian/trillian_log_api.pb.go index 891e616892..451bfa2475 100644 --- a/vendor/github.com/google/trillian/trillian_log_api.pb.go +++ b/vendor/github.com/google/trillian/trillian_log_api.pb.go @@ -14,17 +14,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.28.0 +// protoc v3.20.1 // source: trillian_log_api.proto package trillian import ( - timestamp "github.com/golang/protobuf/ptypes/timestamp" status "google.golang.org/genproto/googleapis/rpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -1350,10 +1350,10 @@ type LogLeaf struct { // queue_timestamp holds the time at which this leaf was queued for // inclusion in the Log, or zero if the entry was submitted without // queuing. Clients should not set this field on submissions. - QueueTimestamp *timestamp.Timestamp `protobuf:"bytes,6,opt,name=queue_timestamp,json=queueTimestamp,proto3" json:"queue_timestamp,omitempty"` + QueueTimestamp *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=queue_timestamp,json=queueTimestamp,proto3" json:"queue_timestamp,omitempty"` // integrate_timestamp holds the time at which this leaf was integrated into // the tree. Clients should not set this field on submissions. - IntegrateTimestamp *timestamp.Timestamp `protobuf:"bytes,7,opt,name=integrate_timestamp,json=integrateTimestamp,proto3" json:"integrate_timestamp,omitempty"` + IntegrateTimestamp *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=integrate_timestamp,json=integrateTimestamp,proto3" json:"integrate_timestamp,omitempty"` } func (x *LogLeaf) Reset() { @@ -1423,14 +1423,14 @@ func (x *LogLeaf) GetLeafIdentityHash() []byte { return nil } -func (x *LogLeaf) GetQueueTimestamp() *timestamp.Timestamp { +func (x *LogLeaf) GetQueueTimestamp() *timestamppb.Timestamp { if x != nil { return x.QueueTimestamp } return nil } -func (x *LogLeaf) GetIntegrateTimestamp() *timestamp.Timestamp { +func (x *LogLeaf) GetIntegrateTimestamp() *timestamppb.Timestamp { if x != nil { return x.IntegrateTimestamp } @@ -1730,7 +1730,7 @@ var file_trillian_log_api_proto_goTypes = []interface{}{ (*Proof)(nil), // 21: trillian.Proof (*SignedLogRoot)(nil), // 22: trillian.SignedLogRoot (*status.Status)(nil), // 23: google.rpc.Status - (*timestamp.Timestamp)(nil), // 24: google.protobuf.Timestamp + (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp } var file_trillian_log_api_proto_depIdxs = []int32{ 20, // 0: trillian.QueueLeafRequest.leaf:type_name -> trillian.LogLeaf diff --git a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go b/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go index 95f867cbbe..f2fe92d11c 100644 --- a/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go +++ b/vendor/github.com/google/trillian/trillian_log_api_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
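The trillian changes above are mechanical: the generated code moves off the deprecated github.com/golang/protobuf/ptypes aliases onto the canonical google.golang.org/protobuf/types/known packages. A minimal sketch of what calling code looks like against the new field types (the Tree field values are illustrative assumptions, not part of this diff):

package main

import (
	"fmt"
	"time"

	"github.com/google/trillian"
	durationpb "google.golang.org/protobuf/types/known/durationpb"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// durationpb.New and timestamppb.Now replace the old
	// ptypes.DurationProto and ptypes.TimestampNow helpers.
	tree := &trillian.Tree{
		MaxRootDuration: durationpb.New(time.Hour), // illustrative value
		CreateTime:      timestamppb.Now(),         // illustrative value
	}

	// The well-known types convert back to standard library types directly.
	fmt.Println(tree.GetMaxRootDuration().AsDuration(), tree.GetCreateTime().AsTime())
}

Since golang/protobuf v1.4 the ptypes message types have been aliases of these types/known packages, so for consumers this amounts to an import-path rename rather than a behavioral change.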
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: trillian_log_api.proto package trillian diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index b5d1e6cf81..9b1b81f529 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -178,11 +178,17 @@ type serverMetadataKey struct{} // NewServerMetadataContext creates a new context with ServerMetadata func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } return context.WithValue(ctx, serverMetadataKey{}, md) } // ServerMetadataFromContext returns the ServerMetadata in ctx func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) return } diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go index 10af56bb99..fa92de4b3f 100644 --- a/vendor/github.com/hashicorp/vault/api/auth.go +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -31,16 +31,82 @@ func (a *Auth) Login(ctx context.Context, authMethod AuthMethod) (*Secret, error if authMethod == nil { return nil, fmt.Errorf("no auth method provided for login") } + return a.login(ctx, authMethod) +} + +// MFALogin is a wrapper that helps satisfy Vault's MFA implementation. +// If optional credentials are provided a single-phase login will be attempted +// and the resulting Secret will contain a ClientToken if the authentication is successful. +// The client's token will also be set accordingly. +// +// If no credentials are provided a two-phase MFA login will be assumed and the resulting +// Secret will have a MFARequirement containing the MFARequestID to be used in a follow-up +// call to `sys/mfa/validate` or by passing it to the method (*Auth).MFAValidate. +func (a *Auth) MFALogin(ctx context.Context, authMethod AuthMethod, creds ...string) (*Secret, error) { + if len(creds) > 0 { + a.c.SetMFACreds(creds) + return a.login(ctx, authMethod) + } + + return a.twoPhaseMFALogin(ctx, authMethod) +} + +// MFAValidate validates an MFA request using the appropriate payload and a secret containing +// Auth.MFARequirement, like the one returned by MFALogin when credentials are not provided. +// Upon successful validation the client token will be set accordingly. +// +// The Secret returned is the authentication secret, which if desired can be +// passed as input to the NewLifetimeWatcher method in order to start +// automatically renewing the token. 
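A minimal sketch of the two-phase flow documented above, assuming a userpass mount with placeholder credentials and a placeholder TOTP method ID:

package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/api/auth/userpass"
)

func main() {
	ctx := context.Background()

	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Any api.AuthMethod works here; userpass with placeholder
	// credentials is used purely for illustration.
	method, err := userpass.NewUserpassAuth("my-user", &userpass.Password{FromString: "my-password"})
	if err != nil {
		log.Fatal(err)
	}

	// Phase one: no credentials are passed, so the returned Secret carries
	// an MFARequirement rather than a client token.
	mfaSecret, err := client.Auth().MFALogin(ctx, method)
	if err != nil {
		log.Fatal(err)
	}

	// Phase two: validate with a payload keyed by MFA method ID
	// (placeholder ID and passcode).
	payload := map[string]interface{}{
		"totp-method-id": []string{"123456"},
	}
	if _, err := client.Auth().MFAValidate(ctx, mfaSecret, payload); err != nil {
		log.Fatal(err)
	}

	// On success, MFAValidate has set the client token for later calls.
}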
+func (a *Auth) MFAValidate(ctx context.Context, mfaSecret *Secret, payload map[string]interface{}) (*Secret, error) { + if mfaSecret == nil || mfaSecret.Auth == nil || mfaSecret.Auth.MFARequirement == nil { + return nil, fmt.Errorf("secret does not contain MFARequirements") + } + + s, err := a.c.Sys().MFAValidateWithContext(ctx, mfaSecret.Auth.MFARequirement.GetMFARequestID(), payload) + if err != nil { + return nil, err + } + + return a.checkAndSetToken(s) +} - authSecret, err := authMethod.Login(ctx, a.c) +// login performs the (*AuthMethod).Login() with the configured client and checks that a ClientToken is returned +func (a *Auth) login(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) if err != nil { return nil, fmt.Errorf("unable to log in to auth method: %w", err) } - if authSecret == nil || authSecret.Auth == nil || authSecret.Auth.ClientToken == "" { - return nil, fmt.Errorf("login response from auth method did not return client token") + + return a.checkAndSetToken(s) +} + +// twoPhaseMFALogin performs the (*AuthMethod).Login() with the configured client +// and checks that an MFARequirement is returned +func (a *Auth) twoPhaseMFALogin(ctx context.Context, authMethod AuthMethod) (*Secret, error) { + s, err := authMethod.Login(ctx, a.c) + if err != nil { + return nil, fmt.Errorf("unable to log in: %w", err) + } + if s == nil || s.Auth == nil || s.Auth.MFARequirement == nil { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain MFARequirements") + } + return s, fmt.Errorf("assumed two-phase MFA login, returned secret is missing MFARequirements") + } + + return s, nil +} + +func (a *Auth) checkAndSetToken(s *Secret) (*Secret, error) { + if s == nil || s.Auth == nil || s.Auth.ClientToken == "" { + if s != nil { + s.Warnings = append(s.Warnings, "expected secret to contain ClientToken") + } + return s, fmt.Errorf("response did not return ClientToken, client token not set") } - a.c.SetToken(authSecret.Auth.ClientToken) + a.c.SetToken(s.Auth.ClientToken) - return authSecret, nil + return s, nil } diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index 99813a21b1..b5f7e9bb82 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -36,6 +36,7 @@ const ( EnvVaultAddress = "VAULT_ADDR" EnvVaultAgentAddr = "VAULT_AGENT_ADDR" EnvVaultCACert = "VAULT_CACERT" + EnvVaultCACertBytes = "VAULT_CACERT_BYTES" EnvVaultCAPath = "VAULT_CAPATH" EnvVaultClientCert = "VAULT_CLIENT_CERT" EnvVaultClientKey = "VAULT_CLIENT_KEY" @@ -50,6 +51,7 @@ const ( EnvVaultMFA = "VAULT_MFA" EnvRateLimit = "VAULT_RATE_LIMIT" EnvHTTPProxy = "VAULT_HTTP_PROXY" + EnvVaultProxyAddr = "VAULT_PROXY_ADDR" HeaderIndex = "X-Vault-Index" HeaderForward = "X-Vault-Forward" HeaderInconsistent = "X-Vault-Inconsistent" @@ -142,6 +144,14 @@ type Config struct { // with the same client. Cloning a client will not clone this value. OutputCurlString bool + // OutputPolicy causes the actual request to return an error of type + // *OutputPolicyError. Type asserting the error message will display + // an example of the required policy HCL needed for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. 
+ OutputPolicy bool + // curlCACert, curlCAPath, curlClientCert and curlClientKey are used to keep // track of the name of the TLS certs and keys when OutputCurlString is set. // Cloning a client will also not clone those values. @@ -172,9 +182,14 @@ type Config struct { // used to communicate with Vault. type TLSConfig struct { // CACert is the path to a PEM-encoded CA cert file to use to verify the - // Vault server SSL certificate. + // Vault server SSL certificate. It takes precedence over CACertBytes + // and CAPath. CACert string + // CACertBytes is a PEM-encoded certificate or bundle. It takes precedence + // over CAPath. + CACertBytes []byte + // CAPath is the path to a directory of PEM-encoded CA cert files to verify // the Vault server SSL certificate. CAPath string @@ -266,12 +281,13 @@ func (c *Config) configureTLS(t *TLSConfig) error { return fmt.Errorf("both client cert and client key must be provided") } - if t.CACert != "" || t.CAPath != "" { + if t.CACert != "" || len(t.CACertBytes) != 0 || t.CAPath != "" { c.curlCACert = t.CACert c.curlCAPath = t.CAPath rootConfig := &rootcerts.Config{ - CAFile: t.CACert, - CAPath: t.CAPath, + CAFile: t.CACert, + CACertificate: t.CACertBytes, + CAPath: t.CAPath, } if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { return err @@ -313,6 +329,7 @@ func (c *Config) ReadEnvironment() error { var envAddress string var envAgentAddress string var envCACert string + var envCACertBytes []byte var envCAPath string var envClientCert string var envClientKey string @@ -322,7 +339,7 @@ func (c *Config) ReadEnvironment() error { var envMaxRetries *uint64 var envSRVLookup bool var limit *rate.Limiter - var envHTTPProxy string + var envVaultProxy string // Parse the environment variables if v := os.Getenv(EnvVaultAddress); v != "" { @@ -343,6 +360,9 @@ func (c *Config) ReadEnvironment() error { if v := os.Getenv(EnvVaultCACert); v != "" { envCACert = v } + if v := os.Getenv(EnvVaultCACertBytes); v != "" { + envCACertBytes = []byte(v) + } if v := os.Getenv(EnvVaultCAPath); v != "" { envCAPath = v } @@ -392,12 +412,18 @@ func (c *Config) ReadEnvironment() error { } if v := os.Getenv(EnvHTTPProxy); v != "" { - envHTTPProxy = v + envVaultProxy = v + } + + // VAULT_PROXY_ADDR supersedes VAULT_HTTP_PROXY + if v := os.Getenv(EnvVaultProxyAddr); v != "" { + envVaultProxy = v } // Configure the HTTP clients TLS configuration. t := &TLSConfig{ CACert: envCACert, + CACertBytes: envCACertBytes, CAPath: envCAPath, ClientCert: envClientCert, ClientKey: envClientKey, @@ -431,14 +457,14 @@ func (c *Config) ReadEnvironment() error { c.Timeout = envClientTimeout } - if envHTTPProxy != "" { - url, err := url.Parse(envHTTPProxy) + if envVaultProxy != "" { + u, err := url.Parse(envVaultProxy) if err != nil { return err } transport := c.HttpClient.Transport.(*http.Transport) - transport.Proxy = http.ProxyURL(url) + transport.Proxy = http.ProxyURL(u) } return nil @@ -576,7 +602,6 @@ func (c *Client) CloneConfig() *Config { newConfig.CheckRetry = c.config.CheckRetry newConfig.Logger = c.config.Logger newConfig.Limiter = c.config.Limiter - newConfig.OutputCurlString = c.config.OutputCurlString newConfig.SRVLookup = c.config.SRVLookup newConfig.CloneHeaders = c.config.CloneHeaders newConfig.CloneToken = c.config.CloneToken @@ -589,7 +614,7 @@ func (c *Client) CloneConfig() *Config { return newConfig } -// Sets the address of Vault in the client. The format of address should be +// SetAddress sets the address of Vault in the client. 
The format of address should be // "<Scheme>://<Host>:<Port>". Setting this on a client will override the // value of VAULT_ADDR environment variable. func (c *Client) SetAddress(addr string) error { @@ -616,6 +641,16 @@ func (c *Client) Address() string { return c.addr.String() } +func (c *Client) SetCheckRedirect(f func(*http.Request, []*http.Request) error) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.HttpClient.CheckRedirect = f +} + // SetLimiter will set the rate limiter for this client. // This method is thread-safe. // rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter @@ -768,6 +803,24 @@ func (c *Client) SetOutputCurlString(curl bool) { c.config.OutputCurlString = curl } +func (c *Client) OutputPolicy() bool { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + + return c.config.OutputPolicy +} + +func (c *Client) SetOutputPolicy(isSet bool) { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + + c.config.OutputPolicy = isSet +} + // CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs // for a given operation and path. func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { @@ -808,10 +861,39 @@ func (c *Client) setNamespace(namespace string) { c.headers.Set(consts.NamespaceHeaderName, namespace) } +// ClearNamespace removes the namespace header if set. func (c *Client) ClearNamespace() { c.modifyLock.Lock() defer c.modifyLock.Unlock() - c.headers.Del(consts.NamespaceHeaderName) + if c.headers != nil { + c.headers.Del(consts.NamespaceHeaderName) + } +} + +// Namespace returns the namespace currently set in this client. It will +// return an empty string if there is no namespace set. +func (c *Client) Namespace() string { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + if c.headers == nil { + return "" + } + return c.headers.Get(consts.NamespaceHeaderName) +} + +// WithNamespace makes a shallow copy of Client, modifies it to use +// the given namespace, and returns it. Passing an empty string will +// temporarily unset the namespace. +func (c *Client) WithNamespace(namespace string) *Client { + c2 := *c + c2.modifyLock = sync.RWMutex{} + c2.headers = c.Headers() + if namespace == "" { + c2.ClearNamespace() + } else { + c2.SetNamespace(namespace) + } + return &c2 } // Token returns the access token being used by this client.
It will @@ -990,22 +1072,21 @@ func (c *Client) clone(cloneHeaders bool) (*Client, error) { defer config.modifyLock.RUnlock() newConfig := &Config{ - Address: config.Address, - HttpClient: config.HttpClient, - MinRetryWait: config.MinRetryWait, - MaxRetryWait: config.MaxRetryWait, - MaxRetries: config.MaxRetries, - Timeout: config.Timeout, - Backoff: config.Backoff, - CheckRetry: config.CheckRetry, - Logger: config.Logger, - Limiter: config.Limiter, - OutputCurlString: config.OutputCurlString, - AgentAddress: config.AgentAddress, - SRVLookup: config.SRVLookup, - CloneHeaders: config.CloneHeaders, - CloneToken: config.CloneToken, - ReadYourWrites: config.ReadYourWrites, + Address: config.Address, + HttpClient: config.HttpClient, + MinRetryWait: config.MinRetryWait, + MaxRetryWait: config.MaxRetryWait, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + CheckRetry: config.CheckRetry, + Logger: config.Logger, + Limiter: config.Limiter, + AgentAddress: config.AgentAddress, + SRVLookup: config.SRVLookup, + CloneHeaders: config.CloneHeaders, + CloneToken: config.CloneToken, + ReadYourWrites: config.ReadYourWrites, } client, err := NewClient(newConfig) if err != nil { @@ -1131,12 +1212,23 @@ func (c *Client) rawRequestWithContext(ctx context.Context, r *Request) (*Respon checkRetry := c.config.CheckRetry backoff := c.config.Backoff httpClient := c.config.HttpClient + ns := c.headers.Get(consts.NamespaceHeaderName) outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy logger := c.config.Logger c.config.modifyLock.RUnlock() c.modifyLock.RUnlock() + // ensure that the most current namespace setting is used at the time of the call + // e.g. calls using (*Client).WithNamespace + switch ns { + case "": + r.Headers.Del(consts.NamespaceHeaderName) + default: + r.Headers.Set(consts.NamespaceHeaderName, ns) + } + for _, cb := range c.requestCallbacks { cb(r) } @@ -1176,6 +1268,14 @@ START: return nil, LastOutputStringError } + if outputPolicy { + LastOutputPolicyError = &OutputPolicyError{ + method: req.Method, + path: strings.TrimPrefix(req.URL.Path, "/v1"), + } + return nil, LastOutputPolicyError + } + req.Request = req.Request.WithContext(ctx) if backoff == nil { @@ -1268,20 +1368,31 @@ func (c *Client) httpRequestWithContext(ctx context.Context, r *Request) (*Respo limiter := c.config.Limiter httpClient := c.config.HttpClient outputCurlString := c.config.OutputCurlString + outputPolicy := c.config.OutputPolicy + + // add headers if c.headers != nil { for header, vals := range c.headers { for _, val := range vals { req.Header.Add(header, val) } } + // explicitly set the namespace header to current client + if ns := c.headers.Get(consts.NamespaceHeaderName); ns != "" { + r.Headers.Set(consts.NamespaceHeaderName, ns) + } } + c.config.modifyLock.RUnlock() c.modifyLock.RUnlock() - // OutputCurlString logic relies on the request type to be retryable.Request as + // OutputCurlString and OutputPolicy logic rely on the request type to be retryable.Request if outputCurlString { return nil, fmt.Errorf("output-curl-string is not implemented for this request") } + if outputPolicy { + return nil, fmt.Errorf("output-policy is not implemented for this request") + } req.URL.User = r.URL.User req.URL.Scheme = r.URL.Scheme diff --git a/vendor/github.com/hashicorp/vault/api/kv.go b/vendor/github.com/hashicorp/vault/api/kv.go new file mode 100644 index 0000000000..16437582e7 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv.go @@ -0,0 +1,50 @@ 
+package api + +// A KVSecret is a key-value secret returned by Vault's KV secrets engine, +// and is the most basic type of secret stored in Vault. +// +// Data contains the key-value pairs of the secret itself, +// while Metadata contains a subset of metadata describing +// this particular version of the secret. +// The Metadata field for a KV v1 secret will always be nil, as +// metadata is only supported starting in KV v2. +// +// The Raw field can be inspected for information about the lease, +// and passed to a LifetimeWatcher object for periodic renewal. +type KVSecret struct { + Data map[string]interface{} + VersionMetadata *KVVersionMetadata + CustomMetadata map[string]interface{} + Raw *Secret +} + +// KVv1 is used to return a client for reads and writes against +// a KV v1 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// While v1 is not necessarily deprecated, Vault development servers tend to +// use v2 as the version of the KV secrets engine, as this is what's mounted +// by default when a server is started in -dev mode. See the kvv2 struct. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv1(mountPath string) *KVv1 { + return &KVv1{c: c, mountPath: mountPath} +} + +// KVv2 is used to return a client for reads and writes against +// a KV v2 secrets engine in Vault. +// +// The mount path is the location where the target KV secrets engine resides +// in Vault. +// +// Vault development servers tend to have "secret" as the mount path, +// as these are the default settings when a server is started in -dev mode. +// +// Learn more about the KV secrets engine here: +// https://www.vaultproject.io/docs/secrets/kv +func (c *Client) KVv2(mountPath string) *KVv2 { + return &KVv2{c: c, mountPath: mountPath} +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v1.go b/vendor/github.com/hashicorp/vault/api/kv_v1.go new file mode 100644 index 0000000000..d269070bc3 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v1.go @@ -0,0 +1,57 @@ +package api + +import ( + "context" + "fmt" +) + +type KVv1 struct { + c *Client + mountPath string +} + +// Get returns a secret from the KV v1 secrets engine. +func (kv *KVv1) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("no secret found at %s", pathToRead) + } + + return &KVSecret{ + Data: secret.Data, + VersionMetadata: nil, + Raw: secret, + }, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) into the +// KV v1 secrets engine. +// +// If the secret already exists, it will be overwritten. +func (kv *KVv1) Put(ctx context.Context, secretPath string, data map[string]interface{}) error { + pathToWriteTo := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, data) + if err != nil { + return fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes a secret from the KV v1 secrets engine. 
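A short usage sketch for the KV v1 helpers in this new file; the "kv" mount path and the secret data are assumptions:

package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	ctx := context.Background()

	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	kv := client.KVv1("kv") // mount path of a KV v1 engine (assumed)

	// Put overwrites any existing secret at the path.
	if err := kv.Put(ctx, "my-app/creds", map[string]interface{}{"password": "Hashi123"}); err != nil {
		log.Fatal(err)
	}

	secret, err := kv.Get(ctx, "my-app/creds")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(secret.Data["password"]) // VersionMetadata is always nil for KV v1

	if err := kv.Delete(ctx, "my-app/creds"); err != nil {
		log.Fatal(err)
	}
}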
+func (kv *KVv1) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/kv_v2.go b/vendor/github.com/hashicorp/vault/api/kv_v2.go new file mode 100644 index 0000000000..f0f59abfe5 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/kv_v2.go @@ -0,0 +1,788 @@ +package api + +import ( + "context" + "fmt" + "sort" + "strconv" + "time" + + "github.com/mitchellh/mapstructure" +) + +type KVv2 struct { + c *Client + mountPath string +} + +// KVMetadata is the full metadata for a given KV v2 secret. +type KVMetadata struct { + CASRequired bool `mapstructure:"cas_required"` + CreatedTime time.Time `mapstructure:"created_time"` + CurrentVersion int `mapstructure:"current_version"` + CustomMetadata map[string]interface{} `mapstructure:"custom_metadata"` + DeleteVersionAfter time.Duration `mapstructure:"delete_version_after"` + MaxVersions int `mapstructure:"max_versions"` + OldestVersion int `mapstructure:"oldest_version"` + UpdatedTime time.Time `mapstructure:"updated_time"` + // Keys are stringified ints, e.g. "3". To get a sorted slice of version metadata, use GetVersionsAsList. + Versions map[string]KVVersionMetadata `mapstructure:"versions"` + Raw *Secret +} + +// KVMetadataPutInput is the subset of metadata that can be replaced for a +// KV v2 secret using the PutMetadata method. +// +// All fields should be explicitly provided, as any fields left unset in the +// struct will be reset to their zero value. +type KVMetadataPutInput struct { + CASRequired bool + CustomMetadata map[string]interface{} + DeleteVersionAfter time.Duration + MaxVersions int +} + +// KVMetadataPatchInput is the subset of metadata that can be manually modified for +// a KV v2 secret using the PatchMetadata method. +// +// The struct's fields are all pointers. A pointer to a field's zero +// value (e.g. false for *bool) implies that field should be reset to its +// zero value after update, whereas a field left as a nil pointer +// (e.g. nil for *bool) implies the field should remain unchanged. +// +// Since maps are already pointers, use an empty map to remove all +// custom metadata. +type KVMetadataPatchInput struct { + CASRequired *bool + CustomMetadata map[string]interface{} + DeleteVersionAfter *time.Duration + MaxVersions *int +} + +// KVVersionMetadata is a subset of metadata for a given version of a KV v2 secret. +type KVVersionMetadata struct { + Version int `mapstructure:"version"` + CreatedTime time.Time `mapstructure:"created_time"` + DeletionTime time.Time `mapstructure:"deletion_time"` + Destroyed bool `mapstructure:"destroyed"` +} + +// Currently supported options: WithOption, WithCheckAndSet, WithMethod +type KVOption func() (key string, value interface{}) + +const ( + KVOptionCheckAndSet = "cas" + KVOptionMethod = "method" + KVMergeMethodPatch = "patch" + KVMergeMethodReadWrite = "rw" +) + +// WithOption can optionally be passed to provide generic options for a +// KV request. Valid keys and values depend on the type of request. +func WithOption(key string, value interface{}) KVOption { + return func() (string, interface{}) { + return key, value + } +} + +// WithCheckAndSet can optionally be passed to perform a check-and-set +// operation on a KV request. If not set, the write will be allowed. 
+// If cas is set to 0, a write will only be allowed if the key doesn't exist. +// If set to non-zero, the write will only be allowed if the key’s current +// version matches the version specified in the cas parameter. +func WithCheckAndSet(cas int) KVOption { + return WithOption(KVOptionCheckAndSet, cas) +} + +// WithMergeMethod can optionally be passed to dictate which type of +// patch to perform in a Patch request. If set to "patch", then an HTTP PATCH +// request will be issued. If set to "rw", then a read will be performed, +// then a local update, followed by a remote update. Defaults to "patch". +func WithMergeMethod(method string) KVOption { + return WithOption(KVOptionMethod, method) +} + +// Get returns the latest version of a secret from the KV v2 secrets engine. +// +// If the latest version has been deleted, an error will not be thrown, but +// the Data field on the returned secret will be nil, and the Metadata field +// will contain the deletion time. +func (kv *KVv2) Get(ctx context.Context, secretPath string) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, fmt.Errorf("error encountered while reading secret at %s: %w", pathToRead, err) + } + if secret == nil { + return nil, fmt.Errorf("no secret found at %s", pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err) + } + kvSecret.CustomMetadata = cm + + return kvSecret, nil +} + +// GetVersion returns the data and metadata for a specific version of the +// given secret. +// +// If that version has been deleted, the Data field on the +// returned secret will be nil, and the Metadata field will contain the deletion time. +// +// GetVersionsAsList can provide a list of available versions sorted by +// version number, while the response from GetMetadata contains them as a map. +func (kv *KVv2) GetVersion(ctx context.Context, secretPath string, version int) (*KVSecret, error) { + pathToRead := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + queryParams := map[string][]string{"version": {strconv.Itoa(version)}} + secret, err := kv.c.Logical().ReadWithDataWithContext(ctx, pathToRead, queryParams) + if err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("no secret with version %d found at %s", version, pathToRead) + } + + kvSecret, err := extractDataAndVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error parsing secret at %s: %w", pathToRead, err) + } + + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToRead, err) + } + kvSecret.CustomMetadata = cm + + return kvSecret, nil +} + +// GetVersionsAsList returns a subset of the metadata for each version of the secret, sorted by version number. 
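A sketch combining Put with check-and-set and the two read calls above, assuming the default "secret" mount of a -dev server and a fresh secret path:

package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	ctx := context.Background()

	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	kv := client.KVv2("secret") // default -dev mount path (assumed)

	// WithCheckAndSet(0) makes the write fail if the secret already exists.
	if _, err := kv.Put(ctx, "my-app/creds",
		map[string]interface{}{"password": "Hashi123"},
		vault.WithCheckAndSet(0)); err != nil {
		log.Fatal(err)
	}

	latest, err := kv.Get(ctx, "my-app/creds")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current version:", latest.VersionMetadata.Version)

	// Older versions remain addressable by number.
	v1, err := kv.GetVersion(ctx, "my-app/creds", 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v1.Data)
}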
+func (kv *KVv2) GetVersionsAsList(ctx context.Context, secretPath string) ([]KVVersionMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret to determine versions: %w", err) + } + + versionsList := make([]KVVersionMetadata, 0, len(md.Versions)) + for _, versionMetadata := range md.Versions { + versionsList = append(versionsList, versionMetadata) + } + + sort.Slice(versionsList, func(i, j int) bool { return versionsList[i].Version < versionsList[j].Version }) + return versionsList, nil +} + +// GetMetadata returns the full metadata for a given secret, including a map of +// its existing versions and their respective creation/deletion times, etc. +func (kv *KVv2) GetMetadata(ctx context.Context, secretPath string) (*KVMetadata, error) { + pathToRead := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + secret, err := kv.c.Logical().ReadWithContext(ctx, pathToRead) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, fmt.Errorf("no secret metadata found at %s", pathToRead) + } + + md, err := extractFullMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to extract metadata from secret: %w", err) + } + + return md, nil +} + +// Put inserts a key-value secret (e.g. {"password": "Hashi123"}) +// into the KV v2 secrets engine. +// +// If the secret already exists, a new version will be created +// and the previous version can be accessed with the GetVersion method. +// GetMetadata can provide a list of available versions. +func (kv *KVv2) Put(ctx context.Context, secretPath string, data map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToWriteTo := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + wrappedData := map[string]interface{}{ + "data": data, + } + + // Add options such as check-and-set, etc. + // We leave this as an optional arg so that most users + // can just pass plain key-value secret data without + // having to remember to put the extra layer "data" in there. + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, wrappedData) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", pathToWriteTo, err) + } + if secret == nil { + return nil, fmt.Errorf("no secret was written to %s", pathToWriteTo) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret at %s: %w", pathToWriteTo, err) + } + kvSecret.CustomMetadata = cm + + return kvSecret, nil +} + +// PutMetadata can be used to fully replace a subset of metadata fields for a +// given KV v2 secret. 
All fields will replace the corresponding values on the Vault server. +// Any fields left as nil will reset the field on the Vault server back to its zero value. +// +// To only partially replace the values of these metadata fields, use PatchMetadata. +// +// This method can also be used to create a new secret with just metadata and no secret data yet. +func (kv *KVv2) PutMetadata(ctx context.Context, secretPath string, metadata KVMetadataPutInput) error { + pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // convert values to a map we can pass to Logical + metadataMap := make(map[string]interface{}) + metadataMap[maxVersionsKey] = metadata.MaxVersions + metadataMap[deleteVersionAfterKey] = metadata.DeleteVersionAfter.String() + metadataMap[casRequiredKey] = metadata.CASRequired + metadataMap[customMetadataKey] = metadata.CustomMetadata + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToWriteTo, metadataMap) + if err != nil { + return fmt.Errorf("error writing secret metadata to %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Patch additively updates the most recent version of a key-value secret, +// differentiating it from Put which will fully overwrite the previous data. +// Only the key-value pairs that are new or changing need to be provided. +// +// The WithMethod KVOption function can optionally be passed to dictate which +// kind of patch to perform, as older Vault server versions (pre-1.9.0) may +// only be able to use the old "rw" (read-then-write) style of partial update, +// whereas newer Vault servers can use the default value of "patch" if the +// client token's policy has the "patch" capability. +func (kv *KVv2) Patch(ctx context.Context, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + // determine patch method + var patchMethod string + var ok bool + for _, opt := range opts { + k, v := opt() + if k == "method" { + patchMethod, ok = v.(string) + if !ok { + return nil, fmt.Errorf("unsupported type provided for option value; value for patch method should be string \"rw\" or \"patch\"") + } + } + } + + // Determine which kind of patch to use, + // the newer HTTP Patch style or the older read-then-write style + var kvs *KVSecret + var perr error + switch patchMethod { + case "rw": + kvs, perr = readThenWrite(ctx, kv.c, kv.mountPath, secretPath, newData) + case "patch": + kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + case "": + kvs, perr = mergePatch(ctx, kv.c, kv.mountPath, secretPath, newData, opts...) + default: + return nil, fmt.Errorf("unsupported patch method provided; value for patch method should be string \"rw\" or \"patch\"") + } + if perr != nil { + return nil, fmt.Errorf("unable to perform patch: %w", perr) + } + if kvs == nil { + return nil, fmt.Errorf("no secret was written to %s", secretPath) + } + + return kvs, nil +} + +// PatchMetadata can be used to replace just a subset of a secret's +// metadata fields at a time, as opposed to PutMetadata which is used to +// completely replace all fields on the previous metadata. 
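A sketch of both patch styles described above, again assuming a "secret" mount and an existing secret at the path:

package main

import (
	"context"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	ctx := context.Background()

	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KVv2("secret") // assumed mount path

	// Default: issues an HTTP PATCH, which needs a Vault 1.9+ server and
	// the "patch" capability on the token's policy. On a 405 response the
	// helper falls back to read-then-write on its own.
	updated, err := kv.Patch(ctx, "my-app/creds",
		map[string]interface{}{"password": "Hashi456"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new version:", updated.VersionMetadata.Version)

	// Forcing the older read-then-write style explicitly:
	if _, err := kv.Patch(ctx, "my-app/creds",
		map[string]interface{}{"password": "Hashi789"},
		vault.WithMergeMethod(vault.KVMergeMethodReadWrite)); err != nil {
		log.Fatal(err)
	}
}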
+func (kv *KVv2) PatchMetadata(ctx context.Context, secretPath string, metadata KVMetadataPatchInput) error { + pathToWriteTo := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + md, err := toMetadataMap(metadata) + if err != nil { + return fmt.Errorf("unable to create map for JSON merge patch request: %w", err) + } + + _, err = kv.c.Logical().JSONMergePatch(ctx, pathToWriteTo, md) + if err != nil { + return fmt.Errorf("error patching metadata at %s: %w", pathToWriteTo, err) + } + + return nil +} + +// Delete deletes the most recent version of a secret from the KV v2 +// secrets engine. To delete an older version, use DeleteVersions. +func (kv *KVv2) Delete(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/data/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteVersions deletes the specified versions of a secret from the KV v2 +// secrets engine. To delete the latest version of a secret, just use Delete. +func (kv *KVv2) DeleteVersions(ctx context.Context, secretPath string, versions []int) error { + // verb and path are different when trying to delete past versions + pathToDelete := fmt.Sprintf("%s/delete/%s", kv.mountPath, secretPath) + + if len(versions) == 0 { + return nil + } + + var versionsToDelete []string + for _, version := range versions { + versionsToDelete = append(versionsToDelete, strconv.Itoa(version)) + } + versionsMap := map[string]interface{}{ + "versions": versionsToDelete, + } + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDelete, versionsMap) + if err != nil { + return fmt.Errorf("error deleting secret at %s: %w", pathToDelete, err) + } + + return nil +} + +// DeleteMetadata deletes all versions and metadata of the secret at the +// given path. +func (kv *KVv2) DeleteMetadata(ctx context.Context, secretPath string) error { + pathToDelete := fmt.Sprintf("%s/metadata/%s", kv.mountPath, secretPath) + + _, err := kv.c.Logical().DeleteWithContext(ctx, pathToDelete) + if err != nil { + return fmt.Errorf("error deleting secret metadata at %s: %w", pathToDelete, err) + } + + return nil +} + +// Undelete undeletes the given versions of a secret, restoring the data +// so that it can be fetched again with Get requests. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. +func (kv *KVv2) Undelete(ctx context.Context, secretPath string, versions []int) error { + pathToUndelete := fmt.Sprintf("%s/undelete/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToUndelete, data) + if err != nil { + return fmt.Errorf("error undeleting secret metadata at %s: %w", pathToUndelete, err) + } + + return nil +} + +// Destroy permanently removes the specified secret versions' data +// from the Vault server. If no secret exists at the given path, no +// action will be taken. +// +// A list of existing versions can be retrieved using the GetVersionsAsList method. 
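The delete/undelete/destroy lifecycle in one minimal sketch (mount path, secret path, and version numbers are assumptions):

package main

import (
	"context"
	"log"

	vault "github.com/hashicorp/vault/api"
)

func main() {
	ctx := context.Background()

	client, err := vault.NewClient(vault.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KVv2("secret") // assumed mount path

	// Soft-delete two old versions; their data becomes unreadable but
	// remains recoverable.
	if err := kv.DeleteVersions(ctx, "my-app/creds", []int{1, 2}); err != nil {
		log.Fatal(err)
	}

	// Undelete restores them so Get/GetVersion return data again.
	if err := kv.Undelete(ctx, "my-app/creds", []int{1, 2}); err != nil {
		log.Fatal(err)
	}

	// Destroy is permanent and cannot be undone.
	if err := kv.Destroy(ctx, "my-app/creds", []int{1}); err != nil {
		log.Fatal(err)
	}
}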
+func (kv *KVv2) Destroy(ctx context.Context, secretPath string, versions []int) error { + pathToDestroy := fmt.Sprintf("%s/destroy/%s", kv.mountPath, secretPath) + + data := map[string]interface{}{ + "versions": versions, + } + + _, err := kv.c.Logical().WriteWithContext(ctx, pathToDestroy, data) + if err != nil { + return fmt.Errorf("error destroying secret metadata at %s: %w", pathToDestroy, err) + } + + return nil +} + +// Rollback can be used to roll a secret back to a previous +// non-deleted/non-destroyed version. That previous version becomes the +// next/newest version for the path. +func (kv *KVv2) Rollback(ctx context.Context, secretPath string, toVersion int) (*KVSecret, error) { + // First, do a read to get the current version for check-and-set + latest, err := kv.Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("unable to get latest version of secret: %w", err) + } + + // Make sure a value already exists + if latest == nil { + return nil, fmt.Errorf("no secret was found: %w", err) + } + + // Verify metadata found + if latest.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found; rollback can only be used on existing data") + } + + // Now run it again and read the version we want to roll back to + rollbackVersion, err := kv.GetVersion(ctx, secretPath, toVersion) + if err != nil { + return nil, fmt.Errorf("unable to get previous version %d of secret: %s", toVersion, err) + } + + err = validateRollbackVersion(rollbackVersion) + if err != nil { + return nil, fmt.Errorf("invalid rollback version %d: %w", toVersion, err) + } + + casVersion := latest.VersionMetadata.Version + kvs, err := kv.Put(ctx, secretPath, rollbackVersion.Data, WithCheckAndSet(casVersion)) + if err != nil { + return nil, fmt.Errorf("unable to roll back to previous secret version: %w", err) + } + + return kvs, nil +} + +func extractCustomMetadata(secret *Secret) (map[string]interface{}, error) { + // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key + customMetadataInterface, ok := secret.Data["custom_metadata"] + if !ok { + metadataInterface, ok := secret.Data["metadata"] + if !ok { // if that's not found, bail since it should have had one or the other + return nil, fmt.Errorf("secret is missing expected fields") + } + metadataMap, ok := metadataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) + } + customMetadataInterface, ok = metadataMap["custom_metadata"] + if !ok { + return nil, fmt.Errorf("metadata missing expected field \"custom_metadata\": %v", metadataMap) + } + } + + cm, ok := customMetadataInterface.(map[string]interface{}) + if !ok && customMetadataInterface != nil { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", customMetadataInterface, customMetadataInterface) + } + + return cm, nil +} + +func extractDataAndVersionMetadata(secret *Secret) (*KVSecret, error) { + // A nil map is a valid value for data: secret.Data will be nil when this + // version of the secret has been deleted, but the metadata is still + // available. 
+ var data map[string]interface{} + if secret.Data != nil { + dataInterface, ok := secret.Data["data"] + if !ok { + return nil, fmt.Errorf("missing expected 'data' element") + } + + if dataInterface != nil { + data, ok = dataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'data' element: %T (%#v)", data, data) + } + } + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("unable to get version metadata: %w", err) + } + + return &KVSecret{ + Data: data, + VersionMetadata: metadata, + Raw: secret, + }, nil +} + +func extractVersionMetadata(secret *Secret) (*KVVersionMetadata, error) { + var metadata *KVVersionMetadata + + if secret.Data == nil { + return nil, nil + } + + // Logical Writes return the metadata directly, Reads return it nested inside the "metadata" key + var metadataMap map[string]interface{} + metadataInterface, ok := secret.Data["metadata"] + if ok { + metadataMap, ok = metadataInterface.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unexpected type for 'metadata' element: %T (%#v)", metadataInterface, metadataInterface) + } + } else { + metadataMap = secret.Data + } + + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(metadataMap) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into VersionMetadata: %w", err) + } + + return metadata, nil +} + +func extractFullMetadata(secret *Secret) (*KVMetadata, error) { + var metadata *KVMetadata + + if secret.Data == nil { + return nil, nil + } + + if versions, ok := secret.Data["versions"]; ok { + versionsMap := versions.(map[string]interface{}) + if len(versionsMap) > 0 { + for version, metadata := range versionsMap { + metadataMap := metadata.(map[string]interface{}) + // deletion_time usually comes in as an empty string which can't be + // processed as time.RFC3339, so we reset it to a convertible value + if metadataMap["deletion_time"] == "" { + metadataMap["deletion_time"] = time.Time{} + } + versionInt, err := strconv.Atoi(version) + if err != nil { + return nil, fmt.Errorf("error converting version %s to integer: %w", version, err) + } + metadataMap["version"] = versionInt + versionsMap[version] = metadataMap // save the updated copy of the metadata map + } + } + secret.Data["versions"] = versionsMap // save the updated copy of the versions map + } + + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeHookFunc(time.RFC3339), + mapstructure.StringToTimeDurationHookFunc(), + ), + Result: &metadata, + }) + if err != nil { + return nil, fmt.Errorf("error setting up decoder for API response: %w", err) + } + + err = d.Decode(secret.Data) + if err != nil { + return nil, fmt.Errorf("error decoding metadata from API response into KVMetadata: %w", err) + } + + return metadata, nil +} + +func validateRollbackVersion(rollbackVersion *KVSecret) error { + // Make sure a value already exists + if rollbackVersion == nil || rollbackVersion.Data == nil { + return 
fmt.Errorf("no secret found") + } + + // Verify metadata found + if rollbackVersion.VersionMetadata == nil { + return fmt.Errorf("no version metadata found; rollback only works on existing data") + } + + // Verify it hasn't been deleted + if !rollbackVersion.VersionMetadata.DeletionTime.IsZero() { + return fmt.Errorf("cannot roll back to a version that has been deleted") + } + + if rollbackVersion.VersionMetadata.Destroyed { + return fmt.Errorf("cannot roll back to a version that has been destroyed") + } + + // Verify old data found + if rollbackVersion.Data == nil { + return fmt.Errorf("no data found; rollback only works on existing data") + } + + return nil +} + +func mergePatch(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}, opts ...KVOption) (*KVSecret, error) { + pathToMergePatch := fmt.Sprintf("%s/data/%s", mountPath, secretPath) + + // take any other additional options provided + // and pass them along to the patch request + wrappedData := map[string]interface{}{ + "data": newData, + } + options := make(map[string]interface{}) + for _, opt := range opts { + k, v := opt() + options[k] = v + } + if len(opts) > 0 { + wrappedData["options"] = options + } + + secret, err := client.Logical().JSONMergePatch(ctx, pathToMergePatch, wrappedData) + if err != nil { + // If it's a 405, that probably means the server is running a pre-1.9 + // Vault version that doesn't support the HTTP PATCH method. + // Fall back to the old way of doing it. + if re, ok := err.(*ResponseError); ok && re.StatusCode == 405 { + return readThenWrite(ctx, client, mountPath, secretPath, newData) + } + + if re, ok := err.(*ResponseError); ok && re.StatusCode == 403 { + return nil, fmt.Errorf("received 403 from Vault server; please ensure that token's policy has \"patch\" capability: %w", err) + } + + return nil, fmt.Errorf("error performing merge patch to %s: %s", pathToMergePatch, err) + } + + metadata, err := extractVersionMetadata(secret) + if err != nil { + return nil, fmt.Errorf("secret was written successfully, but unable to view version metadata from response: %w", err) + } + + kvSecret := &KVSecret{ + Data: nil, // secret.Data in this case is the metadata + VersionMetadata: metadata, + Raw: secret, + } + + cm, err := extractCustomMetadata(secret) + if err != nil { + return nil, fmt.Errorf("error reading custom metadata for secret %s: %w", secretPath, err) + } + kvSecret.CustomMetadata = cm + + return kvSecret, nil +} + +func readThenWrite(ctx context.Context, client *Client, mountPath string, secretPath string, newData map[string]interface{}) (*KVSecret, error) { + // First, read the secret. 
+ existingVersion, err := client.KVv2(mountPath).Get(ctx, secretPath) + if err != nil { + return nil, fmt.Errorf("error reading secret as part of read-then-write patch operation: %w", err) + } + + // Make sure the secret already exists + if existingVersion == nil || existingVersion.Data == nil { + return nil, fmt.Errorf("no existing secret was found at %s when doing read-then-write patch operation: %w", secretPath, err) + } + + // Verify existing secret has metadata + if existingVersion.VersionMetadata == nil { + return nil, fmt.Errorf("no metadata found at %s; patch can only be used on existing data", secretPath) + } + + // Copy new data over with existing data + combinedData := existingVersion.Data + for k, v := range newData { + combinedData[k] = v + } + + updatedSecret, err := client.KVv2(mountPath).Put(ctx, secretPath, combinedData, WithCheckAndSet(existingVersion.VersionMetadata.Version)) + if err != nil { + return nil, fmt.Errorf("error writing secret to %s: %w", secretPath, err) + } + + return updatedSecret, nil +} + +func toMetadataMap(patchInput KVMetadataPatchInput) (map[string]interface{}, error) { + metadataMap := make(map[string]interface{}) + + const ( + casRequiredKey = "cas_required" + deleteVersionAfterKey = "delete_version_after" + maxVersionsKey = "max_versions" + customMetadataKey = "custom_metadata" + ) + + // The KVMetadataPatchInput struct is designed to have pointer fields so that + // the user can easily express the difference between explicitly setting a + // field back to its zero value (e.g. false), as opposed to just having + // the field remain unchanged (e.g. nil). This way, they only need to pass + // the fields they want to change. + if patchInput.MaxVersions != nil { + metadataMap[maxVersionsKey] = *(patchInput.MaxVersions) + } + if patchInput.CASRequired != nil { + metadataMap[casRequiredKey] = *(patchInput.CASRequired) + } + if patchInput.CustomMetadata != nil { + if len(patchInput.CustomMetadata) == 0 { // empty non-nil map means delete all the keys + metadataMap[customMetadataKey] = nil + } else { + metadataMap[customMetadataKey] = patchInput.CustomMetadata + } + } + if patchInput.DeleteVersionAfter != nil { + metadataMap[deleteVersionAfterKey] = patchInput.DeleteVersionAfter.String() + } + + return metadataMap, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go index f775dfb15a..f06263526f 100644 --- a/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go +++ b/vendor/github.com/hashicorp/vault/api/lifetime_watcher.go @@ -113,7 +113,9 @@ type LifetimeWatcherInput struct { // The new TTL, in seconds, that should be set on the lease. The TTL set // here may or may not be honored by the vault server, based on Vault - // configuration or any associated max TTL values. + // configuration or any associated max TTL values. If specified, the + // minimum of this value and the remaining lease duration will be used + // for grace period calculations. 
Increment int // RenewBehavior controls what happens when a renewal errors or the @@ -257,7 +259,7 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, initialTime := time.Now() priorDuration := time.Duration(initLeaseDuration) * time.Second - r.calculateGrace(priorDuration) + r.calculateGrace(priorDuration, time.Duration(r.increment)*time.Second) var errorBackoff backoff.BackOff for { @@ -345,7 +347,7 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, // extending. Once it stops extending, we've hit the max and need to // rely on the grace duration. if remainingLeaseDuration > priorDuration { - r.calculateGrace(remainingLeaseDuration) + r.calculateGrace(remainingLeaseDuration, time.Duration(r.increment)*time.Second) } priorDuration = remainingLeaseDuration @@ -373,16 +375,21 @@ func (r *LifetimeWatcher) doRenewWithOptions(tokenMode bool, nonRenewable bool, } } -// calculateGrace calculates the grace period based on a reasonable set of -// assumptions given the total lease time; it also adds some jitter to not have -// clients be in sync. -func (r *LifetimeWatcher) calculateGrace(leaseDuration time.Duration) { - if leaseDuration <= 0 { +// calculateGrace calculates the grace period based on the minimum of the +// remaining lease duration and the token increment value; it also adds some +// jitter to not have clients be in sync. +func (r *LifetimeWatcher) calculateGrace(leaseDuration, increment time.Duration) { + minDuration := leaseDuration + if minDuration > increment && increment > 0 { + minDuration = increment + } + + if minDuration <= 0 { r.grace = 0 return } - leaseNanos := float64(leaseDuration.Nanoseconds()) + leaseNanos := float64(minDuration.Nanoseconds()) jitterMax := 0.1 * leaseNanos // For a given lease duration, we want to allow 80-90% of that to elapse, diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index 39d61b96ab..747b9bc12c 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -323,7 +323,7 @@ func (c *Logical) UnwrapWithContext(ctx context.Context, wrappingToken string) ( c.c.SetToken(wrappingToken) } - secret, err = c.Read(wrappedResponseLocation) + secret, err = c.ReadWithContext(ctx, wrappedResponseLocation) if err != nil { return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) } diff --git a/vendor/github.com/hashicorp/vault/api/output_policy.go b/vendor/github.com/hashicorp/vault/api/output_policy.go new file mode 100644 index 0000000000..85d1617e5e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_policy.go @@ -0,0 +1,82 @@ +package api + +import ( + "fmt" + "net/http" + "net/url" + "strings" +) + +const ( + ErrOutputPolicyRequest = "output a policy, please" +) + +var LastOutputPolicyError *OutputPolicyError + +type OutputPolicyError struct { + method string + path string + finalHCLString string +} + +func (d *OutputPolicyError) Error() string { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return err.Error() + } + d.finalHCLString = p + } + + return ErrOutputPolicyRequest +} + +func (d *OutputPolicyError) HCLString() (string, error) { + if d.finalHCLString == "" { + p, err := d.buildSamplePolicy() + if err != nil { + return "", err + } + d.finalHCLString = p + } + return d.finalHCLString, nil +} + +// Builds a sample policy document from the request +func (d *OutputPolicyError) 
buildSamplePolicy() (string, error) { + var capabilities []string + switch d.method { + case http.MethodGet, "": + capabilities = append(capabilities, "read") + case http.MethodPost, http.MethodPut: + capabilities = append(capabilities, "create") + capabilities = append(capabilities, "update") + case http.MethodPatch: + capabilities = append(capabilities, "patch") + case http.MethodDelete: + capabilities = append(capabilities, "delete") + case "LIST": + capabilities = append(capabilities, "list") + } + + // sanitize, then trim the Vault address and v1 from the front of the path + path, err := url.PathUnescape(d.path) + if err != nil { + return "", fmt.Errorf("failed to unescape request URL characters: %v", err) + } + + // determine whether to add sudo capability + if IsSudoPath(path) { + capabilities = append(capabilities, "sudo") + } + + // the OpenAPI response has a / in front of each path, + // but policies need the path without that leading slash + path = strings.TrimLeft(path, "/") + + capStr := strings.Join(capabilities, `", "`) + return fmt.Sprintf( + `path "%s" { + capabilities = ["%s"] +}`, path, capStr), nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go index 9129ea0c3f..b8c396ebc0 100644 --- a/vendor/github.com/hashicorp/vault/api/output_string.go +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -19,58 +19,68 @@ type OutputStringError struct { TLSSkipVerify bool ClientCACert, ClientCAPath string ClientCert, ClientKey string - parsingError error - parsedCurlString string + finalCurlString string } func (d *OutputStringError) Error() string { - if d.parsedCurlString == "" { - d.parseRequest() - if d.parsingError != nil { - return d.parsingError.Error() + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return err.Error() } + d.finalCurlString = cs } return ErrOutputStringRequest } -func (d *OutputStringError) parseRequest() { +func (d *OutputStringError) CurlString() (string, error) { + if d.finalCurlString == "" { + cs, err := d.buildCurlString() + if err != nil { + return "", err + } + d.finalCurlString = cs + } + return d.finalCurlString, nil +} + +func (d *OutputStringError) buildCurlString() (string, error) { body, err := d.Request.BodyBytes() if err != nil { - d.parsingError = err - return + return "", err } // Build cURL string - d.parsedCurlString = "curl " + finalCurlString := "curl " if d.TLSSkipVerify { - d.parsedCurlString += "--insecure " + finalCurlString += "--insecure " } if d.Request.Method != http.MethodGet { - d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method) + finalCurlString = fmt.Sprintf("%s-X %s ", finalCurlString, d.Request.Method) } if d.ClientCACert != "" { clientCACert := strings.Replace(d.ClientCACert, "'", "'\"'\"'", -1) - d.parsedCurlString = fmt.Sprintf("%s--cacert '%s' ", d.parsedCurlString, clientCACert) + finalCurlString = fmt.Sprintf("%s--cacert '%s' ", finalCurlString, clientCACert) } if d.ClientCAPath != "" { clientCAPath := strings.Replace(d.ClientCAPath, "'", "'\"'\"'", -1) - d.parsedCurlString = fmt.Sprintf("%s--capath '%s' ", d.parsedCurlString, clientCAPath) + finalCurlString = fmt.Sprintf("%s--capath '%s' ", finalCurlString, clientCAPath) } if d.ClientCert != "" { clientCert := strings.Replace(d.ClientCert, "'", "'\"'\"'", -1) - d.parsedCurlString = fmt.Sprintf("%s--cert '%s' ", d.parsedCurlString, clientCert) + finalCurlString = fmt.Sprintf("%s--cert '%s' ", finalCurlString, 
clientCert) } if d.ClientKey != "" { clientKey := strings.Replace(d.ClientKey, "'", "'\"'\"'", -1) - d.parsedCurlString = fmt.Sprintf("%s--key '%s' ", d.parsedCurlString, clientKey) + finalCurlString = fmt.Sprintf("%s--key '%s' ", finalCurlString, clientKey) } for k, v := range d.Request.Header { for _, h := range v { if strings.ToLower(k) == "x-vault-token" { h = `$(vault print token)` } - d.parsedCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", d.parsedCurlString, k, h) + finalCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", finalCurlString, k, h) } } @@ -78,15 +88,8 @@ func (d *OutputStringError) parseRequest() { // We need to escape single quotes since that's what we're using to // quote the body escapedBody := strings.Replace(string(body), "'", "'\"'\"'", -1) - d.parsedCurlString = fmt.Sprintf("%s-d '%s' ", d.parsedCurlString, escapedBody) + finalCurlString = fmt.Sprintf("%s-d '%s' ", finalCurlString, escapedBody) } - d.parsedCurlString = fmt.Sprintf("%s%s", d.parsedCurlString, d.Request.URL.String()) -} - -func (d *OutputStringError) CurlString() string { - if d.parsedCurlString == "" { - d.parseRequest() - } - return d.parsedCurlString + return fmt.Sprintf("%s%s", finalCurlString, d.Request.URL.String()), nil } diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go index e7da60cc55..e8ceb9c2fd 100644 --- a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -9,6 +9,7 @@ import ( "flag" "net/url" "os" + "regexp" squarejwt "gopkg.in/square/go-jose.v2/jwt" @@ -23,6 +24,49 @@ var ( // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the // plugin. PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" + + // sudoPaths is a map containing the paths that require a token's policy + // to have the "sudo" capability. The keys are the paths as strings, in + // the same format as they are returned by the OpenAPI spec. The values + // are the regular expressions that can be used to test whether a given + // path matches that path or not (useful specifically for the paths that + // contain templated fields.) 
+ sudoPaths = map[string]*regexp.Regexp{ + "/auth/token/accessors/": regexp.MustCompile(`^/auth/token/accessors/$`), + "/pki/root": regexp.MustCompile(`^/pki/root$`), + "/pki/root/sign-self-issued": regexp.MustCompile(`^/pki/root/sign-self-issued$`), + "/sys/audit": regexp.MustCompile(`^/sys/audit$`), + "/sys/audit/{path}": regexp.MustCompile(`^/sys/audit/.+$`), + "/sys/auth/{path}": regexp.MustCompile(`^/sys/auth/.+$`), + "/sys/auth/{path}/tune": regexp.MustCompile(`^/sys/auth/.+/tune$`), + "/sys/config/auditing/request-headers": regexp.MustCompile(`^/sys/config/auditing/request-headers$`), + "/sys/config/auditing/request-headers/{header}": regexp.MustCompile(`^/sys/config/auditing/request-headers/.+$`), + "/sys/config/cors": regexp.MustCompile(`^/sys/config/cors$`), + "/sys/config/ui/headers/": regexp.MustCompile(`^/sys/config/ui/headers/$`), + "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), + "/sys/leases": regexp.MustCompile(`^/sys/leases$`), + "/sys/leases/lookup/": regexp.MustCompile(`^/sys/leases/lookup/$`), + "/sys/leases/lookup/{prefix}": regexp.MustCompile(`^/sys/leases/lookup/.+$`), + "/sys/leases/revoke-force/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-force/.+$`), + "/sys/leases/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/leases/revoke-prefix/.+$`), + "/sys/plugins/catalog/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[^/]+$`), + "/sys/plugins/catalog/{type}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+$`), + "/sys/plugins/catalog/{type}/{name}": regexp.MustCompile(`^/sys/plugins/catalog/[\w-]+/[^/]+$`), + "/sys/raw": regexp.MustCompile(`^/sys/raw$`), + "/sys/raw/{path}": regexp.MustCompile(`^/sys/raw/.+$`), + "/sys/remount": regexp.MustCompile(`^/sys/remount$`), + "/sys/revoke-force/{prefix}": regexp.MustCompile(`^/sys/revoke-force/.+$`), + "/sys/revoke-prefix/{prefix}": regexp.MustCompile(`^/sys/revoke-prefix/.+$`), + "/sys/rotate": regexp.MustCompile(`^/sys/rotate$`), + + // enterprise-only paths + "/sys/replication/dr/primary/secondary-token": regexp.MustCompile(`^/sys/replication/dr/primary/secondary-token$`), + "/sys/replication/performance/primary/secondary-token": regexp.MustCompile(`^/sys/replication/performance/primary/secondary-token$`), + "/sys/replication/primary/secondary-token": regexp.MustCompile(`^/sys/replication/primary/secondary-token$`), + "/sys/replication/reindex": regexp.MustCompile(`^/sys/replication/reindex$`), + "/sys/storage/raft/snapshot-auto/config/": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/$`), + "/sys/storage/raft/snapshot-auto/config/{name}": regexp.MustCompile(`^/sys/storage/raft/snapshot-auto/config/[^/]+$`), + } ) // PluginAPIClientMeta is a helper that plugins can use to configure TLS connections @@ -192,3 +236,28 @@ func VaultPluginTLSProviderContext(ctx context.Context, apiTLSConfig *TLSConfig) return tlsConfig, nil } } + +func SudoPaths() map[string]*regexp.Regexp { + return sudoPaths +} + +// Determine whether the given path requires the sudo capability +func IsSudoPath(path string) bool { + // Return early if the path is any of the non-templated sudo paths. + if _, ok := sudoPaths[path]; ok { + return true + } + + // Some sudo paths have templated fields in them. + // (e.g. /sys/revoke-prefix/{prefix}) + // The values in the sudoPaths map are actually regular expressions, + // so we can check if our path matches against them. 
+ for _, sudoPathRegexp := range sudoPaths { + match := sudoPathRegexp.MatchString(path) + if match { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index a3a288bf14..77e3ee9a9e 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -94,12 +94,7 @@ func (s *Secret) TokenRemainingUses() (int, error) { return -1, nil } - uses, err := parseutil.ParseInt(s.Data["num_uses"]) - if err != nil { - return 0, err - } - - return int(uses), nil + return parseutil.SafeParseInt(s.Data["num_uses"]) } // TokenPolicies returns the standardized list of policies for the given secret. diff --git a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go index 35bf403366..d89d59651a 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_hastatus.go +++ b/vendor/github.com/hashicorp/vault/api/sys_hastatus.go @@ -37,4 +37,7 @@ type HANode struct { ClusterAddress string `json:"cluster_address"` ActiveNode bool `json:"active_node"` LastEcho *time.Time `json:"last_echo"` + Version string `json:"version"` + UpgradeVersion string `json:"upgrade_version,omitempty"` + RedundancyZone string `json:"redundancy_zone,omitempty"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_mfa.go b/vendor/github.com/hashicorp/vault/api/sys_mfa.go new file mode 100644 index 0000000000..a1ba1bd80f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mfa.go @@ -0,0 +1,45 @@ +package api + +import ( + "context" + "fmt" + "net/http" +) + +func (c *Sys) MFAValidate(requestID string, payload map[string]interface{}) (*Secret, error) { + return c.MFAValidateWithContext(context.Background(), requestID, payload) +} + +func (c *Sys) MFAValidateWithContext(ctx context.Context, requestID string, payload map[string]interface{}) (*Secret, error) { + ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) + defer cancelFunc() + + body := map[string]interface{}{ + "mfa_request_id": requestID, + "mfa_payload": payload, + } + + r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mfa/validate")) + if err := r.SetJSONBody(body); err != nil { + return nil, fmt.Errorf("failed to set request body: %w", err) + } + + resp, err := c.c.rawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to parse secret from response: %w", err) + } + + if secret == nil { + return nil, fmt.Errorf("data from server response is empty") + } + + return secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_monitor.go b/vendor/github.com/hashicorp/vault/api/sys_monitor.go index df27746728..6813799f01 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_monitor.go +++ b/vendor/github.com/hashicorp/vault/api/sys_monitor.go @@ -5,11 +5,13 @@ import ( "context" "fmt" "net/http" + + "github.com/hashicorp/vault/sdk/helper/logging" ) // Monitor returns a channel that outputs strings containing the log messages // coming from the server. 
-func (c *Sys) Monitor(ctx context.Context, logLevel string) (chan string, error) { +func (c *Sys) Monitor(ctx context.Context, logLevel string, logFormat string) (chan string, error) { r := c.c.NewRequest(http.MethodGet, "/v1/sys/monitor") if logLevel == "" { @@ -18,6 +20,12 @@ func (c *Sys) Monitor(ctx context.Context, logLevel string) (chan string, error) r.Params.Add("log_level", logLevel) } + if logFormat == "" || logFormat == logging.UnspecifiedFormat.String() { + r.Params.Add("log_format", "standard") + } else { + r.Params.Add("log_format", logFormat) + } + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index 920af4c3cb..004ee222bf 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -100,23 +100,24 @@ func (c *Sys) ListPluginsWithContext(ctx context.Context, i *ListPluginsInput) ( PluginsByType: make(map[consts.PluginType][]string), } if i.Type == consts.PluginTypeUnknown { - for pluginTypeStr, pluginsRaw := range secret.Data { - pluginType, err := consts.ParsePluginType(pluginTypeStr) - if err != nil { - return nil, err + for _, pluginType := range consts.PluginTypes { + pluginsRaw, ok := secret.Data[pluginType.String()] + if !ok { + continue } pluginsIfc, ok := pluginsRaw.([]interface{}) if !ok { - return nil, fmt.Errorf("unable to parse plugins for %q type", pluginTypeStr) + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginType.String()) } - plugins := make([]string, len(pluginsIfc)) - for i, nameIfc := range pluginsIfc { + plugins := make([]string, 0, len(pluginsIfc)) + for _, nameIfc := range pluginsIfc { name, ok := nameIfc.(string) if !ok { + continue } - plugins[i] = name + plugins = append(plugins, name) } result.PluginsByType[pluginType] = plugins } diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go index df10bf672e..7806a1418d 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_raft.go +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -44,6 +44,7 @@ type AutopilotConfig struct { MaxTrailingLogs uint64 `json:"max_trailing_logs" mapstructure:"max_trailing_logs"` MinQuorum uint `json:"min_quorum" mapstructure:"min_quorum"` ServerStabilizationTime time.Duration `json:"server_stabilization_time" mapstructure:"-"` + DisableUpgradeMigration bool `json:"disable_upgrade_migration" mapstructure:"disable_upgrade_migration"` } // MarshalJSON makes the autopilot config fields JSON compatible @@ -55,6 +56,7 @@ func (ac *AutopilotConfig) MarshalJSON() ([]byte, error) { "max_trailing_logs": ac.MaxTrailingLogs, "min_quorum": ac.MinQuorum, "server_stabilization_time": ac.ServerStabilizationTime.String(), + "disable_upgrade_migration": ac.DisableUpgradeMigration, }) } @@ -84,28 +86,59 @@ func (ac *AutopilotConfig) UnmarshalJSON(b []byte) error { // AutopilotState represents the response of the raft autopilot state API type AutopilotState struct { - Healthy bool `mapstructure:"healthy"` - FailureTolerance int `mapstructure:"failure_tolerance"` - Servers map[string]*AutopilotServer `mapstructure:"servers"` - Leader string `mapstructure:"leader"` - Voters []string `mapstructure:"voters"` - NonVoters []string `mapstructure:"non_voters"` + Healthy bool `mapstructure:"healthy"` + FailureTolerance int `mapstructure:"failure_tolerance"` + Servers map[string]*AutopilotServer 
`mapstructure:"servers"` + Leader string `mapstructure:"leader"` + Voters []string `mapstructure:"voters"` + NonVoters []string `mapstructure:"non_voters"` + RedundancyZones map[string]AutopilotZone `mapstructure:"redundancy_zones,omitempty"` + Upgrade *AutopilotUpgrade `mapstructure:"upgrade_info,omitempty"` + OptimisticFailureTolerance int `mapstructure:"optimistic_failure_tolerance,omitempty"` } // AutopilotServer represents the server blocks in the response of the raft // autopilot state API. type AutopilotServer struct { - ID string `mapstructure:"id"` - Name string `mapstructure:"name"` - Address string `mapstructure:"address"` - NodeStatus string `mapstructure:"node_status"` - LastContact string `mapstructure:"last_contact"` - LastTerm uint64 `mapstructure:"last_term"` - LastIndex uint64 `mapstructure:"last_index"` - Healthy bool `mapstructure:"healthy"` - StableSince string `mapstructure:"stable_since"` - Status string `mapstructure:"status"` - Meta map[string]string `mapstructure:"meta"` + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + Address string `mapstructure:"address"` + NodeStatus string `mapstructure:"node_status"` + LastContact string `mapstructure:"last_contact"` + LastTerm uint64 `mapstructure:"last_term"` + LastIndex uint64 `mapstructure:"last_index"` + Healthy bool `mapstructure:"healthy"` + StableSince string `mapstructure:"stable_since"` + Status string `mapstructure:"status"` + Version string `mapstructure:"version"` + UpgradeVersion string `mapstructure:"upgrade_version,omitempty"` + RedundancyZone string `mapstructure:"redundancy_zone,omitempty"` + NodeType string `mapstructure:"node_type,omitempty"` +} + +type AutopilotZone struct { + Servers []string `mapstructure:"servers,omitempty"` + Voters []string `mapstructure:"voters,omitempty"` + FailureTolerance int `mapstructure:"failure_tolerance,omitempty"` +} + +type AutopilotUpgrade struct { + Status string `mapstructure:"status"` + TargetVersion string `mapstructure:"target_version,omitempty"` + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + TargetVersionReadReplicas []string `mapstructure:"target_version_read_replicas,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` + OtherVersionReadReplicas []string `mapstructure:"other_version_read_replicas,omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `mapstructure:"redundancy_zones,omitempty"` +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `mapstructure:"target_version_voters,omitempty"` + TargetVersionNonVoters []string `mapstructure:"target_version_non_voters,omitempty"` + OtherVersionVoters []string `mapstructure:"other_version_voters,omitempty"` + OtherVersionNonVoters []string `mapstructure:"other_version_non_voters,omitempty"` } // RaftJoin wraps RaftJoinWithContext using context.Background. 
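The new autopilot state fields above (Version, UpgradeVersion, RedundancyZone, NodeType, RedundancyZones, Upgrade, OptimisticFailureTolerance) surface redundancy-zone and automated-upgrade data through the existing state endpoint. A minimal sketch of how a consumer could inspect them, assuming the package's existing Sys().RaftAutopilotState() helper (defined elsewhere in sys_raft.go, not shown in this hunk) and a reachable Vault server configured via the environment:

    package main

    import (
        "fmt"
        "log"

        vault "github.com/hashicorp/vault/api"
    )

    func main() {
        // DefaultConfig reads VAULT_ADDR (and friends) from the environment.
        client, err := vault.NewClient(vault.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        state, err := client.Sys().RaftAutopilotState()
        if err != nil {
            log.Fatal(err)
        }

        fmt.Printf("healthy=%t failure_tolerance=%d\n", state.Healthy, state.FailureTolerance)

        // The zone and upgrade fields carry data only on clusters where
        // redundancy zones or automated upgrades are in use (Enterprise
        // features); elsewhere they are simply zero-valued.
        for name, zone := range state.RedundancyZones {
            fmt.Printf("zone %s: voters=%v tolerance=%d\n", name, zone.Voters, zone.FailureTolerance)
        }
        if state.Upgrade != nil {
            fmt.Printf("upgrade status=%s target=%s\n", state.Upgrade.Status, state.Upgrade.TargetVersion)
        }
    }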
diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go index dcd8d32a5b..189d61469a 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_seal.go +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -101,6 +101,7 @@ type SealStatusResponse struct { Progress int `json:"progress"` Nonce string `json:"nonce"` Version string `json:"version"` + BuildDate string `json:"build_date"` Migration bool `json:"migration"` ClusterName string `json:"cluster_name,omitempty"` ClusterID string `json:"cluster_id,omitempty"` diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index 046a3d3014..38fe3ed1f0 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -2,6 +2,7 @@ package yamux import ( "bufio" + "bytes" "fmt" "io" "io/ioutil" @@ -63,23 +64,26 @@ type Session struct { // sendCh is used to mark a stream as ready to send, // or to send a header out directly. - sendCh chan sendReady + sendCh chan *sendReady // recvDoneCh is closed when recv() exits to avoid a race // between stream registration and stream shutdown recvDoneCh chan struct{} + sendDoneCh chan struct{} // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex } // sendReady is used to either mark a stream as ready // or to directly send a header type sendReady struct { Hdr []byte + mu sync.Mutex // Protects Body from unsafe reads. Body []byte Err chan error } @@ -101,8 +105,9 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { inflight: make(map[uint32]struct{}), synCh: make(chan struct{}, config.AcceptBacklog), acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan sendReady, 64), + sendCh: make(chan *sendReady, 64), recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), shutdownCh: make(chan struct{}), } if client { @@ -255,10 +260,15 @@ func (s *Session) Close() error { return nil } s.shutdown = true + + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = ErrSessionShutdown } + s.shutdownErrLock.Unlock() + close(s.shutdownCh) + s.conn.Close() <-s.recvDoneCh @@ -267,17 +277,18 @@ func (s *Session) Close() error { for _, stream := range s.streams { stream.forceClose() } + <-s.sendDoneCh return nil } // exitErr is used to handle an error that is causing the // session to terminate. func (s *Session) exitErr(err error) { - s.shutdownLock.Lock() + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = err } - s.shutdownLock.Unlock() + s.shutdownErrLock.Unlock() s.Close() } @@ -373,7 +384,7 @@ func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) erro timerPool.Put(t) }() - ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} select { case s.sendCh <- ready: case <-s.shutdownCh: @@ -382,12 +393,34 @@ func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) erro return ErrConnectionWriteTimeout } + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. + } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. 
+ ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. + } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + select { case err := <-errCh: return err case <-s.shutdownCh: + bodyCopy() return ErrSessionShutdown case <-timer.C: + bodyCopy() return ErrConnectionWriteTimeout } } @@ -409,7 +442,7 @@ func (s *Session) sendNoWait(hdr header) error { }() select { - case s.sendCh <- sendReady{Hdr: hdr}: + case s.sendCh <- &sendReady{Hdr: hdr}: return nil case <-s.shutdownCh: return ErrSessionShutdown @@ -420,39 +453,59 @@ func (s *Session) sendNoWait(hdr header) error { // send is a long running goroutine that sends data func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer for { + bodyBuf.Reset() + select { case ready := <-s.sendCh: // Send a header if ready if ready.Hdr != nil { - sent := 0 - for sent < len(ready.Hdr) { - n, err := s.conn.Write(ready.Hdr[sent:]) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - s.exitErr(err) - return - } - sent += n + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err } } - // Send data from a body if given + ready.mu.Lock() if ready.Body != nil { - _, err := s.conn.Write(ready.Body) + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. + _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) - s.exitErr(err) - return + return err } } // No error, successful send asyncSendErr(ready.Err, nil) case <-s.shutdownCh: - return + return nil } } } @@ -639,8 +692,9 @@ func (s *Session) incomingStream(id uint32) error { // Backlog exceeded! RST the stream s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") delete(s.streams, id) - stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(stream.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index f444bdc3c0..d197d28e5e 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -2,6 +2,7 @@ package yamux import ( "bytes" + "errors" "io" "sync" "sync/atomic" @@ -200,6 +201,10 @@ START: // Send the header s.sendHdr.encode(typeData, flags, s.id, max) if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+			s.sendHdr = header(make([]byte, headerSize))
+		}
 		return 0, err
 	}
 
@@ -273,6 +278,10 @@ func (s *Stream) sendWindowUpdate() error {
 	// Send the header
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
 	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
+			// Message left in ready queue, header re-use is unsafe.
+			s.controlHdr = header(make([]byte, headerSize))
+		}
 		return err
 	}
 	return nil
@@ -287,6 +296,10 @@ func (s *Stream) sendClose() error {
 	flags |= flagFIN
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
 	if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+		if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) {
+			// Message left in ready queue, header re-use is unsafe.
+			s.controlHdr = header(make([]byte, headerSize))
+		}
 		return err
 	}
 	return nil
@@ -362,8 +375,9 @@ func (s *Stream) closeTimeout() {
 	// Send a RST so the remote side closes too.
 	s.sendLock.Lock()
 	defer s.sendLock.Unlock()
-	s.sendHdr.encode(typeWindowUpdate, flagRST, s.id, 0)
-	s.session.sendNoWait(s.sendHdr)
+	hdr := header(make([]byte, headerSize))
+	hdr.encode(typeWindowUpdate, flagRST, s.id, 0)
+	s.session.sendNoWait(hdr)
 }
 
 // forceClose is used for when the session is exiting
@@ -465,6 +479,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	if length > s.recvWindow {
 		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+		s.recvLock.Unlock()
 		return ErrRecvWindowExceeded
 	}
 
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go
index 9a5fa43312..25e12c033e 100644
--- a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go
@@ -79,6 +79,8 @@ const (
 	// The SPDX mandates 'spdxVersion' field, so predicate type can omit
 	// version.
 	PredicateSPDX = "https://spdx.dev/Document"
+	// PredicateCycloneDX represents a CycloneDX SBOM
+	PredicateCycloneDX = "https://cyclonedx.org/schema"
 	// PredicateLinkV1 represents an in-toto 0.9 link.
 	PredicateLinkV1 = "https://in-toto.io/Link/v1"
 )
@@ -1008,6 +1010,16 @@ type SPDXStatement struct {
 	Predicate interface{} `json:"predicate"`
 }
 
+/*
+CycloneDXStatement defines a CycloneDX SBOM in the predicate. Like its
+SPDX counterpart, it is not currently serialized to a typed structure;
+the predicate is an empty interface, like the generic Statement.
+*/ +type CycloneDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + /* DSSESigner provides signature generation and validation based on the SSL Signing Spec: https://github.com/secure-systems-lab/signing-spec diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/CHANGELOG.md b/vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/CHANGELOG.md rename to vendor/github.com/jellydator/ttlcache/v2/CHANGELOG.md diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/LICENSE b/vendor/github.com/jellydator/ttlcache/v2/LICENSE similarity index 97% rename from vendor/github.com/ReneKroon/ttlcache/v2/LICENSE rename to vendor/github.com/jellydator/ttlcache/v2/LICENSE index b3b587dceb..f36a3b9678 100644 --- a/vendor/github.com/ReneKroon/ttlcache/v2/LICENSE +++ b/vendor/github.com/jellydator/ttlcache/v2/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 Rene Kroon +Copyright (c) 2022 Jellydator Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/Readme.md b/vendor/github.com/jellydator/ttlcache/v2/Readme.md similarity index 79% rename from vendor/github.com/ReneKroon/ttlcache/v2/Readme.md rename to vendor/github.com/jellydator/ttlcache/v2/Readme.md index 6bc07b8e9a..9c736cdbd0 100644 --- a/vendor/github.com/ReneKroon/ttlcache/v2/Readme.md +++ b/vendor/github.com/jellydator/ttlcache/v2/Readme.md @@ -1,7 +1,7 @@ # TTLCache - an in-memory cache with expiration -[![Documentation](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/ReneKroon/ttlcache/v2) -[![Release](https://img.shields.io/github/release/ReneKroon/ttlcache.svg?label=Release)](https://github.com/ReneKroon/ttlcache/releases) +**Although v2 of ttlcache is not yet deprecated, v3 should be used as it +contains quite a few additions and improvements.** TTLCache is a simple key/value cache in golang with the following functions: @@ -15,15 +15,9 @@ TTLCache is a simple key/value cache in golang with the following functions: Note (issue #25): by default, due to historic reasons, the TTL will be reset on each cache hit and you need to explicitly configure the cache to use a TTL that will not get extended. -[![Build Status](https://www.travis-ci.com/ReneKroon/ttlcache.svg?branch=master)](https://travis-ci.com/ReneKroon/ttlcache) -[![Go Report Card](https://goreportcard.com/badge/github.com/ReneKroon/ttlcache)](https://goreportcard.com/report/github.com/ReneKroon/ttlcache) -[![Coverage Status](https://coveralls.io/repos/github/ReneKroon/ttlcache/badge.svg?branch=master)](https://coveralls.io/github/ReneKroon/ttlcache?branch=master) -[![GitHub issues](https://img.shields.io/github/issues/ReneKroon/ttlcache.svg)](https://github.com/ReneKroon/ttlcache/issues) -[![license](https://img.shields.io/github/license/ReneKroon/ttlcache.svg?maxAge=2592000)](https://github.com/ReneKroon/ttlcache/LICENSE) - ## Usage -`go get github.com/ReneKroon/ttlcache/v2` +`go get github.com/jellydator/ttlcache/v2` You can copy it as a full standalone demo program. The first snippet is basic usage, where the second exploits more options in the cache. 
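For downstream consumers, the ReneKroon-to-jellydator move is an import-path-only rename for v2 (the source files below are renamed with 100% similarity). A minimal sketch of the basic usage the README refers to, under the new module path (the key and TTL values are illustrative):

    package main

    import (
        "fmt"
        "time"

        "github.com/jellydator/ttlcache/v2"
    )

    func main() {
        cache := ttlcache.NewCache()
        defer cache.Close()

        // Entries expire 30 seconds after they were last touched; note the
        // README caveat that cache hits extend the TTL by default.
        _ = cache.SetTTL(30 * time.Second)
        _ = cache.Set("session", "token-123")

        if value, err := cache.Get("session"); err != ttlcache.ErrNotFound {
            fmt.Println("hit:", value)
        }
    }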
@@ -35,7 +29,7 @@ import ( "fmt" "time" - "github.com/ReneKroon/ttlcache/v2" + "github.com/jellydator/ttlcache/v2" ) var notFound = ttlcache.ErrNotFound @@ -65,7 +59,7 @@ import ( "fmt" "time" - "github.com/ReneKroon/ttlcache/v2" + "github.com/jellydator/ttlcache/v2" ) var ( diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/cache.go b/vendor/github.com/jellydator/ttlcache/v2/cache.go similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/cache.go rename to vendor/github.com/jellydator/ttlcache/v2/cache.go diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/evictionreason_enumer.go b/vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/evictionreason_enumer.go rename to vendor/github.com/jellydator/ttlcache/v2/evictionreason_enumer.go diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/item.go b/vendor/github.com/jellydator/ttlcache/v2/item.go similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/item.go rename to vendor/github.com/jellydator/ttlcache/v2/item.go diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/metrics.go b/vendor/github.com/jellydator/ttlcache/v2/metrics.go similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/metrics.go rename to vendor/github.com/jellydator/ttlcache/v2/metrics.go diff --git a/vendor/github.com/ReneKroon/ttlcache/v2/priority_queue.go b/vendor/github.com/jellydator/ttlcache/v2/priority_queue.go similarity index 100% rename from vendor/github.com/ReneKroon/ttlcache/v2/priority_queue.go rename to vendor/github.com/jellydator/ttlcache/v2/priority_queue.go diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go index 42f0f8eb12..9b1c20fc2c 100644 --- a/vendor/github.com/jhump/protoreflect/desc/descriptor.go +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go @@ -189,6 +189,9 @@ func (fd *FileDescriptor) GetServices() []*ServiceDescriptor { // element with the given fully-qualified symbol name. If no such element // exists then this method returns nil. func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor { + if len(symbol) == 0 { + return nil + } if symbol[0] == '.' 
{ symbol = symbol[1:] } diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go index 6b48865a59..25d619a288 100644 --- a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go @@ -1,4 +1,6 @@ -//+build appengine gopherjs purego +//go:build appengine || gopherjs || purego +// +build appengine gopherjs purego + // NB: other environments where unsafe is unappropriate should use "purego" build tag // https://github.com/golang/go/issues/23172 diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go index 52370d841d..691f0d88b6 100644 --- a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go +++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go @@ -1,4 +1,6 @@ -//+build !appengine,!gopherjs,!purego +//go:build !appengine && !gopherjs && !purego +// +build !appengine,!gopherjs,!purego + // NB: other environments where unsafe is unappropriate should use "purego" build tag // https://github.com/golang/go/issues/23172 diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go index 1740dce7d0..642f125ef0 100644 --- a/vendor/github.com/jhump/protoreflect/desc/doc.go +++ b/vendor/github.com/jhump/protoreflect/desc/doc.go @@ -24,18 +24,42 @@ // properties that are not immediately accessible through rich descriptor's // methods. // +// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same +// repo to see just how useful rich descriptors really are. +// +// +// Loading Descriptors +// // Rich descriptors can be accessed in similar ways as their "poor" cousins // (descriptor protos). Instead of using proto.FileDescriptor, use // desc.LoadFileDescriptor. Message descriptors and extension field descriptors // can also be easily accessed using desc.LoadMessageDescriptor and // desc.LoadFieldDescriptorForExtension, respectively. // +// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then +// the descriptors returned from these Load* functions will include source code +// information, and thus include comments for elements. +// +// +// Creating Descriptors +// // It is also possible create rich descriptors for proto messages that a given // Go program doesn't even know about. For example, they could be loaded from a // FileDescriptorSet file (which can be generated by protoc) or loaded from a // server. This enables interesting things like dynamic clients: where a Go // program can be an RPC client of a service it wasn't compiled to know about. // -// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same -// repo to see just how useful rich descriptors really are. +// You cannot create a message descriptor without also creating its enclosing +// file, because the enclosing file is what contains other relevant information +// like other symbols and dependencies/imports, which is how type references +// are resolved (such as when a field in a message has a type that is another +// message or enum). +// +// So the functions in this package for creating descriptors are all for +// creating *file* descriptors. See the various Create* functions for more +// information. +// +// Also see the desc/builder sub-package, for another API that makes it easier +// to synthesize descriptors programmatically. 
+// package desc diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go index 4a05830d1f..24d3e9b5e0 100644 --- a/vendor/github.com/jhump/protoreflect/desc/load.go +++ b/vendor/github.com/jhump/protoreflect/desc/load.go @@ -8,6 +8,7 @@ import ( "github.com/golang/protobuf/proto" dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "github.com/jhump/protoreflect/desc/sourceinfo" "github.com/jhump/protoreflect/internal" ) @@ -54,6 +55,7 @@ func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, } func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) { + fd.SourceCodeInfo = sourceinfo.SourceInfoForFile(fd.GetName()) deps := make([]*FileDescriptor, len(fd.GetDependency())) for i, dep := range fd.GetDependency() { resolvedDep := r.ResolveImport(fd.GetName(), dep) diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go index 9f4a74e697..40c32a42cf 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go @@ -163,21 +163,41 @@ func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNam } // FieldReferenceNode is a reference to a field name. It can indicate a regular -// field (simple unqualified name) or an extension field (possibly-qualified -// name that is enclosed either in brackets or parentheses). +// field (simple unqualified name), an extension field (possibly-qualified name +// that is enclosed either in brackets or parentheses), or an "any" type +// reference (a type URL in the form "server.host/fully.qualified.Name" that is +// enclosed in brackets). // -// This is used in options to indicate the names of custom options (which are +// Extension names are used in options to refer to custom options (which are // actually extensions), in which case the name is enclosed in parentheses "(" -// and ")". It is also used in message literals to set extension fields, in -// which case the name is enclosed in square brackets "[" and "]". +// and ")". They can also be used to refer to extension fields of options. // -// Example: +// Extension names are also used in message literals to set extension fields, +// in which case the name is enclosed in square brackets "[" and "]". +// +// "Any" type references can only be used in message literals, and are not +// allowed in option names. They are always enclosed in square brackets. An +// "any" type reference is distinguished from an extension name by the presence +// of a slash, which must be present in an "any" type reference and must be +// absent in an extension name. +// +// Examples: +// foobar // (foo.bar) +// [foo.bar] +// [type.googleapis.com/foo.bar] +// type FieldReferenceNode struct { compositeNode - Open *RuneNode // only present for extension names - Name IdentValueNode - Close *RuneNode // only present for extension names + Open *RuneNode // only present for extension names and "any" type references + + // only present for "any" type references + UrlPrefix IdentValueNode + Slash *RuneNode + + Name IdentValueNode + + Close *RuneNode // only present for extension names and "any" type references } // NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field. 
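The new Slash field is what lets consumers distinguish the three reference forms the doc comment above enumerates. A small sketch of a caller-side helper built on the accessors added in the next hunk (the package name astutil and the function describe are hypothetical, not part of the library):

    package astutil

    import "github.com/jhump/protoreflect/desc/protoparse/ast"

    // describe classifies a parsed field reference. The three cases are
    // mutually exclusive: an "any" type reference always has a Slash, an
    // extension name has Open but no Slash, and a regular field has neither.
    func describe(ref *ast.FieldReferenceNode) string {
        switch {
        case ref.IsAnyTypeReference():
            return "any type reference: " + ref.Value() // e.g. [type.googleapis.com/foo.bar]
        case ref.IsExtension():
            return "extension name: " + ref.Value() // e.g. (foo.bar) or [foo.bar]
        default:
            return "regular field: " + ref.Value() // e.g. foobar
        }
    }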
@@ -219,14 +239,55 @@ func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, clos
 	}
 }
 
+// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any"
+// type reference. All args must be non-nil. The openSym and closeSym runes
+// should be "[" and "]". The slashSym rune should be "/".
+func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	if urlPrefix == nil {
+		panic("urlPrefix is nil")
+	}
+	if slashSym == nil {
+		panic("slashSym is nil")
+	}
+	children := []Node{openSym, urlPrefix, slashSym, name, closeSym}
+	return &FieldReferenceNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:      openSym,
+		UrlPrefix: urlPrefix,
+		Slash:     slashSym,
+		Name:      name,
+		Close:     closeSym,
+	}
+}
+
 // IsExtension reports if this is an extension name or not (e.g. enclosed in
 // punctuation, such as parentheses or brackets).
 func (a *FieldReferenceNode) IsExtension() bool {
-	return a.Open != nil
+	return a.Open != nil && a.Slash == nil
+}
+
+// IsAnyTypeReference reports if this is an "any" type reference or not
+// (i.e. a type URL enclosed in square brackets).
+func (a *FieldReferenceNode) IsAnyTypeReference() bool {
+	return a.Slash != nil
 }
 
 func (a *FieldReferenceNode) Value() string {
 	if a.Open != nil {
+		if a.Slash != nil {
+			return string(a.Open.Rune) + string(a.UrlPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
+		}
 		return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
 	} else {
 		return string(a.Name.AsIdentifier())
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
index 341676f333..77d3e8d84a 100644
--- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
@@ -188,7 +188,7 @@ func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) {
 	return AsInt32(n.EndVal, min, max)
 }
 
-// ReservedNode represents reserved declaration, whic can be used to reserve
+// ReservedNode represents a reserved declaration, which can be used to reserve
 // either names or numbers. Examples:
 //
 //   reserved 1, 10-12, 15;
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go
index b19ab47bc8..3855938f8c 100644
--- a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go
@@ -366,6 +366,11 @@ func (n *SignedFloatLiteralNode) AsFloat() float64 {
 }
 
 // BoolLiteralNode represents a boolean literal.
+//
+// Deprecated: The AST uses IdentNode for boolean literals, where the
+// identifier value is "true" or "false". This is required because an
+// identifier "true" is not necessarily a boolean value as it could also
+// be an enum value named "true" (ditto for "false").
 type BoolLiteralNode struct {
 	*KeywordNode
 	Val bool
@@ -526,8 +531,9 @@ type MessageFieldNode struct {
 	compositeNode
 	Name *FieldReferenceNode
 	// Sep represents the ':' separator between the name and value.
If - // the value is a message literal (and thus starts with '<' or '{'), - // then the separator is optional, and thus may be nil. + // the value is a message literal (and thus starts with '<' or '{') + // or an array literal (starting with '[') then the separator is + // optional, and thus may be nil. Sep *RuneNode Val ValueNode } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/descriptor_protos.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/descriptor_protos.go index 41134541d7..eee8cb6312 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/descriptor_protos.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/descriptor_protos.go @@ -279,7 +279,7 @@ func (r *parseResult) asExtensionRanges(node *ast.ExtensionRangeNode, maxTag int opts := r.asUninterpretedOptions(node.Options.GetElements()) ers := make([]*dpb.DescriptorProto_ExtensionRange, len(node.Ranges)) for i, rng := range node.Ranges { - start, end := getRangeBounds(r, rng, 0, maxTag) + start, end := getRangeBounds(r, rng, 1, maxTag) er := &dpb.DescriptorProto_ExtensionRange{ Start: proto.Int32(start), End: proto.Int32(end + 1), @@ -506,7 +506,7 @@ func isMessageSetWireFormat(res *parseResult, scope string, md *dpb.DescriptorPr } func (r *parseResult) asMessageReservedRange(rng *ast.RangeNode, maxTag int32) *dpb.DescriptorProto_ReservedRange { - start, end := getRangeBounds(r, rng, 0, maxTag) + start, end := getRangeBounds(r, rng, 1, maxTag) rr := &dpb.DescriptorProto_ReservedRange{ Start: proto.Int32(start), End: proto.Int32(end + 1), diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go index e8f2cd0af9..ed67f56257 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "math" "strconv" "strings" "unicode/utf8" @@ -61,11 +62,6 @@ func (rr *runeReader) endMark() string { return m } -func lexError(l protoLexer, pos *SourcePos, err string) { - pl := l.(*protoLex) - _ = pl.errs.handleErrorWithPos(pos, err) -} - type protoLex struct { filename string input *runeReader @@ -226,7 +222,7 @@ func (l *protoLex) Lex(lval *protoSymType) int { l.offset += n l.adjustPos(c) - if strings.ContainsRune("\n\r\t ", c) { + if strings.ContainsRune("\n\r\t\f\v ", c) { l.ws = append(l.ws, c) continue } @@ -241,11 +237,10 @@ func (l *protoLex) Lex(lval *protoSymType) int { } if cn >= '0' && cn <= '9' { l.adjustPos(cn) - token := []rune{c, cn} - token = l.readNumber(token, false, true) - f, err := strconv.ParseFloat(string(token), 64) + token := l.readNumber(c, cn) + f, err := parseFloat(token) if err != nil { - l.setError(lval, err) + l.setError(lval, numError(err, "float", token)) return _ERROR } l.setFloat(lval, f) @@ -271,66 +266,49 @@ func (l *protoLex) Lex(lval *protoSymType) int { if c >= '0' && c <= '9' { // integer or float literal - if c == '0' { - cn, _, err := l.input.readRune() + token := l.readNumber(c) + if strings.HasPrefix(token, "0x") || strings.HasPrefix(token, "0X") { + // hexadecimal + ui, err := strconv.ParseUint(token[2:], 16, 64) if err != nil { - l.setInt(lval, 0) - return _INT_LIT - } - if cn == 'x' || cn == 'X' { - cnn, _, err := l.input.readRune() - if err != nil { - l.input.unreadRune(cn) - l.setInt(lval, 0) - return _INT_LIT - } - if (cnn >= '0' && cnn <= '9') || (cnn >= 'a' && cnn <= 'f') || (cnn >= 'A' && cnn <= 'F') { - // hexadecimal! 
- l.adjustPos(cn, cnn) - token := []rune{cnn} - token = l.readHexNumber(token) - ui, err := strconv.ParseUint(string(token), 16, 64) - if err != nil { - l.setError(lval, err) - return _ERROR - } - l.setInt(lval, ui) - return _INT_LIT - } - l.input.unreadRune(cnn) - l.input.unreadRune(cn) - l.setInt(lval, 0) - return _INT_LIT - } else { - l.input.unreadRune(cn) + l.setError(lval, numError(err, "hexadecimal integer", token[2:])) + return _ERROR } + l.setInt(lval, ui) + return _INT_LIT } - token := []rune{c} - token = l.readNumber(token, true, true) - numstr := string(token) - if strings.Contains(numstr, ".") || strings.Contains(numstr, "e") || strings.Contains(numstr, "E") { + if strings.Contains(token, ".") || strings.Contains(token, "e") || strings.Contains(token, "E") { // floating point! - f, err := strconv.ParseFloat(numstr, 64) + f, err := parseFloat(token) if err != nil { - l.setError(lval, err) + l.setError(lval, numError(err, "float", token)) return _ERROR } l.setFloat(lval, f) return _FLOAT_LIT } // integer! (decimal or octal) - ui, err := strconv.ParseUint(numstr, 0, 64) + base := 10 + if token[0] == '0' { + base = 8 + } + ui, err := strconv.ParseUint(token, base, 64) if err != nil { + kind := "integer" + if base == 8 { + kind = "octal integer" + } if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange { // if it's too big to be an int, parse it as a float var f float64 - f, err = strconv.ParseFloat(numstr, 64) + kind = "float" + f, err = parseFloat(token) if err == nil { l.setFloat(lval, f) return _FLOAT_LIT } } - l.setError(lval, err) + l.setError(lval, numError(err, kind, token)) return _ERROR } l.setInt(lval, ui) @@ -357,7 +335,10 @@ func (l *protoLex) Lex(lval *protoSymType) int { } if cn == '/' { l.adjustPos(cn) - hitNewline := l.skipToEndOfLineComment() + hitNewline, hasErr := l.skipToEndOfLineComment(lval) + if hasErr { + return _ERROR + } comment := l.newComment() comment.PosRange.End.Col++ if hitNewline { @@ -371,22 +352,54 @@ func (l *protoLex) Lex(lval *protoSymType) int { } if cn == '*' { l.adjustPos(cn) - if ok := l.skipToEndOfBlockComment(); !ok { + ok, hasErr := l.skipToEndOfBlockComment(lval) + if hasErr { + return _ERROR + } + if !ok { l.setError(lval, errors.New("block comment never terminates, unexpected EOF")) return _ERROR - } else { - l.comments = append(l.comments, l.newComment()) } + l.comments = append(l.comments, l.newComment()) continue } l.input.unreadRune(cn) } + if c < 32 || c == 127 { + l.setError(lval, errors.New("invalid control character")) + return _ERROR + } + if !strings.ContainsRune(";,.:=-+(){}[]<>/", c) { + l.setError(lval, errors.New("invalid character")) + return _ERROR + } l.setRune(lval, c) return int(c) } } +func parseFloat(token string) (float64, error) { + // strconv.ParseFloat allows _ to separate digits, but protobuf does not + if strings.ContainsRune(token, '_') { + return 0, &strconv.NumError{ + Func: "parseFloat", + Num: token, + Err: strconv.ErrSyntax, + } + } + f, err := strconv.ParseFloat(token, 64) + if err == nil { + return f, nil + } + if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange && math.IsInf(f, 1) { + // protoc doesn't complain about float overflow and instead just uses "infinity" + // so we mirror that behavior by just returning infinity and ignoring the error + return f, nil + } + return f, err +} + func (l *protoLex) posRange() ast.PosRange { return ast.PosRange{ Start: SourcePos{ @@ -526,79 +539,47 @@ func (l *protoLex) setError(lval *protoSymType, err error) 
{ lval.err = l.addSourceError(err) } -func (l *protoLex) readNumber(sofar []rune, allowDot bool, allowExp bool) []rune { +func (l *protoLex) readNumber(sofar ...rune) string { token := sofar + allowExpSign := false for { c, _, err := l.input.readRune() if err != nil { break } - if c == '.' { - if !allowDot { - l.input.unreadRune(c) - break - } - allowDot = false - } else if c == 'e' || c == 'E' { - if !allowExp { - l.input.unreadRune(c) - break - } - allowExp = false - cn, _, err := l.input.readRune() - if err != nil { - l.input.unreadRune(c) - break - } - if cn == '-' || cn == '+' { - cnn, _, err := l.input.readRune() - if err != nil { - l.input.unreadRune(cn) - l.input.unreadRune(c) - break - } - if cnn < '0' || cnn > '9' { - l.input.unreadRune(cnn) - l.input.unreadRune(cn) - l.input.unreadRune(c) - break - } - l.adjustPos(c) - token = append(token, c) - c, cn = cn, cnn - } else if cn < '0' || cn > '9' { - l.input.unreadRune(cn) - l.input.unreadRune(c) - break - } - l.adjustPos(c) - token = append(token, c) - c = cn - } else if c < '0' || c > '9' { + if (c == '-' || c == '+') && !allowExpSign { l.input.unreadRune(c) break } - l.adjustPos(c) - token = append(token, c) - } - return token -} - -func (l *protoLex) readHexNumber(sofar []rune) []rune { - token := sofar - for { - c, _, err := l.input.readRune() - if err != nil { - break - } - if (c < 'a' || c > 'f') && (c < 'A' || c > 'F') && (c < '0' || c > '9') { + allowExpSign = false + if c != '.' && c != '_' && (c < '0' || c > '9') && + (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && + c != '-' && c != '+' { + // no more chars in the number token l.input.unreadRune(c) break } + if c == 'e' || c == 'E' { + // scientific notation char can be followed by + // an exponent sign + allowExpSign = true + } l.adjustPos(c) token = append(token, c) } - return token + return string(token) +} + +func numError(err error, kind, s string) error { + ne, ok := err.(*strconv.NumError) + if !ok { + return err + } + if ne.Err == strconv.ErrRange { + return fmt.Errorf("value out of range for %s: %s", kind, s) + } + // syntax error + return fmt.Errorf("invalid syntax in %s value: %s", kind, s) } func (l *protoLex) readIdentifier(sofar []rune) []rune { @@ -772,34 +753,42 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { return buf.String(), nil } -func (l *protoLex) skipToEndOfLineComment() bool { +func (l *protoLex) skipToEndOfLineComment(lval *protoSymType) (ok, hasErr bool) { for { c, _, err := l.input.readRune() if err != nil { - return false + return false, false } - if c == '\n' { - return true + switch c { + case '\n': + return true, false + case 0: + l.setError(lval, errors.New("invalid control character")) + return false, true } l.adjustPos(c) } } -func (l *protoLex) skipToEndOfBlockComment() bool { +func (l *protoLex) skipToEndOfBlockComment(lval *protoSymType) (ok, hasErr bool) { for { c, _, err := l.input.readRune() if err != nil { - return false + return false, false + } + if c == 0 { + l.setError(lval, errors.New("invalid control character")) + return false, true } l.adjustPos(c) if c == '*' { c, _, err := l.input.readRune() if err != nil { - return false + return false, false } if c == '/' { l.adjustPos(c) - return true + return true, false } l.input.unreadRune(c) } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go index 2aa32f0868..7bc27b08d1 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go +++ 
b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go @@ -71,7 +71,7 @@ func (l *linker) linkFiles() (map[string]*desc.FileDescriptor, error) { } // we should now have any message_set_wire_format options parsed // and can do further validation on tag ranges - if err := checkExtensionsInFile(fd, r); err != nil { + if err := l.checkExtensionsInFile(fd, r); err != nil { return nil, err } } @@ -145,7 +145,7 @@ func (l *linker) createDescriptorPool() error { file1, file2 = file2, file1 desc1, desc2 = desc2, desc1 } - node := l.files[file2].nodes[desc2] + node := l.files[file2].getNode(desc2) if err := l.errs.handleErrorWithPos(node.Start(), "duplicate symbol %s: already defined as %s in %q", k, descriptorType(desc1), file1); err != nil { return err } @@ -180,6 +180,11 @@ func addMessageToPool(r *parseResult, pool map[string]proto.Message, errs *error return err } prefix = fqn + "." + for _, ood := range md.OneofDecl { + if err := addOneofToPool(r, pool, errs, prefix, ood); err != nil { + return err + } + } for _, fld := range md.Field { if err := addFieldToPool(r, pool, errs, prefix, fld); err != nil { return err @@ -208,6 +213,11 @@ func addFieldToPool(r *parseResult, pool map[string]proto.Message, errs *errorHa return addToPool(r, pool, errs, fqn, fld) } +func addOneofToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, ood *dpb.OneofDescriptorProto) error { + fqn := prefix + ood.GetName() + return addToPool(r, pool, errs, fqn, ood) +} + func addEnumToPool(r *parseResult, pool map[string]proto.Message, errs *errorHandler, prefix string, ed *dpb.EnumDescriptorProto) error { fqn := prefix + ed.GetName() if err := addToPool(r, pool, errs, fqn, ed); err != nil { @@ -281,6 +291,8 @@ func descriptorType(m proto.Message) string { return "method" case *dpb.FileDescriptorProto: return "file" + case *dpb.OneofDescriptorProto: + return "oneof" default: // shouldn't be possible return fmt.Sprintf("%T", m) @@ -299,7 +311,7 @@ func (l *linker) resolveReferences() error { prefix += "." } if fd.Options != nil { - if err := l.resolveOptions(r, fd, "file", fd.GetName(), proto.MessageName(fd.Options), fd.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "file", fd.GetName(), fd.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -330,14 +342,14 @@ func (l *linker) resolveReferences() error { func (l *linker) resolveEnumTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, ed *dpb.EnumDescriptorProto, scopes []scope) error { enumFqn := prefix + ed.GetName() if ed.Options != nil { - if err := l.resolveOptions(r, fd, "enum", enumFqn, proto.MessageName(ed.Options), ed.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "enum", enumFqn, ed.Options.UninterpretedOption, scopes); err != nil { return err } } for _, evd := range ed.Value { if evd.Options != nil { evFqn := enumFqn + "." 
+ evd.GetName() - if err := l.resolveOptions(r, fd, "enum value", evFqn, proto.MessageName(evd.Options), evd.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "enum value", evFqn, evd.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -347,16 +359,21 @@ func (l *linker) resolveEnumTypes(r *parseResult, fd *dpb.FileDescriptorProto, p func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, md *dpb.DescriptorProto, scopes []scope) error { fqn := prefix + md.GetName() - scope := messageScope(fqn, isProto3(fd), l, fd) - scopes = append(scopes, scope) - prefix = fqn + "." + // Strangely, when protoc resolves extension names, it uses the *enclosing* scope + // instead of the message's scope. So if the message contains an extension named "i", + // an option cannot refer to it as simply "i" but must qualify it (at a minimum "Msg.i"). + // So we don't add this messages scope to our scopes slice until *after* we do options. if md.Options != nil { - if err := l.resolveOptions(r, fd, "message", fqn, proto.MessageName(md.Options), md.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "message", fqn, md.Options.UninterpretedOption, scopes); err != nil { return err } } + scope := messageScope(fqn, isProto3(fd), l, fd) + scopes = append(scopes, scope) + prefix = fqn + "." + for _, nmd := range md.NestedType { if err := l.resolveMessageTypes(r, fd, prefix, nmd, scopes); err != nil { return err @@ -375,7 +392,7 @@ func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto for _, ood := range md.OneofDecl { if ood.Options != nil { ooName := fmt.Sprintf("%s.%s", fqn, ood.GetName()) - if err := l.resolveOptions(r, fd, "oneof", ooName, proto.MessageName(ood.Options), ood.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "oneof", ooName, ood.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -388,7 +405,7 @@ func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto for _, er := range md.ExtensionRange { if er.Options != nil { erName := fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1) - if err := l.resolveOptions(r, fd, "extension range", erName, proto.MessageName(er.Options), er.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "extension range", erName, er.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -447,7 +464,7 @@ func (l *linker) resolveFieldTypes(r *parseResult, fd *dpb.FileDescriptorProto, } if fld.Options != nil { - if err := l.resolveOptions(r, fd, elemType, thisName, proto.MessageName(fld.Options), fld.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, elemType, thisName, fld.Options.UninterpretedOption, scopes); err != nil { return err } } @@ -487,22 +504,26 @@ func (l *linker) resolveFieldTypes(r *parseResult, fd *dpb.FileDescriptorProto, } func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, sd *dpb.ServiceDescriptorProto, scopes []scope) error { - thisName := prefix + sd.GetName() + svcFqn := prefix + sd.GetName() if sd.Options != nil { - if err := l.resolveOptions(r, fd, "service", thisName, proto.MessageName(sd.Options), sd.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "service", svcFqn, sd.Options.UninterpretedOption, scopes); err != nil { return err } } + // not a message, but 
same scoping rules for nested elements as if it were + scope := messageScope(svcFqn, isProto3(fd), l, fd) + scopes = append(scopes, scope) + for _, mtd := range sd.Method { if mtd.Options != nil { - if err := l.resolveOptions(r, fd, "method", thisName+"."+mtd.GetName(), proto.MessageName(mtd.Options), mtd.Options.UninterpretedOption, scopes); err != nil { + if err := l.resolveOptions(r, fd, "method", svcFqn+"."+mtd.GetName(), mtd.Options.UninterpretedOption, scopes); err != nil { return err } } - scope := fmt.Sprintf("method %s.%s", thisName, mtd.GetName()) + scope := fmt.Sprintf("method %s.%s", svcFqn, mtd.GetName()) node := r.getMethodNode(mtd) - fqn, dsc, _ := l.resolve(fd, mtd.GetInputType(), true, scopes) + fqn, dsc, _ := l.resolve(fd, mtd.GetInputType(), false, scopes) if dsc == nil { if err := l.errs.handleErrorWithPos(node.GetInputType().Start(), "%s: unknown request type %s", scope, mtd.GetInputType()); err != nil { return err @@ -521,7 +542,7 @@ func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto } // TODO: make input and output type resolution more DRY - fqn, dsc, _ = l.resolve(fd, mtd.GetOutputType(), true, scopes) + fqn, dsc, _ = l.resolve(fd, mtd.GetOutputType(), false, scopes) if dsc == nil { if err := l.errs.handleErrorWithPos(node.GetOutputType().Start(), "%s: unknown response type %s", scope, mtd.GetOutputType()); err != nil { return err @@ -542,48 +563,108 @@ func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto return nil } -func (l *linker) resolveOptions(r *parseResult, fd *dpb.FileDescriptorProto, elemType, elemName, optType string, opts []*dpb.UninterpretedOption, scopes []scope) error { - var scope string - if elemType != "file" { - scope = fmt.Sprintf("%s %s: ", elemType, elemName) +func (l *linker) resolveOptions(r *parseResult, fd *dpb.FileDescriptorProto, elemType, elemName string, opts []*dpb.UninterpretedOption, scopes []scope) error { + mc := &messageContext{ + res: r, + elementName: elemName, + elementType: elemType, } opts: for _, opt := range opts { + // resolve any extension names found in option names for _, nm := range opt.Name { if nm.GetIsExtension() { - node := r.getOptionNamePartNode(nm) - fqn, dsc, _ := l.resolve(fd, nm.GetNamePart(), false, scopes) - if dsc == nil { - if err := l.errs.handleErrorWithPos(node.Start(), "%sunknown extension %s", scope, nm.GetNamePart()); err != nil { - return err - } - continue opts - } - if dsc == sentinelMissingSymbol { - if err := l.errs.handleErrorWithPos(node.Start(), "%sunknown extension %s; resolved to %s which is not defined; consider using a leading dot", scope, nm.GetNamePart(), fqn); err != nil { + fqn, err := l.resolveExtensionName(nm.GetNamePart(), fd, scopes) + if err != nil { + node := r.getOptionNamePartNode(nm) + if err := l.errs.handleErrorWithPos(node.Start(), "%v%v", mc, err); err != nil { return err } continue opts } - if ext, ok := dsc.(*dpb.FieldDescriptorProto); !ok { - otherType := descriptorType(dsc) - if err := l.errs.handleErrorWithPos(node.Start(), "%sinvalid extension: %s is a %s, not an extension", scope, nm.GetNamePart(), otherType); err != nil { - return err - } - continue opts - } else if ext.GetExtendee() == "" { - if err := l.errs.handleErrorWithPos(node.Start(), "%sinvalid extension: %s is a field but not an extension", scope, nm.GetNamePart()); err != nil { + nm.NamePart = proto.String(fqn) + } + } + // also resolve any extension names found inside message literals in option values + mc.option = opt + optVal := 
r.getOptionNode(opt).GetValue() + if err := l.resolveOptionValue(r, mc, fd, optVal, scopes); err != nil { + return err + } + mc.option = nil + } + return nil +} + +func (l *linker) resolveOptionValue(r *parseResult, mc *messageContext, fd *dpb.FileDescriptorProto, val ast.ValueNode, scopes []scope) error { + optVal := val.Value() + switch optVal := optVal.(type) { + case []ast.ValueNode: + origPath := mc.optAggPath + defer func() { + mc.optAggPath = origPath + }() + for i, v := range optVal { + mc.optAggPath = fmt.Sprintf("%s[%d]", origPath, i) + if err := l.resolveOptionValue(r, mc, fd, v, scopes); err != nil { + return err + } + } + case []*ast.MessageFieldNode: + origPath := mc.optAggPath + defer func() { + mc.optAggPath = origPath + }() + for _, fld := range optVal { + // check for extension name + if fld.Name.IsExtension() { + fqn, err := l.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), fd, scopes) + if err != nil { + if err := l.errs.handleErrorWithPos(fld.Name.Name.Start(), "%v%v", mc, err); err != nil { return err } - continue opts + } else { + r.optionQualifiedNames[fld.Name.Name] = fqn } - nm.NamePart = proto.String("." + fqn) + } + + // recurse into value + mc.optAggPath = origPath + if origPath != "" { + mc.optAggPath += "." + } + if fld.Name.IsExtension() { + mc.optAggPath = fmt.Sprintf("%s[%s]", mc.optAggPath, string(fld.Name.Name.AsIdentifier())) + } else { + mc.optAggPath = fmt.Sprintf("%s%s", mc.optAggPath, string(fld.Name.Name.AsIdentifier())) + } + + if err := l.resolveOptionValue(r, mc, fd, fld.Val, scopes); err != nil { + return err } } } + return nil } +func (l *linker) resolveExtensionName(name string, fd *dpb.FileDescriptorProto, scopes []scope) (string, error) { + fqn, dsc, _ := l.resolve(fd, name, false, scopes) + if dsc == nil { + return "", fmt.Errorf("unknown extension %s", name) + } + if dsc == sentinelMissingSymbol { + return "", fmt.Errorf("unknown extension %s; resolved to %s which is not defined; consider using a leading dot", name, fqn) + } + if ext, ok := dsc.(*dpb.FieldDescriptorProto); !ok { + otherType := descriptorType(dsc) + return "", fmt.Errorf("invalid extension: %s is a %s, not an extension", name, otherType) + } else if ext.GetExtendee() == "" { + return "", fmt.Errorf("invalid extension: %s is a field but not an extension", name) + } + return "." 
+ fqn, nil +} + func (l *linker) resolve(fd *dpb.FileDescriptorProto, name string, onlyTypes bool, scopes []scope) (fqn string, element proto.Message, proto3 bool) { if strings.HasPrefix(name, ".") { // already fully-qualified @@ -914,7 +995,63 @@ func (l *linker) checkForUnusedImports(filename string) { if pos == nil { pos = ast.UnknownPos(r.fd.GetName()) } - r.errs.warn(pos, errUnusedImport(dep)) + l.errs.warn(pos, errUnusedImport(dep)) + } + } +} + +func (l *linker) checkExtensionsInFile(fd *desc.FileDescriptor, res *parseResult) error { + for _, fld := range fd.GetExtensions() { + if err := l.checkExtension(fld, res); err != nil { + return err + } + } + for _, md := range fd.GetMessageTypes() { + if err := l.checkExtensionsInMessage(md, res); err != nil { + return err + } + } + return nil +} + +func (l *linker) checkExtensionsInMessage(md *desc.MessageDescriptor, res *parseResult) error { + for _, fld := range md.GetNestedExtensions() { + if err := l.checkExtension(fld, res); err != nil { + return err + } + } + for _, nmd := range md.GetNestedMessageTypes() { + if err := l.checkExtensionsInMessage(nmd, res); err != nil { + return err } } + return nil +} + +func (l *linker) checkExtension(fld *desc.FieldDescriptor, res *parseResult) error { + // NB: It's a little gross that we don't enforce these in validateBasic(). + // But requires some minimal linking to resolve the extendee, so we can + // interrogate its descriptor. + if fld.GetOwner().GetMessageOptions().GetMessageSetWireFormat() { + // Message set wire format requires that all extensions be messages + // themselves (no scalar extensions) + if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE { + pos := res.getFieldNode(fld.AsFieldDescriptorProto()).FieldType().Start() + return l.errs.handleErrorWithPos(pos, "messages with message-set wire format cannot contain scalar extensions, only messages") + } + if fld.IsRepeated() { + pos := res.getFieldNode(fld.AsFieldDescriptorProto()).FieldLabel().Start() + return l.errs.handleErrorWithPos(pos, "messages with message-set wire format cannot contain repeated extensions, only optional") + } + } else { + // In validateBasic() we just made sure these were within bounds for any message. But + // now that things are linked, we can check if the extendee is messageset wire format + // and, if not, enforce tighter limit. 
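These extension checks now live on the linker (the old free functions are deleted from parser.go further down) and report through l.errs.handleErrorWithPos, so one pass can surface more than one problem; the repeated-extension rule for message-set extendees is also new here, where previously only the scalar case was caught. A rough illustration of input the message-set branch above should reject, assuming protoparse's public Parser API (the file contents are invented):

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc/protoparse"
)

const src = `syntax = "proto2";
message Holder {
  option message_set_wire_format = true;
  extensions 4 to max;
}
extend Holder {
  optional int32 bad = 4; // scalar extension of a message-set extendee
}
`

func main() {
	p := protoparse.Parser{
		Accessor: protoparse.FileContentsFromMap(map[string]string{"example.proto": src}),
	}
	_, err := p.ParseFiles("example.proto")
	// expect an error like "...cannot contain scalar extensions, only messages"
	fmt.Println(err)
}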
+ if fld.GetNumber() > internal.MaxNormalTag { + pos := res.getFieldNode(fld.AsFieldDescriptorProto()).FieldTag().Start() + return l.errs.handleErrorWithPos(pos, "tag number %d is higher than max allowed tag number (%d)", fld.GetNumber(), internal.MaxNormalTag) + } + } + + return nil } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go index 1d7ac00008..f9f306bbf0 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go @@ -696,7 +696,7 @@ func interpretFileOptions(l *linker, r *parseResult, fd fileDescriptorish) error } } for _, fld := range fd.GetExtensions() { - if err := interpretFieldOptions(l, r, fld); err != nil { + if err := interpretFieldOptions(l, r, fld, true); err != nil { return err } } @@ -740,7 +740,7 @@ func interpretMessageOptions(l *linker, r *parseResult, md msgDescriptorish) err } } for _, fld := range md.GetFields() { - if err := interpretFieldOptions(l, r, fld); err != nil { + if err := interpretFieldOptions(l, r, fld, false); err != nil { return err } } @@ -755,7 +755,7 @@ func interpretMessageOptions(l *linker, r *parseResult, md msgDescriptorish) err } } for _, fld := range md.GetNestedExtensions() { - if err := interpretFieldOptions(l, r, fld); err != nil { + if err := interpretFieldOptions(l, r, fld, true); err != nil { return err } } @@ -782,7 +782,7 @@ func interpretMessageOptions(l *linker, r *parseResult, md msgDescriptorish) err return nil } -func interpretFieldOptions(l *linker, r *parseResult, fld fldDescriptorish) error { +func interpretFieldOptions(l *linker, r *parseResult, fld fldDescriptorish, isExtension bool) error { opts := fld.GetFieldOptions() if len(opts.GetUninterpretedOption()) > 0 { uo := opts.UninterpretedOption @@ -804,6 +804,10 @@ func interpretFieldOptions(l *linker, r *parseResult, fld fldDescriptorish) erro if err := r.errs.handleErrorWithPos(optNode.GetValue().Start(), "%s: expecting string value for json_name option", scope); err != nil { return err } + } else if isExtension { + if err := r.errs.handleErrorWithPos(optNode.GetName().Start(), "%s: option json_name is not allowed on extensions", scope); err != nil { + return err + } } else { fld.AsFieldDescriptorProto().JsonName = proto.String(string(opt.StringValue)) } @@ -858,7 +862,7 @@ func processDefaultOption(res *parseResult, scope string, fld fldDescriptorish, elementType: descriptorType(fld.AsProto()), option: opt, } - v, err := fieldValue(res, mc, fld, val, true) + v, err := fieldValue(res, mc, fld, val, true, false) if err != nil { return -1, res.errs.handleError(err) } @@ -1044,7 +1048,7 @@ func interpretField(res *parseResult, mc *messageContext, element descriptorish, if extName[0] == '.' 
{ extName = extName[1:] /* skip leading dot */ } - fld = findExtension(element.GetFile(), extName, false, map[fileDescriptorish]struct{}{}) + fld = findExtension(element.GetFile(), extName) if fld == nil { return nil, res.errs.handleErrorWithPos(node.Start(), "%vunrecognized extension %s of %s", @@ -1069,7 +1073,7 @@ func interpretField(res *parseResult, mc *messageContext, element descriptorish, if len(opt.GetName()) > nameIndex+1 { nextnm := opt.GetName()[nameIndex+1] nextnode := res.getOptionNamePartNode(nextnm) - if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE { + if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE && fld.GetType() != dpb.FieldDescriptorProto_TYPE_GROUP { return nil, res.errs.handleErrorWithPos(nextnode.Start(), "%vcannot set field %s because %s is not a message", mc, nextnm.GetNamePart(), nm.GetNamePart()) @@ -1086,6 +1090,15 @@ func interpretField(res *parseResult, mc *messageContext, element descriptorish, v, err = dm.TryGetField(fld) fdm, _ = v.(*dynamic.Message) } else { + if ood := fld.GetOneOf(); ood != nil { + existingFld, _, err := dm.TryGetOneOfField(ood) + if err != nil { + return nil, res.errs.handleErrorWithPos(node.Start(), "%verror querying value: %s", mc, err) + } + if existingFld != nil && existingFld.GetNumber() != fld.GetNumber() { + return nil, res.errs.handleErrorWithPos(node.Start(), "%voneof %q already has field %q set", mc, ood.GetName(), fieldName(existingFld)) + } + } fdm = dynamic.NewMessage(fld.GetMessageType()) err = dm.TrySetField(fld, fdm) } @@ -1097,7 +1110,7 @@ func interpretField(res *parseResult, mc *messageContext, element descriptorish, } optNode := res.getOptionNode(opt) - if err := setOptionField(res, mc, dm, fld, node, optNode.GetValue()); err != nil { + if err := setOptionField(res, mc, dm, fld, node, optNode.GetValue(), false); err != nil { return nil, res.errs.handleError(err) } if fld.IsRepeated() { @@ -1106,31 +1119,44 @@ func interpretField(res *parseResult, mc *messageContext, element descriptorish, return path, nil } -func findExtension(fd fileDescriptorish, name string, public bool, checked map[fileDescriptorish]struct{}) *desc.FieldDescriptor { +func findExtension(fd fileDescriptorish, name string) *desc.FieldDescriptor { + d := findSymbol(fd, name, false, map[fileDescriptorish]struct{}{}) + if fld, ok := d.(*desc.FieldDescriptor); ok { + return fld + } + return nil +} + +func findMessage(fd fileDescriptorish, name string) *desc.MessageDescriptor { + d := findSymbol(fd, name, false, map[fileDescriptorish]struct{}{}) + if md, ok := d.(*desc.MessageDescriptor); ok { + return md + } + return nil +} + +func findSymbol(fd fileDescriptorish, name string, public bool, checked map[fileDescriptorish]struct{}) desc.Descriptor { if _, ok := checked[fd]; ok { return nil } checked[fd] = struct{}{} d := fd.FindSymbol(name) if d != nil { - if fld, ok := d.(*desc.FieldDescriptor); ok { - return fld - } - return nil + return d } // When public = false, we are searching only directly imported symbols. But we // also need to search transitive public imports due to semantics of public imports. 
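The recursion just below implements the rule stated in that comment. A hedged end-to-end illustration (file names and the custom option are invented): c.proto imports only a.proto, yet may use an extension declared in b.proto, because a.proto imports b.proto publicly and public imports re-export symbols transitively.

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc/protoparse"
)

var files = map[string]string{
	"b.proto": `syntax = "proto3";
import "google/protobuf/descriptor.proto";
extend google.protobuf.FileOptions { string tag = 50000; }`,

	"a.proto": `syntax = "proto3";
import public "b.proto";`,

	"c.proto": `syntax = "proto3";
import "a.proto";
option (tag) = "visible through the public import";`,
}

func main() {
	p := protoparse.Parser{
		Accessor: protoparse.WithStandardImports(protoparse.FileContentsFromMap(files)),
	}
	fds, err := p.ParseFiles("c.proto")
	fmt.Println(len(fds), err) // expect: 1 <nil>
}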
if public { for _, dep := range fd.GetPublicDependencies() { - d := findExtension(dep, name, true, checked) + d := findSymbol(dep, name, true, checked) if d != nil { return d } } } else { for _, dep := range fd.GetDependencies() { - d := findExtension(dep, name, true, checked) + d := findSymbol(dep, name, true, checked) if d != nil { return d } @@ -1139,7 +1165,7 @@ func findExtension(fd fileDescriptorish, name string, public bool, checked map[f return nil } -func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, fld *desc.FieldDescriptor, name ast.Node, val ast.ValueNode) error { +func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, fld *desc.FieldDescriptor, name ast.Node, val ast.ValueNode, insideMsgLiteral bool) error { v := val.Value() if sl, ok := v.([]ast.ValueNode); ok { // handle slices a little differently than the others @@ -1152,7 +1178,7 @@ func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, f }() for index, item := range sl { mc.optAggPath = fmt.Sprintf("%s[%d]", origPath, index) - if v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, item, false); err != nil { + if v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, item, false, insideMsgLiteral); err != nil { return err } else if err = dm.TryAddRepeatedField(fld, v); err != nil { return errorWithPos(val.Start(), "%verror setting value: %s", mc, err) @@ -1161,10 +1187,21 @@ func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, f return nil } - v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, val, false) + v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, val, false, insideMsgLiteral) if err != nil { return err } + + if ood := fld.GetOneOf(); ood != nil { + existingFld, _, err := dm.TryGetOneOfField(ood) + if err != nil { + return errorWithPos(name.Start(), "%verror querying value: %s", mc, err) + } + if existingFld != nil && existingFld.GetNumber() != fld.GetNumber() { + return errorWithPos(name.Start(), "%voneof %q already has field %q set", mc, ood.GetName(), fieldName(existingFld)) + } + } + if fld.IsRepeated() { err = dm.TryAddRepeatedField(fld, v) } else { @@ -1295,7 +1332,7 @@ func valueKind(val interface{}) string { } } -func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val ast.ValueNode, enumAsString bool) (interface{}, error) { +func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val ast.ValueNode, enumAsString, insideMsgLiteral bool) (interface{}, error) { v := val.Value() t := fld.AsFieldDescriptorProto().GetType() switch t { @@ -1315,45 +1352,33 @@ func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val case dpb.FieldDescriptorProto_TYPE_MESSAGE, dpb.FieldDescriptorProto_TYPE_GROUP: if aggs, ok := v.([]*ast.MessageFieldNode); ok { fmd := fld.GetMessageType() - fdm := dynamic.NewMessage(fmd) - origPath := mc.optAggPath - defer func() { - mc.optAggPath = origPath - }() - for _, a := range aggs { - if origPath == "" { - mc.optAggPath = a.Name.Value() - } else { - mc.optAggPath = origPath + "." 
+ a.Name.Value() - } - var ffld *desc.FieldDescriptor - if a.Name.IsExtension() { - n := string(a.Name.Name.AsIdentifier()) - ffld = findExtension(mc.file, n, false, map[fileDescriptorish]struct{}{}) - if ffld == nil { - // may need to qualify with package name - pkg := mc.file.GetPackage() - if pkg != "" { - ffld = findExtension(mc.file, pkg+"."+n, false, map[fileDescriptorish]struct{}{}) - } - } - } else { - ffld = fmd.FindFieldByName(a.Name.Value()) - } - if ffld == nil { - return nil, errorWithPos(val.Start(), "%vfield %s not found", mc, string(a.Name.Name.AsIdentifier())) - } - if err := setOptionField(res, mc, fdm, ffld, a.Name, a.Val); err != nil { - return nil, err - } - } - return fdm, nil + return messageLiteralValue(res, mc, aggs, fmd) } return nil, errorWithPos(val.Start(), "%vexpecting message, got %s", mc, valueKind(v)) case dpb.FieldDescriptorProto_TYPE_BOOL: if b, ok := v.(bool); ok { return b, nil } + if id, ok := v.(ast.Identifier); ok { + if insideMsgLiteral { + // inside a message literal, values use the protobuf text format, + // which is lenient in that it accepts "t" and "f" or "True" and "False" + switch id { + case "t", "true", "True": + return true, nil + case "f", "false", "False": + return false, nil + } + } else { + // options with simple scalar values (no message literal) are stricter + switch id { + case "true": + return true, nil + case "false": + return false, nil + } + } + } return nil, errorWithPos(val.Start(), "%vexpecting bool, got %s", mc, valueKind(v)) case dpb.FieldDescriptorProto_TYPE_BYTES: if str, ok := v.(string); ok { @@ -1416,6 +1441,14 @@ func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val } return nil, errorWithPos(val.Start(), "%vexpecting uint64, got %s", mc, valueKind(v)) case dpb.FieldDescriptorProto_TYPE_DOUBLE: + if id, ok := v.(ast.Identifier); ok { + switch id { + case "inf": + return math.Inf(1), nil + case "nan": + return math.NaN(), nil + } + } if d, ok := v.(float64); ok { return d, nil } @@ -1427,6 +1460,14 @@ func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val } return nil, errorWithPos(val.Start(), "%vexpecting double, got %s", mc, valueKind(v)) case dpb.FieldDescriptorProto_TYPE_FLOAT: + if id, ok := v.(ast.Identifier); ok { + switch id { + case "inf": + return float32(math.Inf(1)), nil + case "nan": + return float32(math.NaN()), nil + } + } if d, ok := v.(float64); ok { if (d > math.MaxFloat32 || d < -math.MaxFloat32) && !math.IsInf(d, 1) && !math.IsInf(d, -1) && !math.IsNaN(d) { return nil, errorWithPos(val.Start(), "%vvalue %f is out of range for float", mc, d) @@ -1444,3 +1485,97 @@ func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val return nil, errorWithPos(val.Start(), "%vunrecognized field type: %s", mc, t) } } + +func messageLiteralValue(res *parseResult, mc *messageContext, val []*ast.MessageFieldNode, fmd *desc.MessageDescriptor) (*dynamic.Message, error) { + fdm := dynamic.NewMessage(fmd) + origPath := mc.optAggPath + defer func() { + mc.optAggPath = origPath + }() + for _, fldNode := range val { + if origPath == "" { + mc.optAggPath = fldNode.Name.Value() + } else { + mc.optAggPath = origPath + "." + fldNode.Name.Value() + } + + if fldNode.Name.IsAnyTypeReference() { + if fmd.GetFullyQualifiedName() == "google.protobuf.Any" { + // TODO: Support other URLs dynamically -- the caller of protoparse + // should be able to provide a custom resolver that can resolve type + // URLs into message descriptors.
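In other words, the branch being added here lets an option's message literal embed a google.protobuf.Any via the text format's type-URL syntax. A sketch of source this should now accept, run against the bumped protoreflect (Payload, MyOptions, and my_opt are invented names):

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc/protoparse"
)

const src = `syntax = "proto3";
import "google/protobuf/any.proto";
import "google/protobuf/descriptor.proto";

message Payload { string note = 1; }
message MyOptions { google.protobuf.Any details = 1; }
extend google.protobuf.FileOptions { MyOptions my_opt = 50001; }

option (my_opt) = {
  details: {
    [type.googleapis.com/Payload]: { note: "packed into an Any" }
  }
};
`

func main() {
	p := protoparse.Parser{
		Accessor: protoparse.WithStandardImports(protoparse.FileContentsFromMap(map[string]string{"opts.proto": src})),
	}
	_, err := p.ParseFiles("opts.proto")
	fmt.Println(err) // expect <nil>; the Any's type_url (field 1) and value (field 2) are filled in
}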
The default resolver would be + // implemented as below, only accepting "type.googleapis.com" and + // "type.googleprod.com" as hosts/prefixes and using the compiled + // file's transitive closure to find the named message. + urlPrefix := fldNode.Name.UrlPrefix.AsIdentifier() + msgName := fldNode.Name.Name.AsIdentifier() + fullUrl := fmt.Sprintf("%s/%s", urlPrefix, msgName) + if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" { + return nil, errorWithPos(fldNode.Name.UrlPrefix.Start(), "%vcould not resolve type reference %s", mc, fullUrl) + } + anyFields, ok := fldNode.Val.Value().([]*ast.MessageFieldNode) + if !ok { + return nil, errorWithPos(fldNode.Val.Start(), "%vtype references for google.protobuf.Any must have message literal value", mc) + } + anyMd := findMessage(mc.file, string(msgName)) + if anyMd == nil { + return nil, errorWithPos(fldNode.Name.UrlPrefix.Start(), "%vcould not resolve type reference %s", mc, fullUrl) + } + // parse the message value + msgVal, err := messageLiteralValue(res, mc, anyFields, anyMd) + if err != nil { + return nil, err + } + + // Any is defined with two fields: + // string type_url = 1 + // bytes value = 2 + if err := fdm.TrySetFieldByNumber(1, fullUrl); err != nil { + return nil, errorWithPos(fldNode.Name.Start(), "%vfailed to set type_url string field on Any: %w", mc, err) + } + b, err := msgVal.MarshalDeterministic() + if err != nil { + return nil, errorWithPos(fldNode.Val.Start(), "%vfailed to serialize message value: %w", mc, err) + } + if err := fdm.TrySetFieldByNumber(2, b); err != nil { + return nil, errorWithPos(fldNode.Name.Start(), "%vfailed to set value bytes field on Any: %w", mc, err) + } + } else { + return nil, errorWithPos(fldNode.Name.UrlPrefix.Start(), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.GetFullyQualifiedName()) + } + } else { + var ffld *desc.FieldDescriptor + if fldNode.Name.IsExtension() { + if n := res.optionQualifiedNames[fldNode.Name.Name]; n != "" { + ffld = findExtension(mc.file, n) + } + } else { + ffld = fmd.FindFieldByName(fldNode.Name.Value()) + // Groups are indicated in the text format by the group name (which is + // camel-case), NOT the field name (which is lower-case). + // ...but only regular fields, not extensions that are groups... + if ffld != nil && ffld.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP && ffld.GetMessageType().GetName() != fldNode.Name.Value() { + // this is kind of silly to fail here, but this mimics protoc behavior + return nil, errorWithPos(fldNode.Start(), "%vfield %s not found (did you mean the group named %s?)", mc, fldNode.Name.Value(), ffld.GetMessageType().GetName()) + } + if ffld == nil { + // could be a group name + for _, fd := range fmd.GetFields() { + if fd.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP && fd.GetMessageType().GetName() == fldNode.Name.Value() { + // found it!
+ ffld = fd + break + } + } + } + } + if ffld == nil { + return nil, errorWithPos(fldNode.Name.Name.Start(), "%vfield %s not found", mc, string(fldNode.Name.Name.AsIdentifier())) + } + if err := setOptionField(res, mc, fdm, ffld, fldNode.Name, fldNode.Val, true); err != nil { + return nil, err + } + } + } + return fdm, nil +} diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go index bdc000c227..5300aab6cf 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go @@ -634,6 +634,12 @@ type parseResult struct { // a map of uninterpreted option AST nodes to their relative path // in the resulting options message interpretedOptions map[*ast.OptionNode][]int32 + + // a map of AST nodes that represent identifiers in ast.FieldReferenceNodes + // to their fully-qualified name. The identifiers are for field names in + // message literals (in option values) that are extension fields. These names + // are resolved during linking and stored here, to be used to interpret options. + optionQualifiedNames map[ast.IdentValueNode]string } func (r *parseResult) getFileNode(f *dpb.FileDescriptorProto) ast.FileDeclNode { @@ -706,6 +712,13 @@ func (r *parseResult) getMethodNode(m *dpb.MethodDescriptorProto) ast.RPCDeclNod return r.nodes[m].(ast.RPCDeclNode) } +func (r *parseResult) getNode(m proto.Message) ast.Node { + if r.nodes == nil { + return ast.NewNoSourceNode(r.fd.GetName()) + } + return r.nodes[m] +} + func (r *parseResult) putFileNode(f *dpb.FileDescriptorProto, n *ast.FileNode) { r.nodes[f] = n } @@ -781,10 +794,11 @@ func parseProto(filename string, r io.Reader, errs *errorHandler, validate, crea func createParseResult(filename string, file *ast.FileNode, errs *errorHandler, createProtos bool) *parseResult { res := &parseResult{ - errs: errs, - root: file, - nodes: map[proto.Message]ast.Node{}, - interpretedOptions: map[*ast.OptionNode][]int32{}, + errs: errs, + root: file, + nodes: map[proto.Message]ast.Node{}, + interpretedOptions: map[*ast.OptionNode][]int32{}, + optionQualifiedNames: map[ast.IdentValueNode]string{}, } if createProtos { res.createFileDescriptor(filename, file) @@ -803,58 +817,6 @@ func checkTag(pos *SourcePos, v uint64, maxTag int32) error { return nil } -func checkExtensionsInFile(fd *desc.FileDescriptor, res *parseResult) error { - for _, fld := range fd.GetExtensions() { - if err := checkExtension(fld, res); err != nil { - return err - } - } - for _, md := range fd.GetMessageTypes() { - if err := checkExtensionsInMessage(md, res); err != nil { - return err - } - } - return nil -} - -func checkExtensionsInMessage(md *desc.MessageDescriptor, res *parseResult) error { - for _, fld := range md.GetNestedExtensions() { - if err := checkExtension(fld, res); err != nil { - return err - } - } - for _, nmd := range md.GetNestedMessageTypes() { - if err := checkExtensionsInMessage(nmd, res); err != nil { - return err - } - } - return nil -} - -func checkExtension(fld *desc.FieldDescriptor, res *parseResult) error { - // NB: It's a little gross that we don't enforce these in validateBasic(). - // But requires some minimal linking to resolve the extendee, so we can - // interrogate its descriptor. 
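The optionQualifiedNames map introduced in the parseResult hunk above is the hand-off between linking and option interpretation: the linker resolves extension names that appear inside message literals while it still has scope information, keyed by the identity of the AST node, and interpretation later reads the fully-qualified name back instead of re-resolving it. A simplified sketch of the idea (node and the map below are stand-ins, not the real ast types):

package main

import "fmt"

type node struct{ text string }

func main() {
	// Linking phase: two occurrences of the same textual name may resolve
	// differently depending on scope, so the key is the node, not the text.
	n1, n2 := &node{"exts"}, &node{"exts"}
	qualified := map[*node]string{
		n1: ".pkg.a.exts",
		n2: ".pkg.b.exts",
	}

	// Interpretation phase: scopes are gone, but node identity still works.
	for _, n := range []*node{n1, n2} {
		fmt.Printf("%s -> %s\n", n.text, qualified[n])
	}
}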
- if fld.GetOwner().GetMessageOptions().GetMessageSetWireFormat() { - // Message set wire format requires that all extensions be messages - // themselves (no scalar extensions) - if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE { - pos := res.getFieldNode(fld.AsFieldDescriptorProto()).FieldType().Start() - return errorWithPos(pos, "messages with message-set wire format cannot contain scalar extensions, only messages") - } - } else { - // In validateBasic() we just made sure these were within bounds for any message. But - // now that things are linked, we can check if the extendee is messageset wire format - // and, if not, enforce tighter limit. - if fld.GetNumber() > internal.MaxNormalTag { - pos := res.getFieldNode(fld.AsFieldDescriptorProto()).FieldTag().Start() - return errorWithPos(pos, "tag number %d is higher than max allowed tag number (%d)", fld.GetNumber(), internal.MaxNormalTag) - } - } - - return nil -} - func aggToString(agg []*ast.MessageFieldNode, buf *bytes.Buffer) { buf.WriteString("{") for _, a := range agg { diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y index 63b32d9d6d..67a52ae496 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y @@ -279,13 +279,7 @@ scalarConstant : stringLit { } | numLit | name { - if $1.Val == "true" || $1.Val == "false" { - $$ = ast.NewBoolLiteralNode($1.ToKeyword()) - } else if $1.Val == "inf" || $1.Val == "nan" { - $$ = ast.NewSpecialFloatLiteralNode($1.ToKeyword()) - } else { - $$ = $1 - } + $$ = $1 } numLit : _FLOAT_LIT { @@ -388,6 +382,14 @@ aggFieldEntry : aggName ':' scalarConstant { $$ = nil } } + | aggName '[' ']' { + if $1 != nil { + val := ast.NewArrayLiteralNode($2, nil, nil, $3) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } | aggName ':' '[' ']' { if $1 != nil { val := ast.NewArrayLiteralNode($3, nil, nil, $4) @@ -396,6 +398,15 @@ aggFieldEntry : aggName ':' scalarConstant { $$ = nil } } + | aggName '[' constantList ']' { + if $1 != nil { + vals, commas := $3.toNodes() + val := ast.NewArrayLiteralNode($2, vals, commas, $4) + $$ = ast.NewMessageFieldNode($1, nil, val) + } else { + $$ = nil + } + } | aggName ':' '[' constantList ']' { if $1 != nil { vals, commas := $4.toNodes() @@ -450,8 +461,11 @@ aggFieldEntry : aggName ':' scalarConstant { aggName : name { $$ = ast.NewFieldReferenceNode($1) } - | '[' typeIdent ']' { - $$ = ast.NewExtensionFieldReferenceNode($1, $2, $3) + | '[' ident ']' { + $$ = ast.NewExtensionFieldReferenceNode($1, $2.toIdentValueNode(nil), $3) + } + | '[' ident '/' ident ']' { + $$ = ast.NewAnyTypeReferenceNode($1, $2.toIdentValueNode(nil), $3, $4.toIdentValueNode(nil), $5) } | '[' error ']' { $$ = nil diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go index 61d9038651..31fdcb1cff 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go @@ -210,7 +210,7 @@ const protoEofCode = 1 const protoErrCode = 2 const protoInitialStackSize = 16 -//line proto.y:1191 +//line proto.y:1205 //line yacctab:1 var protoExca = [...]int{ @@ -230,16 +230,16 @@ var protoExca = [...]int{ 1, 3, -2, 0, -1, 95, - 55, 178, + 55, 181, -2, 0, -1, 96, - 55, 166, + 55, 169, -2, 0, -1, 97, - 55, 195, + 55, 198, -2, 0, -1, 99, - 55, 204, + 55, 207, -2, 0, 
-1, 110, 55, 53, @@ -248,360 +248,372 @@ var protoExca = [...]int{ 55, 51, 61, 51, -2, 0, - -1, 352, + -1, 353, 61, 53, -2, 0, - -1, 367, - 55, 116, + -1, 368, + 55, 119, -2, 0, - -1, 401, + -1, 402, 61, 53, -2, 0, - -1, 489, + -1, 406, 61, 53, -2, 0, - -1, 533, - 55, 178, + -1, 543, + 55, 181, -2, 0, - -1, 537, - 55, 178, + -1, 547, + 55, 181, -2, 0, - -1, 541, - 55, 178, + -1, 551, + 55, 181, -2, 0, - -1, 559, - 55, 216, + -1, 569, + 55, 219, -2, 0, - -1, 566, - 55, 178, + -1, 575, + 55, 181, -2, 0, - -1, 569, - 55, 178, + -1, 578, + 55, 181, -2, 0, - -1, 572, - 55, 178, + -1, 581, + 55, 181, -2, 0, - -1, 593, - 55, 178, + -1, 602, + 55, 181, -2, 0, - -1, 605, - 55, 178, + -1, 612, + 55, 181, -2, 0, } const protoPrivate = 57344 -const protoLast = 2321 +const protoLast = 2437 var protoAct = [...]int{ - 31, 118, 117, 125, 8, 396, 8, 8, 488, 81, - 486, 414, 363, 288, 579, 421, 328, 107, 77, 79, - 80, 82, 84, 326, 317, 311, 8, 106, 105, 124, - 281, 228, 139, 26, 177, 593, 416, 591, 30, 525, - 555, 553, 85, 551, 541, 87, 88, 89, 364, 539, - 537, 535, 364, 531, 75, 364, 364, 364, 364, 533, - 524, 519, 512, 364, 364, 500, 364, 473, 364, 362, - 502, 405, 364, 560, 404, 329, 364, 364, 523, 397, - 364, 329, 364, 349, 364, 29, 329, 116, 94, 350, - 110, 348, 563, 109, 77, 320, 562, 584, 352, 349, - 178, 104, 550, 282, 529, 98, 349, 348, 530, 492, - 349, 293, 347, 103, 348, 493, 583, 477, 348, 302, - 346, 526, 503, 472, 388, 371, 365, 230, 511, 185, - 115, 339, 321, 93, 330, 310, 91, 314, 315, 344, - 330, 304, 306, 308, 14, 330, 285, 582, 605, 572, - 4, 15, 569, 566, 16, 17, 318, 17, 17, 558, - 367, 559, 14, 319, 316, 285, 582, 99, 97, 15, - 96, 95, 16, 17, 603, 597, 17, 17, 577, 576, - 178, 575, 570, 567, 564, 19, 18, 20, 21, 557, - 549, 286, 543, 515, 13, 507, 284, 581, 413, 340, - 595, 387, 318, 19, 18, 20, 21, 370, 369, 185, - 286, 323, 13, 342, 333, 284, 581, 324, 303, 287, - 102, 101, 100, 90, 86, 25, 547, 546, 504, 480, - 479, 478, 411, 410, 409, 408, 407, 337, 406, 394, - 368, 361, 325, 334, 335, 336, 92, 24, 482, 418, - 389, 366, 122, 11, 574, 11, 11, 230, 573, 332, - 338, 373, 374, 375, 376, 377, 378, 379, 380, 381, - 382, 383, 384, 120, 10, 11, 10, 10, 121, 9, - 518, 9, 9, 517, 282, 419, 516, 343, 29, 5, - 293, 300, 298, 23, 27, 28, 10, 29, 313, 499, - 498, 9, 299, 345, 353, 355, 356, 357, 358, 359, - 360, 341, 23, 297, 295, 497, 496, 495, 494, 481, - 351, 470, 313, 412, 296, 390, 29, 3, 283, 280, - 22, 12, 227, 179, 176, 391, 392, 393, 123, 327, - 312, 180, 386, 385, 128, 420, 137, 127, 424, 126, - 229, 109, 119, 293, 423, 289, 290, 427, 236, 134, - 429, 395, 237, 140, 183, 77, 402, 431, 239, 143, - 372, 422, 108, 292, 76, 578, 415, 7, 400, 398, - 6, 2, 1, 0, 0, 0, 0, 0, 0, 318, - 0, 417, 0, 0, 0, 0, 471, 0, 0, 474, - 109, 0, 293, 0, 0, 0, 476, 484, 0, 0, - 0, 0, 475, 0, 0, 490, 0, 0, 0, 0, - 0, 0, 0, 0, 422, 501, 0, 0, 508, 509, - 0, 0, 0, 0, 426, 0, 506, 0, 0, 432, - 433, 434, 435, 436, 437, 17, 438, 439, 440, 441, - 0, 0, 510, 442, 443, 444, 445, 446, 447, 448, - 449, 450, 451, 452, 453, 454, 455, 456, 428, 457, - 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, - 468, 469, 514, 513, 425, 0, 0, 505, 521, 0, - 293, 430, 0, 522, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 527, 77, 109, 0, 532, 534, 536, - 538, 540, 542, 545, 0, 544, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 109, 0, 552, - 554, 556, 0, 548, 0, 0, 565, 561, 0, 0, - 568, 0, 0, 0, 571, 0, 0, 0, 0, 0, + 118, 117, 125, 8, 405, 8, 8, 588, 364, 420, + 81, 427, 31, 397, 288, 326, 328, 107, 317, 82, + 311, 404, 106, 105, 281, 8, 124, 228, 139, 177, 
+ 77, 79, 80, 26, 84, 422, 30, 410, 602, 540, + 350, 110, 85, 600, 565, 87, 88, 89, 409, 353, + 563, 365, 561, 75, 351, 551, 549, 547, 365, 365, + 545, 543, 541, 534, 533, 365, 528, 365, 365, 521, + 365, 365, 509, 511, 365, 365, 479, 365, 363, 496, + 411, 365, 570, 398, 365, 532, 329, 365, 116, 94, + 320, 365, 329, 365, 29, 329, 560, 104, 538, 178, + 349, 535, 282, 98, 349, 109, 77, 349, 348, 539, + 500, 103, 348, 536, 572, 348, 501, 571, 302, 349, + 347, 512, 483, 293, 230, 185, 14, 348, 346, 115, + 497, 478, 4, 15, 389, 372, 16, 17, 366, 520, + 344, 339, 304, 306, 308, 330, 612, 310, 321, 314, + 315, 330, 93, 14, 330, 91, 318, 581, 578, 316, + 15, 319, 285, 16, 17, 575, 591, 19, 18, 20, + 21, 591, 568, 17, 569, 285, 13, 17, 283, 178, + 368, 99, 17, 97, 96, 95, 17, 610, 606, 586, + 585, 584, 579, 576, 19, 18, 20, 21, 573, 567, + 559, 553, 318, 13, 524, 185, 323, 286, 516, 419, + 388, 371, 284, 370, 342, 340, 590, 333, 324, 604, + 286, 590, 303, 287, 102, 284, 101, 100, 90, 86, + 25, 557, 556, 513, 486, 485, 484, 417, 416, 415, + 414, 413, 412, 395, 334, 335, 336, 369, 362, 337, + 325, 92, 24, 488, 230, 332, 424, 390, 338, 374, + 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 367, 122, 11, 583, 11, 11, 120, 10, 582, + 10, 10, 527, 282, 121, 9, 5, 9, 9, 526, + 23, 525, 425, 29, 313, 11, 508, 300, 298, 343, + 10, 507, 293, 506, 345, 341, 505, 9, 299, 23, + 504, 503, 297, 295, 354, 352, 487, 356, 357, 358, + 359, 360, 361, 296, 29, 280, 476, 313, 418, 391, + 27, 28, 29, 3, 12, 227, 22, 179, 176, 123, + 327, 386, 387, 312, 180, 128, 426, 392, 393, 394, + 137, 127, 430, 126, 229, 119, 429, 289, 290, 396, + 433, 236, 134, 109, 109, 435, 293, 237, 407, 140, + 183, 428, 437, 401, 399, 239, 143, 373, 77, 108, + 292, 76, 587, 421, 7, 6, 2, 423, 1, 0, + 318, 477, 0, 0, 0, 0, 480, 0, 0, 0, + 0, 0, 0, 0, 0, 481, 0, 482, 0, 490, + 0, 0, 0, 109, 0, 293, 0, 494, 0, 293, + 0, 498, 492, 0, 0, 0, 0, 510, 0, 428, + 502, 0, 0, 0, 0, 0, 0, 0, 515, 0, + 0, 0, 0, 0, 0, 0, 517, 518, 0, 0, + 0, 0, 0, 0, 0, 0, 519, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 522, 0, 0, 0, 523, + 0, 0, 0, 0, 0, 0, 529, 0, 0, 0, + 531, 0, 530, 0, 0, 0, 0, 0, 0, 0, + 109, 0, 542, 544, 546, 548, 550, 552, 555, 537, + 0, 0, 554, 0, 0, 77, 109, 0, 0, 0, + 0, 0, 0, 0, 562, 564, 566, 0, 0, 0, + 558, 0, 0, 0, 0, 574, 0, 0, 0, 577, + 0, 0, 0, 580, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 580, 0, 0, 0, 302, 0, 586, - 302, 0, 588, 302, 0, 590, 0, 0, 0, 0, - 0, 0, 580, 0, 109, 109, 592, 594, 302, 0, - 302, 0, 302, 596, 598, 599, 604, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 302, 0, 607, 302, - 487, 0, 29, 114, 111, 32, 33, 34, 35, 36, + 0, 0, 589, 0, 0, 302, 0, 595, 302, 0, + 597, 302, 0, 599, 109, 109, 0, 0, 0, 0, + 589, 601, 603, 592, 593, 605, 302, 0, 302, 0, + 302, 0, 0, 0, 611, 0, 0, 0, 0, 0, + 0, 0, 302, 0, 614, 302, 493, 0, 29, 114, + 111, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 0, 110, 0, + 0, 0, 0, 0, 0, 0, 406, 113, 112, 0, + 0, 0, 491, 29, 114, 111, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, + 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, + 0, 406, 113, 112, 0, 0, 0, 403, 29, 114, + 111, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 
60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 0, 0, 0, 0, 110, 0, + 0, 0, 0, 0, 0, 0, 402, 113, 112, 0, + 0, 400, 29, 114, 111, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 110, 0, 0, 0, 0, 0, 0, 0, - 489, 113, 112, 0, 0, 0, 485, 29, 114, 111, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 0, 0, 0, 0, 110, 0, 0, - 0, 0, 0, 0, 0, 401, 113, 112, 0, 0, - 399, 29, 114, 111, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, - 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, - 0, 110, 0, 0, 0, 0, 0, 0, 0, 489, - 113, 112, 29, 114, 111, 32, 33, 34, 35, 36, - 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, - 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, - 0, 0, 110, 0, 0, 0, 0, 0, 528, 0, - 0, 113, 112, 32, 33, 34, 35, 36, 37, 38, - 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, - 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 491, - 0, 0, 0, 294, 32, 33, 34, 35, 36, 37, + 406, 113, 112, 29, 114, 111, 32, 33, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, + 0, 0, 0, 110, 0, 0, 0, 0, 0, 499, + 0, 0, 113, 112, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 403, 0, 0, 0, 294, 32, 33, 34, 35, 36, + 495, 0, 0, 0, 294, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 291, 0, 0, 0, 294, 32, 33, 34, 35, + 0, 408, 0, 0, 0, 294, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 294, 32, 33, 34, + 0, 0, 291, 0, 0, 0, 294, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 130, 0, 0, 0, 78, 144, 145, 146, 147, - 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, - 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, - 0, 129, 0, 0, 608, 130, 0, 0, 141, 0, - 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, - 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, - 173, 174, 175, 0, 0, 129, 0, 0, 606, 130, - 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, - 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, - 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, - 0, 0, 602, 130, 0, 0, 141, 0, 144, 145, - 146, 147, 148, 149, 17, 150, 
151, 152, 153, 133, - 132, 131, 154, 155, 156, 157, 158, 159, 160, 161, - 162, 163, 164, 165, 166, 167, 168, 169, 136, 142, - 135, 170, 171, 138, 19, 18, 20, 172, 173, 174, - 175, 0, 0, 129, 0, 0, 601, 130, 0, 0, - 141, 0, 144, 145, 146, 147, 148, 149, 17, 150, - 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, - 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, - 600, 130, 0, 0, 141, 0, 144, 145, 146, 147, - 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, - 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, - 0, 129, 0, 0, 589, 130, 0, 0, 141, 0, - 144, 145, 146, 147, 148, 149, 17, 150, 151, 152, - 153, 133, 132, 131, 154, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, - 136, 142, 135, 170, 171, 138, 19, 18, 20, 172, - 173, 174, 175, 0, 0, 129, 0, 0, 587, 130, - 0, 0, 141, 0, 144, 145, 146, 147, 148, 149, - 17, 150, 151, 152, 153, 133, 132, 131, 154, 155, - 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, - 166, 167, 168, 169, 136, 142, 135, 170, 171, 138, - 19, 18, 20, 172, 173, 174, 175, 0, 0, 129, - 0, 0, 585, 232, 0, 0, 141, 0, 240, 241, - 242, 243, 244, 245, 246, 247, 248, 249, 250, 235, - 234, 233, 251, 252, 253, 254, 255, 256, 257, 258, - 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, - 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, - 279, 0, 0, 231, 0, 0, 331, 130, 0, 0, - 238, 0, 144, 145, 146, 147, 148, 149, 17, 150, - 151, 152, 153, 133, 132, 131, 154, 155, 156, 157, - 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, - 168, 169, 136, 142, 135, 170, 171, 138, 19, 18, - 20, 172, 173, 174, 175, 0, 0, 129, 0, 0, - 301, 130, 0, 0, 141, 0, 144, 145, 146, 147, - 148, 149, 17, 150, 151, 152, 153, 133, 132, 131, - 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, - 164, 165, 166, 167, 168, 169, 136, 142, 135, 170, - 171, 138, 19, 18, 20, 172, 173, 174, 175, 0, - 0, 129, 0, 0, 232, 0, 0, 0, 141, 240, + 0, 0, 0, 0, 0, 0, 0, 294, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 130, 0, 0, 0, 78, 144, 145, 146, + 147, 148, 149, 17, 150, 151, 152, 153, 133, 132, + 131, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 136, 142, 135, + 170, 171, 138, 19, 18, 20, 172, 173, 174, 175, + 0, 0, 129, 0, 0, 615, 130, 0, 0, 141, + 0, 144, 145, 146, 147, 148, 149, 17, 150, 151, + 152, 153, 133, 132, 131, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 136, 142, 135, 170, 171, 138, 19, 18, 20, + 172, 173, 174, 175, 0, 0, 129, 0, 0, 613, + 130, 0, 0, 141, 0, 144, 145, 146, 147, 148, + 149, 17, 150, 151, 152, 153, 133, 132, 131, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 136, 142, 135, 170, 171, + 138, 19, 18, 20, 172, 173, 174, 175, 0, 0, + 129, 0, 0, 609, 130, 0, 0, 141, 0, 144, + 145, 146, 147, 148, 149, 17, 150, 151, 152, 153, + 133, 132, 131, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 136, + 142, 135, 170, 171, 138, 19, 18, 20, 172, 173, + 174, 175, 0, 0, 129, 0, 0, 608, 130, 0, + 0, 141, 0, 144, 145, 146, 147, 148, 149, 17, + 150, 151, 152, 153, 133, 132, 131, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 136, 142, 135, 170, 
171, 138, 19, + 18, 20, 172, 173, 174, 175, 0, 0, 129, 0, + 0, 607, 130, 0, 0, 141, 0, 144, 145, 146, + 147, 148, 149, 17, 150, 151, 152, 153, 133, 132, + 131, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 136, 142, 135, + 170, 171, 138, 19, 18, 20, 172, 173, 174, 175, + 0, 0, 129, 0, 0, 598, 130, 0, 0, 141, + 0, 144, 145, 146, 147, 148, 149, 17, 150, 151, + 152, 153, 133, 132, 131, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 136, 142, 135, 170, 171, 138, 19, 18, 20, + 172, 173, 174, 175, 0, 0, 129, 0, 0, 596, + 130, 0, 0, 141, 0, 144, 145, 146, 147, 148, + 149, 17, 150, 151, 152, 153, 133, 132, 131, 154, + 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 136, 142, 135, 170, 171, + 138, 19, 18, 20, 172, 173, 174, 175, 0, 0, + 129, 0, 0, 594, 232, 0, 0, 141, 0, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 235, 234, 233, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, - 278, 279, 0, 0, 231, 0, 0, 354, 0, 0, - 0, 238, 32, 33, 34, 35, 36, 37, 38, 39, + 278, 279, 0, 0, 231, 0, 0, 331, 130, 0, + 0, 238, 0, 144, 145, 146, 147, 148, 149, 17, + 150, 151, 152, 153, 133, 132, 131, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 136, 142, 135, 170, 171, 138, 19, + 18, 20, 172, 173, 174, 175, 0, 0, 129, 0, + 0, 301, 130, 0, 0, 141, 0, 144, 145, 146, + 147, 148, 149, 17, 150, 151, 152, 153, 133, 132, + 131, 154, 155, 156, 157, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 136, 142, 135, + 170, 171, 138, 19, 18, 20, 172, 173, 174, 175, + 0, 0, 129, 0, 0, 232, 0, 0, 0, 141, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 250, 235, 234, 233, 251, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 0, 0, 231, 0, 0, 0, 0, + 0, 0, 238, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, + 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, + 0, 432, 0, 0, 0, 83, 438, 439, 440, 441, + 442, 443, 17, 444, 445, 446, 447, 0, 0, 0, + 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, + 458, 459, 460, 461, 462, 434, 463, 464, 465, 466, + 467, 468, 469, 470, 471, 472, 473, 474, 475, 0, + 0, 431, 0, 0, 514, 0, 0, 0, 436, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + 489, 74, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 60, 309, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, + 57, 58, 59, 60, 307, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 483, - 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 83, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 54, 55, 56, 57, 58, 59, 60, 305, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 0, 0, 0, 0, 0, 432, 0, 0, 0, + 83, 
438, 439, 440, 441, 442, 443, 17, 444, 445, + 446, 447, 0, 0, 0, 448, 449, 450, 451, 452, + 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, + 434, 463, 464, 465, 466, 467, 468, 469, 470, 471, + 472, 473, 474, 475, 0, 0, 431, 0, 0, 182, + 0, 0, 0, 436, 186, 187, 188, 189, 190, 191, + 17, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, 219, 184, + 220, 221, 222, 223, 224, 225, 226, 0, 182, 181, + 0, 0, 322, 186, 187, 188, 189, 190, 191, 17, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, + 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 218, 219, 184, 220, + 221, 222, 223, 224, 225, 226, 355, 0, 181, 0, + 0, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, - 309, 62, 63, 64, 65, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 83, 32, 33, 34, 35, 36, 37, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - 58, 59, 60, 307, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 83, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, - 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 305, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 0, 0, 0, 0, 0, 426, 0, 0, 0, 83, - 432, 433, 434, 435, 436, 437, 17, 438, 439, 440, - 441, 0, 0, 0, 442, 443, 444, 445, 446, 447, - 448, 449, 450, 451, 452, 453, 454, 455, 456, 428, - 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, - 467, 468, 469, 0, 0, 425, 0, 0, 182, 0, - 0, 0, 430, 186, 187, 188, 189, 190, 191, 17, - 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, - 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, - 212, 213, 214, 215, 216, 217, 218, 219, 184, 220, - 221, 222, 223, 224, 225, 226, 0, 182, 181, 0, - 0, 322, 186, 187, 188, 189, 190, 191, 17, 192, - 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, - 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, - 213, 214, 215, 216, 217, 218, 219, 184, 220, 221, - 222, 223, 224, 225, 226, 0, 0, 181, 32, 33, - 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, - 74, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, } var protoPact = [...]int{ - 142, -1000, 160, 160, 196, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 173, 284, 2271, 1100, 2271, 2271, - 1858, 2271, 160, -1000, 322, -1000, 172, 322, 322, 322, - 171, 77, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 124, -1000, 151, 151, 201, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 178, 320, 2387, 1171, 2387, 2387, + 1876, 2387, 151, -1000, 328, -1000, 177, 328, 328, 328, + 176, 96, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 195, 74, -1000, 1858, 117, - 116, 114, -1000, 2271, 113, 170, -1000, 169, 168, -1000, - -1000, 2271, 798, 1100, 21, 1699, 2225, 1752, -1000, 163, - -1000, -1000, -1000, -1000, 167, -1000, -1000, -1000, -1000, -1000, - 1039, -1000, 308, 286, -1000, -1000, -1000, 1645, -1000, -1000, + -1000, -1000, -1000, -1000, 
-1000, 200, 93, -1000, 1876, 131, + 130, 129, -1000, 2387, 127, 175, -1000, 174, 172, -1000, + -1000, 2387, 869, 1171, 22, 1770, 2296, 1823, -1000, 173, + -1000, -1000, -1000, -1000, 171, -1000, -1000, -1000, -1000, -1000, + 1110, -1000, 307, 292, -1000, -1000, -1000, 1716, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 166, 2070, 2017, 1964, 2271, 317, 2271, 2271, 293, -1000, - -1000, 2271, 33, 73, -1000, -1000, -1000, -1000, -1000, -1000, + 170, 2141, 2088, 2035, 2387, 322, 2387, 2387, 289, -1000, + -1000, 2387, 28, 89, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 2176, -1000, -1000, -1000, - -1000, -1000, 165, 191, 81, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 2247, -1000, -1000, -1000, + -1000, -1000, 166, 199, 90, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1591, -1000, -1000, - -1000, -1000, 162, 2070, 2017, 1964, 2271, -1000, 2271, 72, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1662, -1000, -1000, + -1000, -1000, 165, 2141, 2088, 2035, 2387, -1000, 2387, 82, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 144, -1000, -1000, -1000, -1000, 161, 2271, -1000, 84, 1039, - 60, 58, 36, -1000, 1805, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 2271, 2271, 2271, 2271, 2271, 2271, - 190, 17, 66, 211, 106, 189, 156, 155, 65, -1000, - 238, 2271, -1000, -1000, -1000, 70, 149, 64, 210, -1000, - 320, -1000, -1000, -1000, 2271, 2271, 2271, 188, -1000, 2271, - -1000, -1000, -1000, 14, -1000, -1000, -1000, -1000, -1000, -1000, - 673, -1000, 978, 6, 3, 187, 185, 184, 183, 182, - 181, 318, -1000, 146, 1100, 317, 244, 2123, 316, -1000, - -1000, 322, 63, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 15, -1000, 70, 76, - -1000, 180, 179, 178, 314, -1000, 199, 1911, -1000, 608, - -1000, 917, 48, 54, -1000, -1000, 313, 312, 311, 310, - 295, 294, 13, -1000, 2, 62, 177, -1000, -1000, -1000, - 432, -1000, -1000, -1000, -1000, -1000, 143, 2271, 2271, -1000, - 2271, 69, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 160, -1000, -1000, -1000, -1000, 162, 2387, -1000, 85, 1110, + 68, 67, -13, -1000, 2344, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 2387, 2387, 2387, 2387, 2387, 2387, + 197, 26, 78, 231, 126, 196, 161, 159, 75, -1000, + 236, 2387, -1000, -1000, -1000, 87, 158, 74, 217, -1000, + 324, -1000, -1000, -1000, 2387, 2387, 2387, 192, -1000, 2387, + -1000, -1000, -1000, 18, -1000, -1000, -1000, -1000, -1000, -1000, + 744, 679, -1000, 1049, -20, 12, 191, 190, 189, 188, + 187, 186, 323, -1000, 157, 1171, 322, 251, 2194, 321, + -1000, -1000, 328, 71, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 24, -1000, 87, + 81, -1000, 185, 184, 183, 311, -1000, 204, 1982, -1000, + 614, -1000, 988, -1000, 11, 70, 927, 49, 55, -1000, + 2387, 
-1000, 306, 305, 301, 298, 296, 291, 20, -1000, + 5, 61, 182, -1000, -1000, -1000, 1929, -1000, -1000, -1000, + -1000, -1000, 156, 2387, 2387, -1000, 2387, 80, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 10, -1000, 1858, -1000, 141, -1000, -1000, -1000, 281, 278, - 275, 9, 14, 1858, 12, -1000, -8, -29, 61, 856, - 43, 47, -1000, -1000, 1, 5, -1, -4, -3, -10, - -1000, 140, -1000, 1100, 798, -1000, -1000, -1000, 176, 175, - -1000, 2271, -1000, 138, 41, -1000, -9, -11, -12, -1000, - 137, 107, 7, -1000, -1000, -1000, 737, 35, 31, -1000, - -1000, -1000, 132, 1699, 99, -1000, 131, 1699, 98, -1000, - 130, 1699, 95, -1000, -1000, -1000, 253, 249, -1000, -1000, - -1000, -1000, 129, -1000, 127, -1000, 126, -1000, -1000, 164, - -1000, -1000, 56, 37, -1000, 1537, 1699, -1000, 1483, 1699, - -1000, 1429, 1699, -15, -19, -1000, -1000, -1000, 145, -1000, - -1000, -1000, 123, 737, 737, -1000, 1375, -1000, 1321, -1000, - 1267, -1000, 122, 1699, 94, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1213, 1699, -1000, 1159, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 17, -1000, 1876, -1000, + 152, -1000, -1000, -1000, 286, 284, 277, 14, 18, 1876, + 19, -1000, -4, -5, 40, 52, -1000, 808, 37, 48, + -1000, -1000, -29, 10, 7, 8, 3, 4, 1, -1000, + 149, -1000, 1171, 869, -1000, -1000, -1000, 181, 180, -1000, + 2387, -1000, 148, 35, -1000, 0, -2, -8, -1000, 147, + 120, 16, -1000, -1000, -1000, -1000, -1000, -1000, 57, 54, + -1000, -1000, 146, 1770, 111, -1000, 141, 1770, 104, -1000, + 140, 1770, 103, -1000, -1000, -1000, 274, 269, -1000, -1000, + -1000, -1000, 139, -1000, 138, -1000, 137, -1000, -1000, 169, + -1000, 808, 808, -1000, 1608, 1770, -1000, 1554, 1770, -1000, + 1500, 1770, -9, -16, -1000, -1000, -1000, 164, -1000, -1000, + -1000, 136, -1000, -1000, -1000, 1446, -1000, 1392, -1000, 1338, + -1000, 135, 1770, 92, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 1284, 1770, -1000, 1230, -1000, } var protoPgo = [...]int{ - 0, 382, 381, 289, 327, 380, 377, 3, 376, 11, - 14, 375, 374, 373, 36, 12, 8, 28, 27, 372, - 16, 0, 370, 369, 368, 367, 364, 21, 363, 362, - 360, 9, 359, 358, 357, 10, 356, 355, 13, 354, - 352, 350, 349, 29, 348, 347, 346, 278, 1, 2, - 15, 345, 24, 344, 341, 32, 340, 339, 25, 23, - 338, 273, 34, 334, 333, 252, 31, 332, 17, 331, - 30, 329, 328, 5, + 0, 388, 386, 286, 333, 385, 384, 2, 383, 9, + 7, 382, 381, 380, 35, 8, 4, 23, 22, 379, + 16, 12, 377, 376, 375, 372, 370, 19, 369, 367, + 365, 10, 362, 361, 360, 21, 358, 357, 14, 356, + 355, 354, 353, 26, 352, 351, 350, 284, 0, 1, + 11, 346, 18, 345, 344, 28, 343, 340, 20, 15, + 339, 277, 29, 338, 337, 272, 27, 335, 17, 334, + 24, 325, 178, 13, } var protoR1 = [...]int{ @@ -612,41 +624,41 @@ var protoR1 = [...]int{ 19, 19, 19, 19, 19, 19, 19, 19, 68, 68, 18, 38, 38, 38, 37, 37, 37, 37, 37, 37, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, - 13, 13, 13, 35, 35, 35, 35, 35, 35, 31, - 31, 32, 32, 33, 33, 34, 34, 40, 40, 40, - 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, - 41, 41, 41, 15, 9, 9, 8, 43, 43, 43, - 43, 43, 43, 42, 51, 51, 51, 50, 50, 50, - 50, 50, 50, 39, 39, 44, 44, 45, 45, 46, - 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 60, 60, 58, 58, 56, 56, 56, 59, - 59, 57, 57, 57, 20, 20, 53, 53, 54, 54, - 55, 52, 52, 61, 63, 63, 63, 62, 62, 62, - 62, 62, 62, 64, 64, 47, 49, 49, 49, 48, - 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 65, 67, 67, 
67, 66, 66, 66, 66, - 66, 69, 71, 71, 71, 70, 70, 70, 70, 70, - 72, 72, 73, 73, 11, 11, 11, 10, 10, 10, - 10, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 36, 36, 13, 13, 13, 13, 35, 35, 35, 35, + 35, 35, 31, 31, 32, 32, 33, 33, 34, 34, + 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, + 41, 41, 41, 41, 41, 41, 15, 9, 9, 8, + 43, 43, 43, 43, 43, 43, 42, 51, 51, 51, + 50, 50, 50, 50, 50, 50, 39, 39, 44, 44, + 45, 45, 46, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 60, 60, 58, 58, 56, + 56, 56, 59, 59, 57, 57, 57, 20, 20, 53, + 53, 54, 54, 55, 52, 52, 61, 63, 63, 63, + 62, 62, 62, 62, 62, 62, 64, 64, 47, 49, + 49, 49, 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 65, 67, 67, 67, 66, + 66, 66, 66, 66, 69, 71, 71, 71, 70, 70, + 70, 70, 70, 72, 72, 73, 73, 11, 11, 11, + 10, 10, 10, 10, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, + 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 26, 26, 26, 26, 26, 26, + 24, 24, 24, 24, 24, 24, 24, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, - 26, 26, 26, 26, 26, 25, 25, 25, 25, 25, + 26, 26, 26, 26, 26, 26, 26, 26, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, - 25, 25, 25, 25, 21, 21, 21, 21, 21, 21, + 25, 25, 25, 25, 25, 25, 25, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, } var protoR2 = [...]int{ @@ -656,22 +668,23 @@ var protoR2 = [...]int{ 5, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 2, 1, 2, 3, 1, 2, 0, 1, 2, 2, 2, 2, 1, - 3, 4, 5, 5, 3, 2, 5, 4, 5, 4, - 1, 3, 3, 1, 3, 3, 5, 3, 5, 1, - 2, 1, 2, 1, 2, 1, 2, 6, 6, 6, - 7, 7, 7, 5, 6, 6, 6, 6, 7, 7, - 7, 5, 6, 3, 1, 3, 3, 8, 8, 8, - 9, 9, 9, 5, 2, 1, 0, 1, 1, 1, - 1, 2, 1, 5, 6, 7, 8, 5, 6, 6, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 3, 4, 1, 3, 1, 3, 3, 1, - 3, 1, 3, 3, 1, 2, 3, 1, 3, 1, - 3, 1, 3, 5, 2, 1, 0, 1, 1, 1, - 1, 2, 1, 4, 5, 5, 2, 1, 0, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 2, 1, 5, 2, 1, 0, 1, 1, 1, 2, - 1, 5, 2, 1, 0, 1, 1, 1, 2, 1, - 6, 8, 4, 3, 2, 1, 0, 1, 1, 2, + 3, 3, 4, 4, 5, 5, 3, 2, 5, 4, + 5, 4, 1, 3, 5, 3, 1, 3, 3, 5, + 3, 5, 1, 2, 1, 2, 1, 2, 1, 2, + 6, 6, 6, 7, 7, 7, 5, 6, 6, 6, + 6, 7, 7, 7, 5, 6, 3, 1, 3, 3, + 8, 8, 8, 9, 9, 9, 5, 2, 1, 0, + 1, 1, 1, 1, 2, 1, 5, 6, 7, 8, + 5, 6, 6, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 3, 4, 1, 3, 1, + 3, 3, 1, 3, 1, 3, 3, 1, 2, 3, + 1, 3, 1, 3, 1, 3, 5, 2, 1, 0, + 1, 1, 1, 1, 2, 1, 4, 5, 5, 2, + 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 1, 5, 2, 1, 0, 1, + 1, 1, 2, 1, 5, 2, 1, 0, 1, 1, + 1, 2, 1, 6, 8, 4, 3, 2, 1, 0, + 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -691,7 +704,6 @@ var protoR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, } var protoChk = [...]int{ @@ -730,96 +742,98 @@ var protoChk = [...]int{ 62, 59, 55, -62, 52, 51, -59, -57, -20, 5, 64, 55, -66, 52, -31, -31, -31, -21, -27, 59, 55, -70, 52, -21, 55, -38, 60, 52, 60, 52, - 53, -18, 62, -31, 2, -21, -21, -21, -21, -21, - -21, 51, 52, -15, 67, 60, 40, 54, 51, 52, - 52, 
60, -22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, 33, 34, -27, -20, 52, 60, 40, - 5, -21, -21, -21, 51, -27, -73, 65, -17, 67, - -18, 62, -38, 2, 68, 68, 51, 51, 51, 51, - 51, 51, 5, 52, -9, -8, -14, -58, 5, 41, - -51, -50, -7, -39, -44, 52, 2, -34, 36, -30, - 59, -25, 7, 8, 9, 10, 11, 12, 14, 15, - 16, 17, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, - 5, -52, 60, 52, -15, -59, -20, 41, 51, 51, - 51, 5, 49, 48, -31, 68, -35, 2, -16, 62, - -38, 2, 61, 61, 5, 5, 5, 5, 5, 5, - 52, -15, 68, 60, 51, 55, -50, 52, -21, -21, - -27, 59, 52, -15, -31, 52, 5, 5, 5, 52, - -15, -73, -31, 66, 68, 68, 60, -38, 2, 61, - 61, 52, -15, 54, -15, 52, -15, 54, -15, 52, + 53, 67, -18, 62, -27, 2, -21, -21, -21, -21, + -21, -21, 51, 52, -15, 67, 60, 40, 54, 51, + 52, 52, 60, -22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, -27, -20, 52, 60, + 40, 5, -21, -21, -21, 51, -27, -73, 65, -17, + 67, -18, 62, 68, -35, -16, 62, -38, 2, 68, + 57, 68, 51, 51, 51, 51, 51, 51, 5, 52, + -9, -8, -14, -58, 5, 41, -51, -50, -7, -39, + -44, 52, 2, -34, 36, -30, 59, -25, 7, 8, + 9, 10, 11, 12, 14, 15, 16, 17, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 5, -52, 60, 52, + -15, -59, -20, 41, 51, 51, 51, 5, 49, 48, + -31, 68, -35, 2, -38, 2, 68, 60, -38, 2, + 61, 61, -27, 5, 5, 5, 5, 5, 5, 52, + -15, 68, 60, 51, 55, -50, 52, -21, -21, -27, + 59, 52, -15, -31, 52, 5, 5, 5, 52, -15, + -73, -31, 66, 68, 68, 61, 61, -35, 61, 61, + 68, 52, -15, 54, -15, 52, -15, 54, -15, 52, -15, 54, -15, 52, -9, -16, 51, 51, -27, 52, 61, 52, -15, 52, -15, 52, -15, 52, 52, 54, - 66, -35, 61, 61, 52, -49, 54, 52, -49, 54, - 52, -49, 54, 5, 5, 52, 52, 52, -11, -10, - -7, 52, 2, 60, 60, 55, -49, 55, -49, 55, - -49, 52, -15, 54, -15, 55, -10, 52, -35, -35, - 55, 55, 55, 52, -49, 54, 55, -49, 55, + 66, 60, 60, 52, -49, 54, 52, -49, 54, 52, + -49, 54, 5, 5, 52, 52, 52, -11, -10, -7, + 52, 2, -35, -35, 55, -49, 55, -49, 55, -49, + 52, -15, 54, -15, 55, -10, 52, 55, 55, 55, + 52, -49, 54, 55, -49, 55, } var protoDef = [...]int{ -2, -2, -2, -2, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 0, 0, 0, 0, 0, 0, 0, -2, 5, 0, 15, 0, 0, 0, 48, - 0, 22, 374, 375, 376, 377, 378, 379, 380, 381, - 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, - 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, - 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, - 412, 413, 414, 415, 416, 0, 31, 33, 0, 0, - 0, 0, 79, 0, 0, 0, 18, 0, 0, 49, - 21, 0, 0, 0, 0, -2, -2, -2, 80, -2, + 0, 22, 377, 378, 379, 380, 381, 382, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, + 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, + 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 0, 31, 33, 0, 0, + 0, 0, 82, 0, 0, 0, 18, 0, 0, 49, + 21, 0, 0, 0, 0, -2, -2, -2, 83, -2, 17, 19, 20, 23, 0, 35, 36, 37, 38, 39, - -2, 40, 0, 0, 45, 32, 34, 0, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, - 191, 0, 0, 0, 0, 0, 0, 0, 0, 157, - 81, 0, 247, 24, 221, 222, 223, 224, 225, 226, - 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, - 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, - 248, 249, 250, 251, 252, 253, 0, 165, 167, 168, - 169, 170, 172, 0, 0, 159, 294, 295, 296, 297, - 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, - 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, - 328, 329, 330, 331, 332, 333, 334, 0, 194, 196, - 197, 198, 200, 0, 0, 0, 0, 83, 0, 26, - 
254, 255, 256, 257, 258, 259, 260, 261, 262, 263, - 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, - 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, - 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, - 0, 203, 205, 206, 207, 209, 0, 30, 0, -2, - 54, 59, 0, 70, 0, 41, 44, 47, 42, 43, - 46, 175, 176, 190, 0, 403, 0, 403, 0, 403, - 0, 0, 144, 146, 0, 0, 0, 0, 161, 82, - 0, 0, 163, 164, 171, 0, 0, 149, 151, 154, - 0, 192, 193, 199, 0, 0, 0, 0, 84, 0, - 201, 202, 208, 0, 50, 52, 55, 56, 57, 58, - 0, 65, -2, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 142, 0, 0, 0, 0, -2, 0, 156, - 160, 0, 0, 130, 131, 132, 133, 134, 135, 136, - 137, 138, 139, 140, 141, 25, 0, 158, 0, 0, - 155, 0, 0, 0, 0, 27, 0, 0, 60, 0, - 64, -2, 0, 59, 71, 72, 0, 0, 0, 0, - 0, 0, 0, 143, 0, 104, 0, 145, 147, 148, - 0, 115, 117, 118, 119, 120, 122, 0, 360, 85, - 0, 28, 335, 336, 337, 338, 339, 340, 341, 342, - 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, - 353, 354, 355, 356, 357, 358, 359, 361, 362, 363, - 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, - 0, 162, 0, 173, 0, 150, 152, 153, 0, 0, - 0, 0, 0, 415, 0, 61, 0, 0, 73, -2, - 0, 59, 67, 69, 0, 0, 0, 0, 0, 0, - 93, 0, 103, 0, 0, 113, 114, 121, 0, 0, - 86, 0, 127, 0, 0, 174, 0, 0, 0, 101, - 0, 0, 0, 213, 62, 63, 0, 0, 59, 66, - 68, 87, 0, -2, 0, 88, 0, -2, 0, 89, - 0, -2, 0, 94, 105, 106, 0, 0, 29, 128, - 129, 95, 0, 96, 0, 97, 0, 102, 210, -2, - 212, 74, 75, 77, 90, 0, -2, 91, 0, -2, - 92, 0, -2, 0, 0, 98, 99, 100, 0, 215, - 217, 218, 220, 0, 0, 107, 0, 108, 0, 109, - 0, 123, 0, -2, 0, 211, 214, 219, 76, 78, - 110, 111, 112, 124, 0, -2, 125, 0, 126, + -2, 40, 0, 0, 45, 32, 34, 0, 180, 182, + 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 194, 0, 0, 0, 0, 0, 0, 0, 0, 160, + 84, 0, 250, 24, 224, 225, 226, 227, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, + 251, 252, 253, 254, 255, 256, 0, 168, 170, 171, + 172, 173, 175, 0, 0, 162, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, + 331, 332, 333, 334, 335, 336, 337, 0, 197, 199, + 200, 201, 203, 0, 0, 0, 0, 86, 0, 26, + 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, + 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, + 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, + 0, 206, 208, 209, 210, 212, 0, 30, 0, -2, + 54, 59, 0, 72, 0, 41, 44, 47, 42, 43, + 46, 178, 179, 193, 0, 406, 0, 406, 0, 406, + 0, 0, 147, 149, 0, 0, 0, 0, 164, 85, + 0, 0, 166, 167, 174, 0, 0, 152, 154, 157, + 0, 195, 196, 202, 0, 0, 0, 0, 87, 0, + 204, 205, 211, 0, 50, 52, 55, 56, 57, 58, + 0, 0, 67, -2, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 145, 0, 0, 0, 0, -2, 0, + 159, 163, 0, 0, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 25, 0, 161, 0, + 0, 158, 0, 0, 0, 0, 27, 0, 0, 60, + 0, 66, -2, 61, 0, 76, -2, 0, 59, 73, + 0, 75, 0, 0, 0, 0, 0, 0, 0, 146, + 0, 107, 0, 148, 150, 151, 0, 118, 120, 121, + 122, 123, 125, 0, 363, 88, 0, 28, 338, 339, + 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, + 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, + 360, 361, 362, 364, 365, 366, 367, 368, 369, 370, + 371, 372, 373, 374, 375, 376, 0, 165, 0, 176, + 0, 153, 155, 156, 0, 0, 0, 0, 0, 418, + 0, 62, 0, 0, 0, 59, 63, 0, 0, 59, + 69, 71, 0, 0, 0, 0, 0, 0, 0, 96, + 0, 106, 0, 0, 116, 117, 124, 0, 0, 89, + 0, 130, 0, 0, 177, 0, 0, 0, 104, 0, + 0, 0, 216, 64, 65, 68, 70, 77, 78, 80, + 74, 90, 0, -2, 0, 91, 0, -2, 
0, 92, + 0, -2, 0, 97, 108, 109, 0, 0, 29, 131, + 132, 98, 0, 99, 0, 100, 0, 105, 213, -2, + 215, 0, 0, 93, 0, -2, 94, 0, -2, 95, + 0, -2, 0, 0, 101, 102, 103, 0, 218, 220, + 221, 223, 79, 81, 110, 0, 111, 0, 112, 0, + 126, 0, -2, 0, 214, 217, 222, 113, 114, 115, + 127, 0, -2, 128, 0, 129, } var protoTok1 = [...]int{ @@ -1413,61 +1427,55 @@ protodefault: protoDollar = protoS[protopt-1 : protopt+1] //line proto.y:281 { - if protoDollar[1].id.Val == "true" || protoDollar[1].id.Val == "false" { - protoVAL.v = ast.NewBoolLiteralNode(protoDollar[1].id.ToKeyword()) - } else if protoDollar[1].id.Val == "inf" || protoDollar[1].id.Val == "nan" { - protoVAL.v = ast.NewSpecialFloatLiteralNode(protoDollar[1].id.ToKeyword()) - } else { - protoVAL.v = protoDollar[1].id - } + protoVAL.v = protoDollar[1].id } case 40: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:291 +//line proto.y:285 { protoVAL.v = protoDollar[1].f } case 41: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:294 +//line proto.y:288 { protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) } case 42: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:297 +//line proto.y:291 { protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f) } case 43: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:300 +//line proto.y:294 { f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) } case 44: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:304 +//line proto.y:298 { f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword()) protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f) } case 45: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:308 +//line proto.y:302 { protoVAL.v = protoDollar[1].i } case 46: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:311 +//line proto.y:305 { protoVAL.v = ast.NewPositiveUintLiteralNode(protoDollar[1].b, protoDollar[2].i) } case 47: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:314 +//line proto.y:308 { if protoDollar[2].i.Val > math.MaxInt64+1 { // can't represent as int so treat as float literal @@ -1478,26 +1486,26 @@ protodefault: } case 48: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:323 +//line proto.y:317 { protoVAL.str = &stringList{protoDollar[1].s, nil} } case 49: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:326 +//line proto.y:320 { protoVAL.str = &stringList{protoDollar[1].s, protoDollar[2].str} } case 50: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:330 +//line proto.y:324 { fields, delims := protoDollar[2].msgLit.toNodes() protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) } case 51: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:335 +//line proto.y:329 { if protoDollar[1].msgEntry != nil { protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, nil} @@ -1507,7 +1515,7 @@ protodefault: } case 52: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:342 +//line proto.y:336 { if protoDollar[1].msgEntry != nil { protoVAL.msgLit = &messageFieldList{protoDollar[1].msgEntry, protoDollar[2].msgLit} @@ -1517,13 +1525,13 @@ protodefault: } case 53: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:349 +//line proto.y:343 { protoVAL.msgLit = nil } case 54: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:353 +//line proto.y:347 { if protoDollar[1].msgField 
!= nil { protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, nil} @@ -1533,7 +1541,7 @@ protodefault: } case 55: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:360 +//line proto.y:354 { if protoDollar[1].msgField != nil { protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} @@ -1543,7 +1551,7 @@ protodefault: } case 56: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:367 +//line proto.y:361 { if protoDollar[1].msgField != nil { protoVAL.msgEntry = &messageFieldEntry{protoDollar[1].msgField, protoDollar[2].b} @@ -1553,25 +1561,25 @@ protodefault: } case 57: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:374 +//line proto.y:368 { protoVAL.msgEntry = nil } case 58: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:377 +//line proto.y:371 { protoVAL.msgEntry = nil } case 59: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:380 +//line proto.y:374 { protoVAL.msgEntry = nil } case 60: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:384 +//line proto.y:378 { if protoDollar[1].ref != nil { protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) @@ -1580,8 +1588,19 @@ protodefault: } } case 61: + protoDollar = protoS[protopt-3 : protopt+1] +//line proto.y:385 + { + if protoDollar[1].ref != nil { + val := ast.NewArrayLiteralNode(protoDollar[2].b, nil, nil, protoDollar[3].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } + } + case 62: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:391 +//line proto.y:393 { if protoDollar[1].ref != nil { val := ast.NewArrayLiteralNode(protoDollar[3].b, nil, nil, protoDollar[4].b) @@ -1590,9 +1609,21 @@ protodefault: protoVAL.msgField = nil } } - case 62: + case 63: + protoDollar = protoS[protopt-4 : protopt+1] +//line proto.y:401 + { + if protoDollar[1].ref != nil { + vals, commas := protoDollar[3].sl.toNodes() + val := ast.NewArrayLiteralNode(protoDollar[2].b, vals, commas, protoDollar[4].b) + protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, val) + } else { + protoVAL.msgField = nil + } + } + case 64: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:399 +//line proto.y:410 { if protoDollar[1].ref != nil { vals, commas := protoDollar[4].sl.toNodes() @@ -1602,15 +1633,15 @@ protodefault: protoVAL.msgField = nil } } - case 63: + case 65: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:408 +//line proto.y:419 { protoVAL.msgField = nil } - case 64: + case 66: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:411 +//line proto.y:422 { if protoDollar[1].ref != nil { protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, protoDollar[2].b, protoDollar[3].v) @@ -1618,9 +1649,9 @@ protodefault: protoVAL.msgField = nil } } - case 65: + case 67: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:418 +//line proto.y:429 { if protoDollar[1].ref != nil { protoVAL.msgField = ast.NewMessageFieldNode(protoDollar[1].ref, nil, protoDollar[2].v) @@ -1628,9 +1659,9 @@ protodefault: protoVAL.msgField = nil } } - case 66: + case 68: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:425 +//line proto.y:436 { if protoDollar[1].ref != nil { fields, delims := protoDollar[4].msgLit.toNodes() @@ -1640,9 +1671,9 @@ protodefault: protoVAL.msgField = nil } } - case 67: + case 69: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:434 +//line proto.y:445 { if protoDollar[1].ref 
!= nil { fields, delims := protoDollar[3].msgLit.toNodes() @@ -1652,292 +1683,298 @@ protodefault: protoVAL.msgField = nil } } - case 68: + case 70: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:443 +//line proto.y:454 { protoVAL.msgField = nil } - case 69: + case 71: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:446 +//line proto.y:457 { protoVAL.msgField = nil } - case 70: + case 72: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:450 +//line proto.y:461 { protoVAL.ref = ast.NewFieldReferenceNode(protoDollar[1].id) } - case 71: + case 73: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:453 +//line proto.y:464 { - protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b) + protoVAL.ref = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b) } - case 72: + case 74: + protoDollar = protoS[protopt-5 : protopt+1] +//line proto.y:467 + { + protoVAL.ref = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b) + } + case 75: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:456 +//line proto.y:470 { protoVAL.ref = nil } - case 73: + case 76: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:460 +//line proto.y:474 { protoVAL.sl = &valueList{protoDollar[1].v, nil, nil} } - case 74: + case 77: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:463 +//line proto.y:477 { protoVAL.sl = &valueList{protoDollar[1].v, protoDollar[2].b, protoDollar[3].sl} } - case 75: + case 78: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:466 +//line proto.y:480 { fields, delims := protoDollar[2].msgLit.toNodes() msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) protoVAL.sl = &valueList{msg, nil, nil} } - case 76: + case 79: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:471 +//line proto.y:485 { fields, delims := protoDollar[2].msgLit.toNodes() msg := ast.NewMessageLiteralNode(protoDollar[1].b, fields, delims, protoDollar[3].b) protoVAL.sl = &valueList{msg, protoDollar[4].b, protoDollar[5].sl} } - case 77: + case 80: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:476 +//line proto.y:490 { protoVAL.sl = nil } - case 78: + case 81: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:479 +//line proto.y:493 { protoVAL.sl = protoDollar[5].sl } - case 79: + case 82: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:483 +//line proto.y:497 { protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 80: + case 83: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:486 +//line proto.y:500 { protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 81: + case 84: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:490 +//line proto.y:504 { protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 82: + case 85: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:493 +//line proto.y:507 { protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 83: + case 86: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:497 +//line proto.y:511 { protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 84: + case 87: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:500 +//line proto.y:514 { protoVAL.tid = 
protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 85: + case 88: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:504 +//line proto.y:518 { protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil) } - case 86: + case 89: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:507 +//line proto.y:521 { protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b) } - case 87: + case 90: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:511 +//line proto.y:525 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 88: + case 91: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:514 +//line proto.y:528 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 89: + case 92: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:517 +//line proto.y:531 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 90: + case 93: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:520 +//line proto.y:534 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 91: + case 94: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:523 +//line proto.y:537 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 92: + case 95: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:526 +//line proto.y:540 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 93: + case 96: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:529 +//line proto.y:543 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 94: + case 97: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:532 +//line proto.y:546 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 95: + case 98: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:536 +//line proto.y:550 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 96: + case 99: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:539 +//line proto.y:553 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 97: + case 100: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:542 +//line proto.y:556 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b) } - case 98: + case 101: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:545 +//line proto.y:559 { protoVAL.fld = 
ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 99: + case 102: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:548 +//line proto.y:562 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 100: + case 103: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:551 +//line proto.y:565 { protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b) } - case 101: + case 104: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:554 +//line proto.y:568 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 102: + case 105: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:557 +//line proto.y:571 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 103: + case 106: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:561 +//line proto.y:575 { opts, commas := protoDollar[2].opts.toNodes() protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, opts, commas, protoDollar[3].b) } - case 104: + case 107: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:566 +//line proto.y:580 { protoVAL.opts = &compactOptionList{protoDollar[1].opt, nil, nil} } - case 105: + case 108: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:569 +//line proto.y:583 { protoVAL.opts = &compactOptionList{protoDollar[1].opt, protoDollar[2].b, protoDollar[3].opts} } - case 106: + case 109: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:573 +//line proto.y:587 { refs, dots := protoDollar[1].optNms.toNodes() optName := ast.NewOptionNameNode(refs, dots) protoVAL.opt = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v) } - case 107: + case 110: protoDollar = protoS[protopt-8 : protopt+1] -//line proto.y:579 +//line proto.y:593 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 108: + case 111: protoDollar = protoS[protopt-8 : protopt+1] -//line proto.y:582 +//line proto.y:596 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 109: + case 112: protoDollar = protoS[protopt-8 : protopt+1] -//line proto.y:585 +//line proto.y:599 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 110: + case 113: protoDollar = protoS[protopt-9 : protopt+1] -//line proto.y:588 +//line proto.y:602 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 111: + case 114: 
protoDollar = protoS[protopt-9 : protopt+1] -//line proto.y:591 +//line proto.y:605 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 112: + case 115: protoDollar = protoS[protopt-9 : protopt+1] -//line proto.y:594 +//line proto.y:608 { protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgDecls, protoDollar[9].b) } - case 113: + case 116: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:598 +//line proto.y:612 { protoVAL.oo = ast.NewOneOfNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooDecls, protoDollar[5].b) } - case 114: + case 117: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:602 +//line proto.y:616 { if protoDollar[2].ooDecl != nil { protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecl) @@ -1945,9 +1982,9 @@ protodefault: protoVAL.ooDecls = protoDollar[1].ooDecls } } - case 115: + case 118: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:609 +//line proto.y:623 { if protoDollar[1].ooDecl != nil { protoVAL.ooDecls = []ast.OneOfElement{protoDollar[1].ooDecl} @@ -1955,218 +1992,218 @@ protodefault: protoVAL.ooDecls = nil } } - case 116: + case 119: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:616 +//line proto.y:630 { protoVAL.ooDecls = nil } - case 117: + case 120: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:620 +//line proto.y:634 { protoVAL.ooDecl = protoDollar[1].opt } - case 118: + case 121: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:623 +//line proto.y:637 { protoVAL.ooDecl = protoDollar[1].fld } - case 119: + case 122: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:626 +//line proto.y:640 { protoVAL.ooDecl = protoDollar[1].grp } - case 120: + case 123: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:629 +//line proto.y:643 { protoVAL.ooDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 121: + case 124: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:632 +//line proto.y:646 { protoVAL.ooDecl = nil } - case 122: + case 125: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:635 +//line proto.y:649 { protoVAL.ooDecl = nil } - case 123: + case 126: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:639 +//line proto.y:653 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 124: + case 127: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:642 +//line proto.y:656 { protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 125: + case 128: protoDollar = protoS[protopt-7 : protopt+1] -//line proto.y:646 +//line proto.y:660 { protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgDecls, protoDollar[7].b) } - case 126: + case 129: protoDollar = protoS[protopt-8 : protopt+1] -//line proto.y:649 +//line proto.y:663 { protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, 
protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgDecls, protoDollar[8].b) } - case 127: + case 130: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:653 +//line proto.y:667 { protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b) } - case 128: + case 131: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:656 +//line proto.y:670 { protoVAL.mapFld = ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b) } - case 129: + case 132: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:660 +//line proto.y:674 { protoVAL.mapType = ast.NewMapTypeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].id, protoDollar[4].b, protoDollar[5].tid, protoDollar[6].b) } - case 142: + case 145: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:677 +//line proto.y:691 { ranges, commas := protoDollar[2].rngs.toNodes() protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, nil, protoDollar[3].b) } - case 143: + case 146: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:681 +//line proto.y:695 { ranges, commas := protoDollar[2].rngs.toNodes() protoVAL.ext = ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].cmpctOpts, protoDollar[4].b) } - case 144: + case 147: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:686 +//line proto.y:700 { protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} } - case 145: + case 148: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:689 +//line proto.y:703 { protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} } - case 146: + case 149: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:693 +//line proto.y:707 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, nil, nil, nil) } - case 147: + case 150: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:696 +//line proto.y:710 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), protoDollar[3].i, nil) } - case 148: + case 151: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:699 +//line proto.y:713 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) } - case 149: + case 152: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:703 +//line proto.y:717 { protoVAL.rngs = &rangeList{protoDollar[1].rng, nil, nil} } - case 150: + case 153: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:706 +//line proto.y:720 { protoVAL.rngs = &rangeList{protoDollar[1].rng, protoDollar[2].b, protoDollar[3].rngs} } - case 151: + case 154: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:710 +//line proto.y:724 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, nil, nil, nil) } - case 152: + case 155: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:713 +//line proto.y:727 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), protoDollar[3].il, nil) } - case 153: + case 156: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:716 +//line proto.y:730 { protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword()) } - case 154: + case 157: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:720 +//line 
proto.y:734 { protoVAL.il = protoDollar[1].i } - case 155: + case 158: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:723 +//line proto.y:737 { protoVAL.il = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i) } - case 156: + case 159: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:727 +//line proto.y:741 { ranges, commas := protoDollar[2].rngs.toNodes() protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) } - case 158: + case 161: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:733 +//line proto.y:747 { ranges, commas := protoDollar[2].rngs.toNodes() protoVAL.resvd = ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), ranges, commas, protoDollar[3].b) } - case 160: + case 163: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:739 +//line proto.y:753 { names, commas := protoDollar[2].names.toNodes() protoVAL.resvd = ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), names, commas, protoDollar[3].b) } - case 161: + case 164: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:744 +//line proto.y:758 { protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), nil, nil} } - case 162: + case 165: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:747 +//line proto.y:761 { protoVAL.names = &nameList{protoDollar[1].str.toStringValueNode(), protoDollar[2].b, protoDollar[3].names} } - case 163: + case 166: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:751 +//line proto.y:765 { protoVAL.en = ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enDecls, protoDollar[5].b) } - case 164: + case 167: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:755 +//line proto.y:769 { if protoDollar[2].enDecl != nil { protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecl) @@ -2174,9 +2211,9 @@ protodefault: protoVAL.enDecls = protoDollar[1].enDecls } } - case 165: + case 168: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:762 +//line proto.y:776 { if protoDollar[1].enDecl != nil { protoVAL.enDecls = []ast.EnumElement{protoDollar[1].enDecl} @@ -2184,69 +2221,69 @@ protodefault: protoVAL.enDecls = nil } } - case 166: + case 169: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:769 +//line proto.y:783 { protoVAL.enDecls = nil } - case 167: + case 170: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:773 +//line proto.y:787 { protoVAL.enDecl = protoDollar[1].opt } - case 168: + case 171: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:776 +//line proto.y:790 { protoVAL.enDecl = protoDollar[1].env } - case 169: + case 172: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:779 +//line proto.y:793 { protoVAL.enDecl = protoDollar[1].resvd } - case 170: + case 173: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:782 +//line proto.y:796 { protoVAL.enDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 171: + case 174: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:785 +//line proto.y:799 { protoVAL.enDecl = nil } - case 172: + case 175: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:788 +//line proto.y:802 { protoVAL.enDecl = nil } - case 173: + case 176: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:792 +//line proto.y:806 { protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, protoDollar[4].b) } - case 174: + case 177: protoDollar = 
protoS[protopt-5 : protopt+1] -//line proto.y:795 +//line proto.y:809 { protoVAL.env = ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, protoDollar[5].b) } - case 175: + case 178: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:799 +//line proto.y:813 { protoVAL.msg = ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgDecls, protoDollar[5].b) } - case 176: + case 179: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:803 +//line proto.y:817 { if protoDollar[2].msgDecl != nil { protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecl) @@ -2254,9 +2291,9 @@ protodefault: protoVAL.msgDecls = protoDollar[1].msgDecls } } - case 177: + case 180: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:810 +//line proto.y:824 { if protoDollar[1].msgDecl != nil { protoVAL.msgDecls = []ast.MessageElement{protoDollar[1].msgDecl} @@ -2264,99 +2301,99 @@ protodefault: protoVAL.msgDecls = nil } } - case 178: + case 181: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:817 +//line proto.y:831 { protoVAL.msgDecls = nil } - case 179: + case 182: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:821 +//line proto.y:835 { protoVAL.msgDecl = protoDollar[1].fld } - case 180: + case 183: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:824 +//line proto.y:838 { protoVAL.msgDecl = protoDollar[1].en } - case 181: + case 184: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:827 +//line proto.y:841 { protoVAL.msgDecl = protoDollar[1].msg } - case 182: + case 185: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:830 +//line proto.y:844 { protoVAL.msgDecl = protoDollar[1].extend } - case 183: + case 186: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:833 +//line proto.y:847 { protoVAL.msgDecl = protoDollar[1].ext } - case 184: + case 187: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:836 +//line proto.y:850 { protoVAL.msgDecl = protoDollar[1].grp } - case 185: + case 188: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:839 +//line proto.y:853 { protoVAL.msgDecl = protoDollar[1].opt } - case 186: + case 189: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:842 +//line proto.y:856 { protoVAL.msgDecl = protoDollar[1].oo } - case 187: + case 190: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:845 +//line proto.y:859 { protoVAL.msgDecl = protoDollar[1].mapFld } - case 188: + case 191: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:848 +//line proto.y:862 { protoVAL.msgDecl = protoDollar[1].resvd } - case 189: + case 192: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:851 +//line proto.y:865 { protoVAL.msgDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 190: + case 193: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:854 +//line proto.y:868 { protoVAL.msgDecl = nil } - case 191: + case 194: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:857 +//line proto.y:871 { protoVAL.msgDecl = nil } - case 192: + case 195: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:861 +//line proto.y:875 { protoVAL.extend = ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extDecls, protoDollar[5].b) } - case 193: + case 196: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:865 +//line proto.y:879 { if protoDollar[2].extDecl != nil { protoVAL.extDecls = 
append(protoDollar[1].extDecls, protoDollar[2].extDecl) @@ -2364,9 +2401,9 @@ protodefault: protoVAL.extDecls = protoDollar[1].extDecls } } - case 194: + case 197: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:872 +//line proto.y:886 { if protoDollar[1].extDecl != nil { protoVAL.extDecls = []ast.ExtendElement{protoDollar[1].extDecl} @@ -2374,51 +2411,51 @@ protodefault: protoVAL.extDecls = nil } } - case 195: + case 198: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:879 +//line proto.y:893 { protoVAL.extDecls = nil } - case 196: + case 199: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:883 +//line proto.y:897 { protoVAL.extDecl = protoDollar[1].fld } - case 197: + case 200: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:886 +//line proto.y:900 { protoVAL.extDecl = protoDollar[1].grp } - case 198: + case 201: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:889 +//line proto.y:903 { protoVAL.extDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 199: + case 202: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:892 +//line proto.y:906 { protoVAL.extDecl = nil } - case 200: + case 203: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:895 +//line proto.y:909 { protoVAL.extDecl = nil } - case 201: + case 204: protoDollar = protoS[protopt-5 : protopt+1] -//line proto.y:899 +//line proto.y:913 { protoVAL.svc = ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcDecls, protoDollar[5].b) } - case 202: + case 205: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:903 +//line proto.y:917 { if protoDollar[2].svcDecl != nil { protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecl) @@ -2426,9 +2463,9 @@ protodefault: protoVAL.svcDecls = protoDollar[1].svcDecls } } - case 203: + case 206: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:910 +//line proto.y:924 { if protoDollar[1].svcDecl != nil { protoVAL.svcDecls = []ast.ServiceElement{protoDollar[1].svcDecl} @@ -2436,69 +2473,69 @@ protodefault: protoVAL.svcDecls = nil } } - case 204: + case 207: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:917 +//line proto.y:931 { protoVAL.svcDecls = nil } - case 205: + case 208: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:924 +//line proto.y:938 { protoVAL.svcDecl = protoDollar[1].opt } - case 206: + case 209: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:927 +//line proto.y:941 { protoVAL.svcDecl = protoDollar[1].mtd } - case 207: + case 210: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:930 +//line proto.y:944 { protoVAL.svcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 208: + case 211: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:933 +//line proto.y:947 { protoVAL.svcDecl = nil } - case 209: + case 212: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:936 +//line proto.y:950 { protoVAL.svcDecl = nil } - case 210: + case 213: protoDollar = protoS[protopt-6 : protopt+1] -//line proto.y:940 +//line proto.y:954 { protoVAL.mtd = ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), protoDollar[5].rpcType, protoDollar[6].b) } - case 211: + case 214: protoDollar = protoS[protopt-8 : protopt+1] -//line proto.y:943 +//line proto.y:957 { protoVAL.mtd = ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].rpcType, protoDollar[4].id.ToKeyword(), 
protoDollar[5].rpcType, protoDollar[6].b, protoDollar[7].rpcDecls, protoDollar[8].b) } - case 212: + case 215: protoDollar = protoS[protopt-4 : protopt+1] -//line proto.y:947 +//line proto.y:961 { protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b) } - case 213: + case 216: protoDollar = protoS[protopt-3 : protopt+1] -//line proto.y:950 +//line proto.y:964 { protoVAL.rpcType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b) } - case 214: + case 217: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:954 +//line proto.y:968 { if protoDollar[2].rpcDecl != nil { protoVAL.rpcDecls = append(protoDollar[1].rpcDecls, protoDollar[2].rpcDecl) @@ -2506,9 +2543,9 @@ protodefault: protoVAL.rpcDecls = protoDollar[1].rpcDecls } } - case 215: + case 218: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:961 +//line proto.y:975 { if protoDollar[1].rpcDecl != nil { protoVAL.rpcDecls = []ast.RPCElement{protoDollar[1].rpcDecl} @@ -2516,33 +2553,33 @@ protodefault: protoVAL.rpcDecls = nil } } - case 216: + case 219: protoDollar = protoS[protopt-0 : protopt+1] -//line proto.y:968 +//line proto.y:982 { protoVAL.rpcDecls = nil } - case 217: + case 220: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:972 +//line proto.y:986 { protoVAL.rpcDecl = protoDollar[1].opt } - case 218: + case 221: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:975 +//line proto.y:989 { protoVAL.rpcDecl = ast.NewEmptyDeclNode(protoDollar[1].b) } - case 219: + case 222: protoDollar = protoS[protopt-2 : protopt+1] -//line proto.y:978 +//line proto.y:992 { protoVAL.rpcDecl = nil } - case 220: + case 223: protoDollar = protoS[protopt-1 : protopt+1] -//line proto.y:981 +//line proto.y:995 { protoVAL.rpcDecl = nil } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go index 2104c59a10..02df689a95 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go @@ -105,18 +105,21 @@ func (r *parseResult) generateSourceCodeInfoForOption(sci *sourceCodeInfo, n *as } func (r *parseResult) generateSourceCodeInfoForMessage(sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) { - sci.newLoc(n, path) - + var openBrace ast.Node var decls []ast.MessageElement switch n := n.(type) { case *ast.MessageNode: + openBrace = n.OpenBrace decls = n.Decls case *ast.GroupNode: + openBrace = n.OpenBrace decls = n.Decls case *ast.MapFieldNode: + sci.newLoc(n, path) // map entry so nothing else to do return } + sci.newBlockLoc(n, openBrace, path) sci.newLoc(n.MessageName(), append(path, internal.Message_nameTag)) // matching protoc, which emits the corresponding field type name (for group fields) @@ -179,7 +182,7 @@ func (r *parseResult) generateSourceCodeInfoForMessage(sci *sourceCodeInfo, n as } func (r *parseResult) generateSourceCodeInfoForEnum(sci *sourceCodeInfo, n *ast.EnumNode, path []int32) { - sci.newLoc(n, path) + sci.newBlockLoc(n, n.OpenBrace, path) sci.newLoc(n.Name, append(path, internal.Enum_nameTag)) var optIndex, valIndex, reservedNameIndex, reservedRangeIndex int32 @@ -238,7 +241,7 @@ func (r *parseResult) generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo } func (r *parseResult) generateSourceCodeInfoForExtensions(sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, 
msgIndex *int32, extendPath, msgPath []int32) { - sci.newLoc(n, extendPath) + sci.newBlockLoc(n, n.OpenBrace, extendPath) for _, decl := range n.Decls { switch decl := decl.(type) { case *ast.FieldNode: @@ -255,7 +258,7 @@ func (r *parseResult) generateSourceCodeInfoForExtensions(sci *sourceCodeInfo, n } func (r *parseResult) generateSourceCodeInfoForOneOf(sci *sourceCodeInfo, n *ast.OneOfNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneOfPath []int32) { - sci.newLoc(n, oneOfPath) + sci.newBlockLoc(n, n.OpenBrace, oneOfPath) sci.newLoc(n.Name, append(oneOfPath, internal.OneOf_nameTag)) var optIndex int32 @@ -351,7 +354,7 @@ func (r *parseResult) generateSourceCodeInfoForExtensionRanges(sci *sourceCodeIn } func (r *parseResult) generateSourceCodeInfoForService(sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) { - sci.newLoc(n, path) + sci.newBlockLoc(n, n.OpenBrace, path) sci.newLoc(n.Name, append(path, internal.Service_nameTag)) var optIndex, rpcIndex int32 for _, child := range n.Decls { @@ -366,7 +369,11 @@ func (r *parseResult) generateSourceCodeInfoForService(sci *sourceCodeInfo, n *a } func (r *parseResult) generateSourceCodeInfoForMethod(sci *sourceCodeInfo, n *ast.RPCNode, path []int32) { - sci.newLoc(n, path) + if n.OpenBrace != nil { + sci.newBlockLoc(n, n.OpenBrace, path) + } else { + sci.newLoc(n, path) + } sci.newLoc(n.Name, append(path, internal.Method_nameTag)) if n.Input.Stream != nil { sci.newLoc(n.Input.Stream, append(path, internal.Method_inputStreamTag)) @@ -400,9 +407,22 @@ func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) { }) } +func (sci *sourceCodeInfo) newBlockLoc(n, openBrace ast.Node, path []int32) { + // Block definitions use trailing comments after the open brace "{" as the + // element's trailing comments. For example: + // + // message Foo { // this is a trailing comment for a message + // + // } // not this + // + sci.newLocWithComments(n, n.LeadingComments(), openBrace.TrailingComments(), path) +} + func (sci *sourceCodeInfo) newLoc(n ast.Node, path []int32) { - leadingComments := n.LeadingComments() - trailingComments := n.TrailingComments() + sci.newLocWithComments(n, n.LeadingComments(), n.TrailingComments(), path) +} + +func (sci *sourceCodeInfo) newLocWithComments(n ast.Node, leadingComments, trailingComments []ast.Comment, path []int32) { if sci.commentUsed(leadingComments) { leadingComments = nil } diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt index 41c3926058..f0c4f62696 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt @@ -3,7 +3,7 @@ : desc_test_comments.proto:8:1 -desc_test_comments.proto:141:2 +desc_test_comments.proto:156:2 > syntax: @@ -60,7 +60,7 @@ desc_test_comments.proto:18:34 > message_type[0]: desc_test_comments.proto:25:1 -desc_test_comments.proto:105:2 +desc_test_comments.proto:113:2 Leading detached comment [0]: Multiple white space lines (like above) cannot be preserved... @@ -68,9 +68,6 @@ desc_test_comments.proto:105:2 Leading comments: We need a request for our RPC service below. - Trailing comments: - And next we'll need some extensions... 
- > message_type[0] > name: @@ -357,7 +354,7 @@ desc_test_comments.proto:52:37 > message_type[0] > field[2]: desc_test_comments.proto:55:9 -desc_test_comments.proto:67:10 +desc_test_comments.proto:69:10 > message_type[0] > field[2] > label: @@ -382,10 +379,13 @@ desc_test_comments.proto:55:51 > message_type[0] > nested_type[0]: desc_test_comments.proto:55:9 -desc_test_comments.proto:67:10 +desc_test_comments.proto:69:10 Leading comments: Group comment + Trailing comments: + trailer for Extras + > message_type[0] > nested_type[0] > name: @@ -401,81 +401,81 @@ desc_test_comments.proto:55:47 > message_type[0] > nested_type[0] > options: -desc_test_comments.proto:57:17 -desc_test_comments.proto:57:52 +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 > message_type[0] > nested_type[0] > options > mfubar: -desc_test_comments.proto:57:17 -desc_test_comments.proto:57:52 +desc_test_comments.proto:59:17 +desc_test_comments.proto:59:52 Leading comments: this is a custom option > message_type[0] > nested_type[0] > field[0]: -desc_test_comments.proto:59:17 -desc_test_comments.proto:59:41 +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:41 > message_type[0] > nested_type[0] > field[0] > label: -desc_test_comments.proto:59:17 -desc_test_comments.proto:59:25 +desc_test_comments.proto:61:17 +desc_test_comments.proto:61:25 > message_type[0] > nested_type[0] > field[0] > type: -desc_test_comments.proto:59:26 -desc_test_comments.proto:59:32 +desc_test_comments.proto:61:26 +desc_test_comments.proto:61:32 > message_type[0] > nested_type[0] > field[0] > name: -desc_test_comments.proto:59:33 -desc_test_comments.proto:59:36 +desc_test_comments.proto:61:33 +desc_test_comments.proto:61:36 > message_type[0] > nested_type[0] > field[0] > number: -desc_test_comments.proto:59:39 -desc_test_comments.proto:59:40 +desc_test_comments.proto:61:39 +desc_test_comments.proto:61:40 > message_type[0] > nested_type[0] > field[1]: -desc_test_comments.proto:60:17 -desc_test_comments.proto:60:40 +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:40 > message_type[0] > nested_type[0] > field[1] > label: -desc_test_comments.proto:60:17 -desc_test_comments.proto:60:25 +desc_test_comments.proto:62:17 +desc_test_comments.proto:62:25 > message_type[0] > nested_type[0] > field[1] > type: -desc_test_comments.proto:60:26 -desc_test_comments.proto:60:31 +desc_test_comments.proto:62:26 +desc_test_comments.proto:62:31 > message_type[0] > nested_type[0] > field[1] > name: -desc_test_comments.proto:60:32 -desc_test_comments.proto:60:35 +desc_test_comments.proto:62:32 +desc_test_comments.proto:62:35 > message_type[0] > nested_type[0] > field[1] > number: -desc_test_comments.proto:60:38 -desc_test_comments.proto:60:39 +desc_test_comments.proto:62:38 +desc_test_comments.proto:62:39 > message_type[0] > nested_type[0] > options: -desc_test_comments.proto:62:17 -desc_test_comments.proto:62:64 +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor: -desc_test_comments.proto:62:17 -desc_test_comments.proto:62:64 +desc_test_comments.proto:64:17 +desc_test_comments.proto:64:64 > message_type[0] > nested_type[0] > field[2]: -desc_test_comments.proto:65:17 -desc_test_comments.proto:65:41 +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:41 Leading comments: Leading comment... 
@@ -485,424 +485,446 @@ desc_test_comments.proto:65:41 > message_type[0] > nested_type[0] > field[2] > label: -desc_test_comments.proto:65:17 -desc_test_comments.proto:65:25 +desc_test_comments.proto:67:17 +desc_test_comments.proto:67:25 > message_type[0] > nested_type[0] > field[2] > type: -desc_test_comments.proto:65:26 -desc_test_comments.proto:65:32 +desc_test_comments.proto:67:26 +desc_test_comments.proto:67:32 > message_type[0] > nested_type[0] > field[2] > name: -desc_test_comments.proto:65:33 -desc_test_comments.proto:65:36 +desc_test_comments.proto:67:33 +desc_test_comments.proto:67:36 > message_type[0] > nested_type[0] > field[2] > number: -desc_test_comments.proto:65:39 -desc_test_comments.proto:65:40 +desc_test_comments.proto:67:39 +desc_test_comments.proto:67:40 > message_type[0] > enum_type[0]: -desc_test_comments.proto:69:9 -desc_test_comments.proto:90:10 +desc_test_comments.proto:71:9 +desc_test_comments.proto:93:10 + Trailing comments: + trailer for enum + > message_type[0] > enum_type[0] > name: -desc_test_comments.proto:69:14 -desc_test_comments.proto:69:29 +desc_test_comments.proto:71:14 +desc_test_comments.proto:71:29 Trailing comments: "super"! > message_type[0] > enum_type[0] > options: -desc_test_comments.proto:72:17 -desc_test_comments.proto:72:43 +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 > message_type[0] > enum_type[0] > options > allow_alias: -desc_test_comments.proto:72:17 -desc_test_comments.proto:72:43 +desc_test_comments.proto:75:17 +desc_test_comments.proto:75:43 Leading comments: allow_alias comments! > message_type[0] > enum_type[0] > value[0]: -desc_test_comments.proto:74:17 -desc_test_comments.proto:74:86 +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:86 > message_type[0] > enum_type[0] > value[0] > name: -desc_test_comments.proto:74:17 -desc_test_comments.proto:74:22 +desc_test_comments.proto:77:17 +desc_test_comments.proto:77:22 > message_type[0] > enum_type[0] > value[0] > number: -desc_test_comments.proto:74:25 -desc_test_comments.proto:74:26 +desc_test_comments.proto:77:25 +desc_test_comments.proto:77:26 > message_type[0] > enum_type[0] > value[0] > options: -desc_test_comments.proto:74:27 -desc_test_comments.proto:74:85 +desc_test_comments.proto:77:27 +desc_test_comments.proto:77:85 > message_type[0] > enum_type[0] > value[0] > options > evfubars: -desc_test_comments.proto:74:28 -desc_test_comments.proto:74:56 +desc_test_comments.proto:77:28 +desc_test_comments.proto:77:56 > message_type[0] > enum_type[0] > value[0] > options > evfubar: -desc_test_comments.proto:74:58 -desc_test_comments.proto:74:84 +desc_test_comments.proto:77:58 +desc_test_comments.proto:77:84 > message_type[0] > enum_type[0] > value[1]: -desc_test_comments.proto:75:17 -desc_test_comments.proto:75:100 +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:100 > message_type[0] > enum_type[0] > value[1] > name: -desc_test_comments.proto:75:17 -desc_test_comments.proto:75:22 +desc_test_comments.proto:78:17 +desc_test_comments.proto:78:22 > message_type[0] > enum_type[0] > value[1] > number: -desc_test_comments.proto:75:25 -desc_test_comments.proto:75:26 +desc_test_comments.proto:78:25 +desc_test_comments.proto:78:26 > message_type[0] > enum_type[0] > value[1] > options: -desc_test_comments.proto:75:27 -desc_test_comments.proto:75:99 +desc_test_comments.proto:78:27 +desc_test_comments.proto:78:99 > message_type[0] > enum_type[0] > value[1] > options > evfubaruf: -desc_test_comments.proto:75:29 -desc_test_comments.proto:75:57 
+desc_test_comments.proto:78:29 +desc_test_comments.proto:78:57 > message_type[0] > enum_type[0] > value[1] > options > evfubaru: -desc_test_comments.proto:75:73 -desc_test_comments.proto:75:98 +desc_test_comments.proto:78:73 +desc_test_comments.proto:78:98 > message_type[0] > enum_type[0] > value[2]: -desc_test_comments.proto:76:17 -desc_test_comments.proto:76:27 +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:27 > message_type[0] > enum_type[0] > value[2] > name: -desc_test_comments.proto:76:17 -desc_test_comments.proto:76:22 +desc_test_comments.proto:79:17 +desc_test_comments.proto:79:22 > message_type[0] > enum_type[0] > value[2] > number: -desc_test_comments.proto:76:25 -desc_test_comments.proto:76:26 +desc_test_comments.proto:79:25 +desc_test_comments.proto:79:26 > message_type[0] > enum_type[0] > value[3]: -desc_test_comments.proto:77:17 -desc_test_comments.proto:77:28 +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:28 > message_type[0] > enum_type[0] > value[3] > name: -desc_test_comments.proto:77:17 -desc_test_comments.proto:77:23 +desc_test_comments.proto:80:17 +desc_test_comments.proto:80:23 > message_type[0] > enum_type[0] > value[3] > number: -desc_test_comments.proto:77:26 -desc_test_comments.proto:77:27 +desc_test_comments.proto:80:26 +desc_test_comments.proto:80:27 > message_type[0] > enum_type[0] > options: -desc_test_comments.proto:79:17 -desc_test_comments.proto:79:52 +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 > message_type[0] > enum_type[0] > options > efubars: -desc_test_comments.proto:79:17 -desc_test_comments.proto:79:52 +desc_test_comments.proto:82:17 +desc_test_comments.proto:82:52 > message_type[0] > enum_type[0] > value[4]: -desc_test_comments.proto:81:17 -desc_test_comments.proto:81:27 +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:27 > message_type[0] > enum_type[0] > value[4] > name: -desc_test_comments.proto:81:17 -desc_test_comments.proto:81:22 +desc_test_comments.proto:84:17 +desc_test_comments.proto:84:22 > message_type[0] > enum_type[0] > value[4] > number: -desc_test_comments.proto:81:25 -desc_test_comments.proto:81:26 +desc_test_comments.proto:84:25 +desc_test_comments.proto:84:26 > message_type[0] > enum_type[0] > value[5]: -desc_test_comments.proto:82:17 -desc_test_comments.proto:82:29 +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:29 > message_type[0] > enum_type[0] > value[5] > name: -desc_test_comments.proto:82:17 -desc_test_comments.proto:82:24 +desc_test_comments.proto:85:17 +desc_test_comments.proto:85:24 > message_type[0] > enum_type[0] > value[5] > number: -desc_test_comments.proto:82:27 -desc_test_comments.proto:82:28 +desc_test_comments.proto:85:27 +desc_test_comments.proto:85:28 > message_type[0] > enum_type[0] > value[6]: -desc_test_comments.proto:83:17 -desc_test_comments.proto:83:60 +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:60 > message_type[0] > enum_type[0] > value[6] > name: -desc_test_comments.proto:83:17 -desc_test_comments.proto:83:24 +desc_test_comments.proto:86:17 +desc_test_comments.proto:86:24 > message_type[0] > enum_type[0] > value[6] > number: -desc_test_comments.proto:83:27 -desc_test_comments.proto:83:28 +desc_test_comments.proto:86:27 +desc_test_comments.proto:86:28 > message_type[0] > enum_type[0] > value[6] > options: -desc_test_comments.proto:83:29 -desc_test_comments.proto:83:59 +desc_test_comments.proto:86:29 +desc_test_comments.proto:86:59 > message_type[0] > enum_type[0] > value[6] > options > evfubarsf: 
-desc_test_comments.proto:83:30 -desc_test_comments.proto:83:58 +desc_test_comments.proto:86:30 +desc_test_comments.proto:86:58 > message_type[0] > enum_type[0] > value[7]: -desc_test_comments.proto:84:17 -desc_test_comments.proto:84:28 +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:28 > message_type[0] > enum_type[0] > value[7] > name: -desc_test_comments.proto:84:17 -desc_test_comments.proto:84:23 +desc_test_comments.proto:87:17 +desc_test_comments.proto:87:23 > message_type[0] > enum_type[0] > value[7] > number: -desc_test_comments.proto:84:26 -desc_test_comments.proto:84:27 +desc_test_comments.proto:87:26 +desc_test_comments.proto:87:27 > message_type[0] > enum_type[0] > value[8]: -desc_test_comments.proto:85:17 -desc_test_comments.proto:85:31 +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:31 > message_type[0] > enum_type[0] > value[8] > name: -desc_test_comments.proto:85:17 -desc_test_comments.proto:85:26 +desc_test_comments.proto:88:17 +desc_test_comments.proto:88:26 > message_type[0] > enum_type[0] > value[8] > number: -desc_test_comments.proto:85:29 -desc_test_comments.proto:85:30 +desc_test_comments.proto:88:29 +desc_test_comments.proto:88:30 > message_type[0] > enum_type[0] > value[9]: -desc_test_comments.proto:86:17 -desc_test_comments.proto:86:27 +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:27 > message_type[0] > enum_type[0] > value[9] > name: -desc_test_comments.proto:86:17 -desc_test_comments.proto:86:22 +desc_test_comments.proto:89:17 +desc_test_comments.proto:89:22 > message_type[0] > enum_type[0] > value[9] > number: -desc_test_comments.proto:86:25 -desc_test_comments.proto:86:26 +desc_test_comments.proto:89:25 +desc_test_comments.proto:89:26 > message_type[0] > enum_type[0] > value[10]: -desc_test_comments.proto:87:17 -desc_test_comments.proto:87:31 +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:31 > message_type[0] > enum_type[0] > value[10] > name: -desc_test_comments.proto:87:17 -desc_test_comments.proto:87:23 +desc_test_comments.proto:90:17 +desc_test_comments.proto:90:23 > message_type[0] > enum_type[0] > value[10] > number: -desc_test_comments.proto:87:26 -desc_test_comments.proto:87:30 +desc_test_comments.proto:90:26 +desc_test_comments.proto:90:30 > message_type[0] > enum_type[0] > options: -desc_test_comments.proto:89:17 -desc_test_comments.proto:89:50 +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 > message_type[0] > enum_type[0] > options > efubar: -desc_test_comments.proto:89:17 -desc_test_comments.proto:89:50 +desc_test_comments.proto:92:17 +desc_test_comments.proto:92:50 > message_type[0] > oneof_decl[0]: -desc_test_comments.proto:93:9 -desc_test_comments.proto:96:10 +desc_test_comments.proto:96:9 +desc_test_comments.proto:101:10 Leading comments: can be this or that + Trailing comments: + trailer for oneof abc + > message_type[0] > oneof_decl[0] > name: -desc_test_comments.proto:93:15 -desc_test_comments.proto:93:18 +desc_test_comments.proto:96:15 +desc_test_comments.proto:96:18 > message_type[0] > field[3]: -desc_test_comments.proto:94:17 -desc_test_comments.proto:94:33 +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:33 > message_type[0] > field[3] > type: -desc_test_comments.proto:94:17 -desc_test_comments.proto:94:23 +desc_test_comments.proto:99:17 +desc_test_comments.proto:99:23 > message_type[0] > field[3] > name: -desc_test_comments.proto:94:24 -desc_test_comments.proto:94:28 +desc_test_comments.proto:99:24 +desc_test_comments.proto:99:28 > message_type[0] > 
field[3] > number: -desc_test_comments.proto:94:31 -desc_test_comments.proto:94:32 +desc_test_comments.proto:99:31 +desc_test_comments.proto:99:32 > message_type[0] > field[4]: -desc_test_comments.proto:95:17 -desc_test_comments.proto:95:32 +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:32 > message_type[0] > field[4] > type: -desc_test_comments.proto:95:17 -desc_test_comments.proto:95:22 +desc_test_comments.proto:100:17 +desc_test_comments.proto:100:22 > message_type[0] > field[4] > name: -desc_test_comments.proto:95:23 -desc_test_comments.proto:95:27 +desc_test_comments.proto:100:23 +desc_test_comments.proto:100:27 > message_type[0] > field[4] > number: -desc_test_comments.proto:95:30 -desc_test_comments.proto:95:31 +desc_test_comments.proto:100:30 +desc_test_comments.proto:100:31 > message_type[0] > oneof_decl[1]: -desc_test_comments.proto:98:9 -desc_test_comments.proto:101:10 +desc_test_comments.proto:103:9 +desc_test_comments.proto:109:10 Leading comments: can be these or those > message_type[0] > oneof_decl[1] > name: -desc_test_comments.proto:98:15 -desc_test_comments.proto:98:18 +desc_test_comments.proto:103:15 +desc_test_comments.proto:103:18 + + + > message_type[0] > oneof_decl[1] > options: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:57 + + + > message_type[0] > oneof_decl[1] > options > oofubar[0]: +desc_test_comments.proto:105:17 +desc_test_comments.proto:105:57 + Leading comments: + whoops? + > message_type[0] > field[5]: -desc_test_comments.proto:99:17 -desc_test_comments.proto:99:34 +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:34 > message_type[0] > field[5] > type: -desc_test_comments.proto:99:17 -desc_test_comments.proto:99:23 +desc_test_comments.proto:107:17 +desc_test_comments.proto:107:23 > message_type[0] > field[5] > name: -desc_test_comments.proto:99:24 -desc_test_comments.proto:99:29 +desc_test_comments.proto:107:24 +desc_test_comments.proto:107:29 > message_type[0] > field[5] > number: -desc_test_comments.proto:99:32 -desc_test_comments.proto:99:33 +desc_test_comments.proto:107:32 +desc_test_comments.proto:107:33 > message_type[0] > field[6]: -desc_test_comments.proto:100:17 -desc_test_comments.proto:100:33 +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:33 > message_type[0] > field[6] > type: -desc_test_comments.proto:100:17 -desc_test_comments.proto:100:22 +desc_test_comments.proto:108:17 +desc_test_comments.proto:108:22 > message_type[0] > field[6] > name: -desc_test_comments.proto:100:23 -desc_test_comments.proto:100:28 +desc_test_comments.proto:108:23 +desc_test_comments.proto:108:28 > message_type[0] > field[6] > number: -desc_test_comments.proto:100:31 -desc_test_comments.proto:100:32 +desc_test_comments.proto:108:31 +desc_test_comments.proto:108:32 > message_type[0] > field[7]: -desc_test_comments.proto:104:9 -desc_test_comments.proto:104:40 +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:40 Leading comments: map field > message_type[0] > field[7] > type_name: -desc_test_comments.proto:104:9 -desc_test_comments.proto:104:28 +desc_test_comments.proto:112:9 +desc_test_comments.proto:112:28 > message_type[0] > field[7] > name: -desc_test_comments.proto:104:29 -desc_test_comments.proto:104:35 +desc_test_comments.proto:112:29 +desc_test_comments.proto:112:35 > message_type[0] > field[7] > number: -desc_test_comments.proto:104:38 -desc_test_comments.proto:104:39 +desc_test_comments.proto:112:38 +desc_test_comments.proto:112:39 > extension: -desc_test_comments.proto:108:1 
-desc_test_comments.proto:117:2 +desc_test_comments.proto:117:1 +desc_test_comments.proto:128:2 + Leading detached comment [0]: + And next we'll need some extensions... + Trailing comments: - extend trailer... + trailer for extend block > extension[0]: -desc_test_comments.proto:114:9 -desc_test_comments.proto:114:37 +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:37 Leading comments: comment for guid1 > extension[0] > extendee: -desc_test_comments.proto:110:1 -desc_test_comments.proto:110:8 +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 Leading comments: extendee comment @@ -912,66 +934,68 @@ desc_test_comments.proto:110:8 > extension[0] > label: -desc_test_comments.proto:114:9 -desc_test_comments.proto:114:17 +desc_test_comments.proto:125:9 +desc_test_comments.proto:125:17 > extension[0] > type: -desc_test_comments.proto:114:18 -desc_test_comments.proto:114:24 +desc_test_comments.proto:125:18 +desc_test_comments.proto:125:24 > extension[0] > name: -desc_test_comments.proto:114:25 -desc_test_comments.proto:114:30 +desc_test_comments.proto:125:25 +desc_test_comments.proto:125:30 > extension[0] > number: -desc_test_comments.proto:114:33 -desc_test_comments.proto:114:36 +desc_test_comments.proto:125:33 +desc_test_comments.proto:125:36 > extension[1]: -desc_test_comments.proto:116:9 -desc_test_comments.proto:116:37 +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:37 Leading comments: ... and a comment for guid2 > extension[1] > extendee: -desc_test_comments.proto:110:1 -desc_test_comments.proto:110:8 +desc_test_comments.proto:119:1 +desc_test_comments.proto:119:8 > extension[1] > label: -desc_test_comments.proto:116:9 -desc_test_comments.proto:116:17 +desc_test_comments.proto:127:9 +desc_test_comments.proto:127:17 > extension[1] > type: -desc_test_comments.proto:116:18 -desc_test_comments.proto:116:24 +desc_test_comments.proto:127:18 +desc_test_comments.proto:127:24 > extension[1] > name: -desc_test_comments.proto:116:25 -desc_test_comments.proto:116:30 +desc_test_comments.proto:127:25 +desc_test_comments.proto:127:30 > extension[1] > number: -desc_test_comments.proto:116:33 -desc_test_comments.proto:116:36 +desc_test_comments.proto:127:33 +desc_test_comments.proto:127:36 > message_type[1]: -desc_test_comments.proto:120:1 -desc_test_comments.proto:120:81 +desc_test_comments.proto:131:1 +desc_test_comments.proto:131:115 + Trailing comments: + trailer for AnEmptyMessage > message_type[1] > name: -desc_test_comments.proto:120:36 -desc_test_comments.proto:120:50 +desc_test_comments.proto:131:36 +desc_test_comments.proto:131:50 Leading comments: name leading comment Trailing comments: @@ -979,83 +1003,87 @@ desc_test_comments.proto:120:50 > service[0]: -desc_test_comments.proto:123:1 -desc_test_comments.proto:141:2 +desc_test_comments.proto:134:1 +desc_test_comments.proto:156:2 Leading comments: Service comment Trailing comments: service trailer + that spans multiple lines > service[0] > name: -desc_test_comments.proto:123:28 -desc_test_comments.proto:123:38 +desc_test_comments.proto:134:28 +desc_test_comments.proto:134:38 Leading comments: service name > service[0] > options: -desc_test_comments.proto:125:9 -desc_test_comments.proto:125:43 +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 > service[0] > options > sfubar > id: -desc_test_comments.proto:125:9 -desc_test_comments.proto:125:43 +desc_test_comments.proto:139:9 +desc_test_comments.proto:139:43 Leading comments: option that sets field > service[0] > options: 
-desc_test_comments.proto:127:9 -desc_test_comments.proto:127:47 +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 > service[0] > options > sfubar > name: -desc_test_comments.proto:127:9 -desc_test_comments.proto:127:47 +desc_test_comments.proto:141:9 +desc_test_comments.proto:141:47 Leading comments: another option that sets field > service[0] > options: -desc_test_comments.proto:128:9 -desc_test_comments.proto:128:35 +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 > service[0] > options > deprecated: -desc_test_comments.proto:128:9 -desc_test_comments.proto:128:35 +desc_test_comments.proto:142:9 +desc_test_comments.proto:142:35 Trailing comments: DEPRECATED! > service[0] > options: -desc_test_comments.proto:130:9 -desc_test_comments.proto:130:45 +desc_test_comments.proto:144:9 +desc_test_comments.proto:144:45 > service[0] > options > sfubare: -desc_test_comments.proto:130:9 -desc_test_comments.proto:130:45 +desc_test_comments.proto:144:9 +desc_test_comments.proto:144:45 > service[0] > method[0]: -desc_test_comments.proto:133:9 -desc_test_comments.proto:134:84 +desc_test_comments.proto:147:9 +desc_test_comments.proto:148:84 Leading comments: Method comment + Trailing comments: + compact method trailer + > service[0] > method[0] > name: -desc_test_comments.proto:133:28 -desc_test_comments.proto:133:40 +desc_test_comments.proto:147:28 +desc_test_comments.proto:147:40 Leading comments: rpc name Trailing comments: @@ -1063,74 +1091,80 @@ desc_test_comments.proto:133:40 > service[0] > method[0] > client_streaming: -desc_test_comments.proto:133:73 -desc_test_comments.proto:133:79 +desc_test_comments.proto:147:73 +desc_test_comments.proto:147:79 Leading comments: comment B > service[0] > method[0] > input_type: -desc_test_comments.proto:133:96 -desc_test_comments.proto:133:103 +desc_test_comments.proto:147:96 +desc_test_comments.proto:147:103 Leading comments: comment C > service[0] > method[0] > output_type: -desc_test_comments.proto:134:57 -desc_test_comments.proto:134:64 +desc_test_comments.proto:148:57 +desc_test_comments.proto:148:64 Leading comments: comment E > service[0] > method[1]: -desc_test_comments.proto:136:9 -desc_test_comments.proto:140:10 +desc_test_comments.proto:150:9 +desc_test_comments.proto:155:10 + Trailing comments: + trailer for method + > service[0] > method[1] > name: -desc_test_comments.proto:136:13 -desc_test_comments.proto:136:21 +desc_test_comments.proto:150:13 +desc_test_comments.proto:150:21 > service[0] > method[1] > input_type: -desc_test_comments.proto:136:23 -desc_test_comments.proto:136:30 +desc_test_comments.proto:150:23 +desc_test_comments.proto:150:30 > service[0] > method[1] > output_type: -desc_test_comments.proto:136:41 -desc_test_comments.proto:136:62 +desc_test_comments.proto:150:41 +desc_test_comments.proto:150:62 > service[0] > method[1] > options: -desc_test_comments.proto:137:17 -desc_test_comments.proto:137:42 +desc_test_comments.proto:152:17 +desc_test_comments.proto:152:42 > service[0] > method[1] > options > deprecated: -desc_test_comments.proto:137:17 -desc_test_comments.proto:137:42 +desc_test_comments.proto:152:17 +desc_test_comments.proto:152:42 + Leading comments: + this RPC is deprecated! 
+ > service[0] > method[1] > options: -desc_test_comments.proto:138:17 -desc_test_comments.proto:138:53 +desc_test_comments.proto:153:17 +desc_test_comments.proto:153:53 > service[0] > method[1] > options > mtfubar[0]: -desc_test_comments.proto:138:17 -desc_test_comments.proto:138:53 +desc_test_comments.proto:153:17 +desc_test_comments.proto:153:53 > service[0] > method[1] > options: -desc_test_comments.proto:139:17 -desc_test_comments.proto:139:56 +desc_test_comments.proto:154:17 +desc_test_comments.proto:154:56 > service[0] > method[1] > options > mtfubard: -desc_test_comments.proto:139:17 -desc_test_comments.proto:139:56 +desc_test_comments.proto:154:17 +desc_test_comments.proto:154:56 ---- desc_test_complex.proto ---- @@ -5261,8 +5295,6 @@ desc_test_complex.proto:268:48 > message_type[9]: desc_test_complex.proto:271:1 desc_test_complex.proto:296:2 - Trailing comments: - comment for last element in file, KeywordCollisionOptions > message_type[9] > name: diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/validate.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/validate.go index 353af6b267..2dd91618e5 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoparse/validate.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/validate.go @@ -14,6 +14,10 @@ func validateBasic(res *parseResult, containsErrors bool) { fd := res.fd isProto3 := fd.GetSyntax() == "proto3" + if validateImports(res) != nil { + return + } + for _, md := range fd.MessageType { if validateMessage(res, isProto3, "", md, containsErrors) != nil { return @@ -33,6 +37,27 @@ func validateBasic(res *parseResult, containsErrors bool) { } } +func validateImports(res *parseResult) error { + fileNode := res.root + if fileNode == nil { + return nil + } + imports := make(map[string]*ast.SourcePos, len(fileNode.Decls)) + for _, decl := range fileNode.Decls { + imp, ok := decl.(*ast.ImportNode) + if !ok { + continue + } + + name := imp.Name.AsString() + if imports[name] != nil { + return res.errs.handleErrorWithPos(imp.Start(), `%q was already imported at %v`, name, imports[name]) + } + imports[name] = imp.Start() + } + return nil +} + func validateMessage(res *parseResult, isProto3 bool, prefix string, md *dpb.DescriptorProto, containsErrors bool) error { nextPrefix := md.GetName() + "." diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go index 8d0a5cb278..5a52339b96 100644 --- a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go +++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go @@ -59,6 +59,17 @@ type Printer struct { // standard options before custom ones. SortElements bool + // The "less" function used to sort elements when printing. It is given two + // elements, a and b, and should return true if a is "less than" b. In this + // case, "less than" means that element a should appear earlier in the file + // than element b. + // + // If this field is nil, no custom sorting is done and the SortElements + // field is consulted to decide how to order the output. If this field is + // non-nil, the SortElements field is ignored and this function is called to + // order elements. + CustomSortFunction func(a, b Element) bool + // The indentation used. Any characters other than spaces or tabs will be // replaced with spaces. If unset/empty, two spaces will be used. 
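A minimal usage sketch for the new CustomSortFunction field (illustrative, not part of the patch): order messages ahead of everything else, then alphabetize by name. Kind() and Name() come from the Element interface introduced later in this diff.

    import "github.com/jhump/protoreflect/desc/protoprint"

    func newPrinter() *protoprint.Printer {
    	return &protoprint.Printer{
    		CustomSortFunction: func(a, b protoprint.Element) bool {
    			aMsg := a.Kind() == protoprint.KindMessage
    			bMsg := b.Kind() == protoprint.KindMessage
    			if aMsg != bMsg {
    				return aMsg // messages sort ahead of everything else
    			}
    			return a.Name() < b.Name() // then alphabetical by name
    		},
    	}
    }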
Indent string @@ -89,8 +100,7 @@ type Printer struct { // repeated string names = 1; // trailing comment // // If the trailing comment has more than one line, it will automatically be - // forced to the next line. Also, elements that end with "}" instead of ";" - // will have trailing comments rendered on the subsequent line. + // forced to the next line. TrailingCommentsOnSeparateLine bool // If true, the printed output will eschew any blank lines, which otherwise @@ -129,6 +139,31 @@ type Printer struct { // When printing fully-qualified names, they will be preceded by a dot, to // avoid any ambiguity that they might be relative vs. fully-qualified. ForceFullyQualifiedNames bool + + // The number of options that trigger short options expressions to be + // rendered using multiple lines. Short options expressions are those + // found on fields and enum values, that use brackets ("[" and "]") and + // comma-separated options. If more options than this are present, they + // will be expanded to multiple lines (one option per line). + // + // If unset (e.g. if zero), a default threshold of 3 is used. + ShortOptionsExpansionThresholdCount int + + // The length of printed options that trigger short options expressions to + // be rendered using multiple lines. If the short options contain more than + // one option and their printed length is longer than this threshold, they + // will be expanded to multiple lines (one option per line). + // + // If unset (e.g. if zero), a default threshold of 50 is used. + ShortOptionsExpansionThresholdLength int + + // The length of a printed option value message literal that triggers the + // message literal to be rendered using multiple lines instead of using a + // compact single-line form. The message must include at least two fields + // or contain a field that is a nested message to be expanded. + // + // If unset (e.g. if zero), a default threshold of 50 is used. + MessageLiteralExpansionThresholdLength int } // CommentType is a kind of comments in a proto source file. 
This can be used @@ -617,7 +652,9 @@ func (p *Printer) computeExtensions(sourceInfo internal.SourceInfoMap, exts []*d } func (p *Printer) sort(elements elementAddrs, sourceInfo internal.SourceInfoMap, path []int32) { - if p.SortElements { + if p.CustomSortFunction != nil { + sort.Stable(customSortOrder{elementAddrs: elements, less: p.CustomSortFunction}) + } else if p.SortElements { // canonical sorted order sort.Stable(elements) } else { @@ -707,13 +744,14 @@ func (p *Printer) typeString(fld *desc.FieldDescriptor, scope string) string { func (p *Printer) printMessage(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { si := sourceInfo.Get(path) - p.printElement(true, si, w, indent, func(w *writer) { + p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) fmt.Fprint(w, "message ") nameSi := sourceInfo.Get(append(path, internal.Message_nameTag)) p.printElementString(nameSi, w, indent, md.GetName()) fmt.Fprintln(w, "{") + trailer(indent+1, true) p.printMessageBody(md, mf, w, sourceInfo, path, indent+1) p.indent(w, indent) @@ -930,7 +968,7 @@ func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFacto si = sourceInfo.Get(path) } - p.printElement(true, si, w, indent, func(w *writer) { + p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) if shouldEmitLabel(fld) { locSi := sourceInfo.Get(append(path, internal.Field_labelTag)) @@ -977,13 +1015,12 @@ func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFacto opts[-internal.Field_jsonNameTag] = []option{{name: "json_name", val: jsn}} } - elements := elementAddrs{dsc: fld, opts: opts} - elements.addrs = optionsAsElementAddrs(internal.Field_optionsTag, 0, opts) - p.sort(elements, sourceInfo, path) - p.printOptionElementsShort(elements, w, sourceInfo, path, indent) + p.printOptionsShort(fld, opts, internal.Field_optionsTag, w, sourceInfo, path, indent) if group { fmt.Fprintln(w, "{") + trailer(indent+1, true) + p.printMessageBody(fld.GetMessageType(), mf, w, sourceInfo, groupPath, indent+1) p.indent(w, indent) @@ -991,6 +1028,7 @@ func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFacto } else { fmt.Fprint(w, ";") + trailer(indent, false) } }) } @@ -1020,14 +1058,15 @@ func isGroup(fld *desc.FieldDescriptor) bool { func (p *Printer) printOneOf(ood *desc.OneOfDescriptor, parentElements elementAddrs, startFieldIndex int, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, ooIndex int32) { oopath := append(parentPath, internal.Message_oneOfsTag, ooIndex) oosi := sourceInfo.Get(oopath) - p.printElement(true, oosi, w, indent, func(w *writer) { + p.printBlockElement(true, oosi, w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) fmt.Fprint(w, "oneof ") extNameSi := sourceInfo.Get(append(oopath, internal.OneOf_nameTag)) p.printElementString(extNameSi, w, indent, ood.GetName()) fmt.Fprintln(w, "{") - indent++ + trailer(indent, true) + opts, err := p.extractOptions(ood, ood.GetOptions(), mf) if err != nil { if w.err == nil { @@ -1087,6 +1126,11 @@ func (p *Printer) printExtensions(exts *extensionDecl, allExts extensions, paren p.printElementString(extNameSi, w, indent, p.qualifyName(pkg, scope, exts.extendee)) fmt.Fprintln(w, "{") + if p.printTrailingComments(exts.sourceInfo, w, indent+1) && !p.Compact { + // separator line between trailing 
comment and next element + fmt.Fprintln(w) + } + count := len(exts.fields) first := true for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ { @@ -1109,11 +1153,6 @@ func (p *Printer) printExtensions(exts *extensionDecl, allExts extensions, paren p.indent(w, indent) fmt.Fprintln(w, "}") - p.printTrailingComments(exts.sourceInfo, w, indent) - if indent >= 0 && !w.newline { - // if we're not printing inline but element did not have trailing newline, add one now - fmt.Fprintln(w) - } } func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges []*descriptor.DescriptorProto_ExtensionRange, maxTag int32, addrs []elementAddr, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) { @@ -1144,7 +1183,7 @@ func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges [] }) } dsc := extensionRange{owner: parent, extRange: ranges[0]} - p.printOptionsShort(dsc, opts, mf, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent) + p.extractAndPrintOptionsShort(dsc, opts, mf, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent) fmt.Fprintln(w, ";") } @@ -1197,15 +1236,16 @@ func (p *Printer) printReservedNames(names []string, addrs []elementAddr, w *wri func (p *Printer) printEnum(ed *desc.EnumDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { si := sourceInfo.Get(path) - p.printElement(true, si, w, indent, func(w *writer) { + p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) fmt.Fprint(w, "enum ") nameSi := sourceInfo.Get(append(path, internal.Enum_nameTag)) p.printElementString(nameSi, w, indent, ed.GetName()) fmt.Fprintln(w, "{") - indent++ + trailer(indent, true) + opts, err := p.extractOptions(ed, ed.GetOptions(), mf) if err != nil { if w.err == nil { @@ -1301,7 +1341,7 @@ func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, mf *dynamic.Mess numSi := sourceInfo.Get(append(path, internal.EnumVal_numberTag)) p.printElementString(numSi, w, indent, fmt.Sprintf("%d", evd.GetNumber())) - p.printOptionsShort(evd, evd.GetOptions(), mf, internal.EnumVal_optionsTag, w, sourceInfo, path, indent) + p.extractAndPrintOptionsShort(evd, evd.GetOptions(), mf, internal.EnumVal_optionsTag, w, sourceInfo, path, indent) fmt.Fprint(w, ";") }) @@ -1309,15 +1349,15 @@ func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, mf *dynamic.Mess func (p *Printer) printService(sd *desc.ServiceDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { si := sourceInfo.Get(path) - p.printElement(true, si, w, indent, func(w *writer) { + p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) fmt.Fprint(w, "service ") nameSi := sourceInfo.Get(append(path, internal.Service_nameTag)) p.printElementString(nameSi, w, indent, sd.GetName()) fmt.Fprintln(w, "{") - indent++ + trailer(indent, true) opts, err := p.extractOptions(sd, sd.GetOptions(), mf) if err != nil { @@ -1358,7 +1398,7 @@ func (p *Printer) printService(sd *desc.ServiceDescriptor, mf *dynamic.MessageFa func (p *Printer) printMethod(mtd *desc.MethodDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { si := sourceInfo.Get(path) pkg := mtd.GetFile().GetPackage() - p.printElement(true, si, w, indent, func(w *writer) { + p.printBlockElement(true, si, 
w, indent, func(w *writer, trailer func(int, bool)) { p.indent(w, indent) fmt.Fprint(w, "rpc ") @@ -1394,24 +1434,26 @@ func (p *Printer) printMethod(mtd *desc.MethodDescriptor, mf *dynamic.MessageFac if len(opts) > 0 { fmt.Fprintln(w, "{") indent++ + trailer(indent, true) elements := elementAddrs{dsc: mtd, opts: opts} elements.addrs = optionsAsElementAddrs(internal.Method_optionsTag, 0, opts) p.sort(elements, sourceInfo, path) - path = append(path, internal.Method_optionsTag) - for i, addr := range elements.addrs { + for i, el := range elements.addrs { if i > 0 { p.newLine(w) } - o := elements.at(addr).([]option) - p.printOptionsLong(o, w, sourceInfo, path, indent) + o := elements.at(el).([]option) + childPath := append(path, el.elementType, int32(el.elementIndex)) + p.printOptionsLong(o, w, sourceInfo, childPath, indent) } p.indent(w, indent-1) fmt.Fprintln(w, "}") } else { fmt.Fprint(w, ";") + trailer(indent, false) } }) } @@ -1421,15 +1463,16 @@ func (p *Printer) printOptionsLong(opts []option, w *writer, sourceInfo internal func(i int32) *descriptor.SourceCodeInfo_Location { return sourceInfo.Get(append(path, i)) }, - func(w *writer, indent int, opt option) { + func(w *writer, indent int, opt option, _ bool) { p.indent(w, indent) fmt.Fprint(w, "option ") p.printOption(opt.name, opt.val, w, indent) fmt.Fprint(w, ";") - }) + }, + false) } -func (p *Printer) printOptionsShort(dsc interface{}, optsMsg proto.Message, mf *dynamic.MessageFactory, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { +func (p *Printer) extractAndPrintOptionsShort(dsc interface{}, optsMsg proto.Message, mf *dynamic.MessageFactory, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { d, ok := dsc.(desc.Descriptor) if !ok { d = dsc.(extensionRange).owner @@ -1441,20 +1484,64 @@ func (p *Printer) printOptionsShort(dsc interface{}, optsMsg proto.Message, mf * } return } + p.printOptionsShort(dsc, opts, optsTag, w, sourceInfo, path, indent) +} +func (p *Printer) printOptionsShort(dsc interface{}, opts map[int32][]option, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) { elements := elementAddrs{dsc: dsc, opts: opts} elements.addrs = optionsAsElementAddrs(optsTag, 0, opts) + if len(elements.addrs) == 0 { + return + } p.sort(elements, sourceInfo, path) - p.printOptionElementsShort(elements, w, sourceInfo, path, indent) + + // we render expanded form if there are many options + count := 0 + for _, addr := range elements.addrs { + opts := elements.at(addr).([]option) + count += len(opts) + } + threshold := p.ShortOptionsExpansionThresholdCount + if threshold <= 0 { + threshold = 3 + } + + if count > threshold { + p.printOptionElementsShort(elements, w, sourceInfo, path, indent, true) + } else { + var tmp bytes.Buffer + tmpW := *w + tmpW.Writer = &tmp + p.printOptionElementsShort(elements, &tmpW, sourceInfo, path, indent, false) + threshold := p.ShortOptionsExpansionThresholdLength + if threshold <= 0 { + threshold = 50 + } + // we subtract 3 so we don't consider the leading " [" and trailing "]" + if tmp.Len()-3 > threshold { + p.printOptionElementsShort(elements, w, sourceInfo, path, indent, true) + } else { + // not too long: commit what we rendered + b := tmp.Bytes() + if w.space && len(b) > 0 && b[0] == ' ' { + // don't write extra space + b = b[1:] + } + w.Write(b) + w.newline = tmpW.newline + w.space = tmpW.space + } + } } -func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, 
sourceInfo internal.SourceInfoMap, path []int32, indent int) { - if len(addrs.addrs) == 0 { - return +func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int, expand bool) { + if expand { + fmt.Fprintln(w, "[") + indent++ + } else { + fmt.Fprint(w, "[") } - first := true - fmt.Fprint(w, "[") - for _, addr := range addrs.addrs { + for i, addr := range addrs.addrs { opts := addrs.at(addr).([]option) var childPath []int32 if addr.elementIndex < 0 { @@ -1463,7 +1550,11 @@ func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, source } else { childPath = append(path, addr.elementType, int32(addr.elementIndex)) } - p.printOptions(opts, w, inline(indent), + optIndent := indent + if !expand { + optIndent = inline(indent) + } + p.printOptions(opts, w, optIndent, func(i int32) *descriptor.SourceCodeInfo_Location { p := childPath if addr.elementIndex >= 0 { @@ -1471,24 +1562,36 @@ func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, source } return sourceInfo.Get(p) }, - func(w *writer, indent int, opt option) { - if first { - first = false - } else { - fmt.Fprint(w, ", ") + func(w *writer, indent int, opt option, more bool) { + if expand { + p.indent(w, indent) } p.printOption(opt.name, opt.val, w, indent) - fmt.Fprint(w, " ") // trailing space - }) + if more { + if expand { + fmt.Fprintln(w, ",") + } else { + fmt.Fprint(w, ", ") + } + } + }, + i < len(addrs.addrs)-1) + } + if expand { + p.indent(w, indent-1) } fmt.Fprint(w, "] ") } -func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptor.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option)) { +func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptor.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option, more bool), haveMore bool) { for i, opt := range opts { + more := haveMore + if !more { + more = i < len(opts)-1 + } si := siFetch(int32(i)) p.printElement(false, si, w, indent, func(w *writer) { - fn(w, indent, opt) + fn(w, indent, opt, more) }) } } @@ -1561,18 +1664,71 @@ func (p *Printer) printOption(name string, optVal interface{}, w *writer, indent case *desc.EnumValueDescriptor: fmt.Fprintf(w, "%s", optVal.GetName()) case proto.Message: - // TODO: if value is too long, marshal to text format with indentation to - // make output prettier (also requires correctly indenting subsequent lines) - // TODO: alternate approach so we can apply p.ForceFullyQualifiedNames // inside the resulting value? 
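For reference, a hedged sketch of configuring the three new expansion thresholds together (the values here are illustrative; zero values fall back to the documented defaults of 3, 50, and 50):

    import "github.com/jhump/protoreflect/desc/protoprint"

    var printer = &protoprint.Printer{
    	ShortOptionsExpansionThresholdCount:    2,  // more than 2 short options: one per line
    	ShortOptionsExpansionThresholdLength:   40, // single-line form over 40 chars: expand
    	MessageLiteralExpansionThresholdLength: 60, // message literals over 60 chars: multi-line
    }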
- fmt.Fprintf(w, "{ %s }", proto.CompactTextString(optVal)) + if indent < 0 { + // if printing inline, always use compact form + fmt.Fprintf(w, "{ %s }", proto.CompactTextString(optVal)) + return + } + m := proto.TextMarshaler{ + Compact: true, + ExpandAny: true, + } + str := strings.TrimSuffix(m.Text(optVal), " ") + fieldCount := strings.Count(str, ":") + nestedCount := strings.Count(str, "{") + strings.Count(str, "<") + if fieldCount <= 1 && nestedCount == 0 { + // can't expand + fmt.Fprintf(w, "{ %s }", str) + return + } + threshold := p.MessageLiteralExpansionThresholdLength + if threshold == 0 { + threshold = 50 + } + if len(str) <= threshold { + // no need to expand + fmt.Fprintf(w, "{ %s }", str) + return + } + + // multi-line form + m.Compact = false + str = m.Text(optVal) + fmt.Fprintln(w, "{") + p.indentMessageLiteral(w, indent+1, str) + p.indent(w, indent) + fmt.Fprint(w, "}") default: panic(fmt.Sprintf("unknown type of value %T for field %s", optVal, name)) } } +func (p *Printer) indentMessageLiteral(w *writer, indent int, val string) { + lines := strings.Split(val, "\n") + for _, l := range lines { + if l == "" { + continue + } + if p.Indent != " " { + var prefix int + for i := 0; i < len(l); i++ { + if l[i] != ' ' { + prefix = i + break + } + } + // replace text marshaller indent (2 spaces) with p.Indent + prefixStr := strings.ReplaceAll(l[:prefix], " ", p.Indent) + l = prefixStr + l[prefix:] + } + p.indent(w, indent) + fmt.Fprintln(w, l) + } +} + type edgeKind int const ( @@ -1726,12 +1882,6 @@ func (p *Printer) extractOptions(dsc desc.Descriptor, opts proto.Message, mf *dy } e = ev } - var name string - if fld.IsExtension() { - name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName())) - } else { - name = fld.GetName() - } opts = append(opts, option{name: name, val: e}) } case map[interface{}]interface{}: @@ -1825,7 +1975,6 @@ func optionsAsElementAddrs(optionsTag int32, order int, opts map[int32][]option) for tag := range opts { optAddrs = append(optAddrs, elementAddr{elementType: optionsTag, elementIndex: int(tag), order: order}) } - sort.Sort(optionsByName{addrs: optAddrs, opts: opts}) return optAddrs } @@ -2162,19 +2311,15 @@ func (a elementSrcOrder) Less(i, j int) bool { return false } -type optionsByName struct { - addrs []elementAddr - opts map[int32][]option -} - -func (o optionsByName) Len() int { - return len(o.addrs) +type customSortOrder struct { + elementAddrs + less func(a, b Element) bool } -func (o optionsByName) Less(i, j int) bool { - oi := o.opts[int32(o.addrs[i].elementIndex)] - oj := o.opts[int32(o.addrs[j].elementIndex)] - return optionLess(oi, oj) +func (cso customSortOrder) Less(i, j int) bool { + ei := asElement(cso.at(cso.addrs[i])) + ej := asElement(cso.at(cso.addrs[j])) + return cso.less(ei, ej) } func optionLess(i, j []option) bool { @@ -2188,8 +2333,24 @@ func optionLess(i, j []option) bool { return ni < nj } -func (o optionsByName) Swap(i, j int) { - o.addrs[i], o.addrs[j] = o.addrs[j], o.addrs[i] +func (p *Printer) printBlockElement(isDecriptor bool, si *descriptor.SourceCodeInfo_Location, w *writer, indent int, el func(w *writer, trailer func(indent int, wantTrailingNewline bool))) { + includeComments := isDecriptor || p.includeCommentType(CommentsTokens) + + if includeComments && si != nil { + p.printLeadingComments(si, w, indent) + } + el(w, func(indent int, wantTrailingNewline bool) { + if includeComments && si != nil { + if p.printTrailingComments(si, w, indent) && wantTrailingNewline && !p.Compact { + // 
separator line between trailing comment and next element + fmt.Fprintln(w) + } + } + }) + if indent >= 0 && !w.newline { + // if we're not printing inline but element did not have trailing newline, add one now + fmt.Fprintln(w) + } } func (p *Printer) printElement(isDecriptor bool, si *descriptor.SourceCodeInfo_Location, w *writer, indent int, el func(*writer)) { @@ -2262,7 +2423,7 @@ func (p *Printer) printLeadingComments(si *descriptor.SourceCodeInfo_Location, w return endsInNewLine } -func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) { +func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) bool { if p.includeCommentType(CommentsTrailing) && si.GetTrailingComments() != "" { if !p.printComment(si.GetTrailingComments(), w, indent, p.TrailingCommentsOnSeparateLine) && indent >= 0 { // trailing comment didn't end with newline but needs one @@ -2271,7 +2432,10 @@ func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, } else if indent < 0 { fmt.Fprint(w, " ") } + return true } + + return false } func (p *Printer) printComment(comments string, w *writer, indent int, forceNextLine bool) bool { @@ -2405,7 +2569,7 @@ func (w *writer) Write(p []byte) (int, error) { if w.space { // skip any trailing space if the following // character is semicolon, comma, or close bracket - if p[0] != ';' && p[0] != ',' && p[0] != ']' { + if p[0] != ';' && p[0] != ',' { _, err := w.Writer.Write([]byte{' '}) if err != nil { w.err = err diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go new file mode 100644 index 0000000000..fe5a26ee10 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go @@ -0,0 +1,439 @@ +package protoprint + +import ( + "fmt" + "strings" + + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + + "github.com/jhump/protoreflect/desc" +) + +// ElementKind is an enumeration of the types of elements in a protobuf +// file descriptor. This can be used by custom sort functions, for +// printing a file using a custom ordering of elements. +type ElementKind int + +const ( + KindPackage = ElementKind(iota) + 1 + KindImport + KindOption + KindField + KindMessage + KindEnum + KindService + KindExtensionRange + KindExtension + KindReservedRange + KindReservedName + KindEnumValue + KindMethod +) + +// Element represents an element in a proto descriptor that can be +// printed. This interface is primarily used to allow users of this package to +// define custom sort orders for the printed output. The methods of this +// interface represent the values that can be used for ordering elements. +type Element interface { + // Kind returns the kind of the element. The kind determines which other + // methods are applicable. + Kind() ElementKind + // Name returns the element name. This is NOT applicable to syntax, + // extension range, and reserved range kinds and will return the empty + // string for these kinds. For custom options, this will be the + // fully-qualified name of the corresponding extension. + Name() string + // Number returns the element number. This is only applicable to field, + // extension, and enum value kinds and will return zero for all other kinds. + Number() int32 + // NumberRange returns the range of numbers/tags for the element. This is + // only applicable to extension ranges and reserved ranges and will return + // (0, 0) for all other kinds. 
+ NumberRange() (int32, int32) + // Extendee is the extended message for the extension element. Elements + // other than extensions will return the empty string. + Extendee() string + // IsCustomOption returns true if the element is a custom option. If it is + // not (including if the element kind is not option) then this method will + // return false. + IsCustomOption() bool +} + +func asElement(v interface{}) Element { + switch v := v.(type) { + case pkg: + return pkgElement(v) + case imp: + return impElement(v) + case []option: + return (*optionElement)(&v[0]) + case reservedRange: + return resvdRangeElement(v) + case string: + return resvdNameElement(v) + case *desc.FieldDescriptor: + return (*fieldElement)(v) + case *desc.MessageDescriptor: + return (*msgElement)(v) + case *desc.EnumDescriptor: + return (*enumElement)(v) + case *desc.EnumValueDescriptor: + return (*enumValElement)(v) + case *desc.ServiceDescriptor: + return (*svcElement)(v) + case *desc.MethodDescriptor: + return (*methodElement)(v) + case *dpb.DescriptorProto_ExtensionRange: + return (*extRangeElement)(v) + default: + panic(fmt.Sprintf("unexpected type of element: %T", v)) + } +} + +type pkgElement pkg + +var _ Element = pkgElement("") + +func (p pkgElement) Kind() ElementKind { + return KindPackage +} + +func (p pkgElement) Name() string { + return string(p) +} + +func (p pkgElement) Number() int32 { + return 0 +} + +func (p pkgElement) NumberRange() (int32, int32) { + return 0, 0 +} + +func (p pkgElement) Extendee() string { + return "" +} + +func (p pkgElement) IsCustomOption() bool { + return false +} + +type impElement imp + +var _ Element = impElement("") + +func (i impElement) Kind() ElementKind { + return KindImport +} + +func (i impElement) Name() string { + return string(i) +} + +func (i impElement) Number() int32 { + return 0 +} + +func (i impElement) NumberRange() (int32, int32) { + return 0, 0 +} + +func (i impElement) Extendee() string { + return "" +} + +func (i impElement) IsCustomOption() bool { + return false +} + +type optionElement option + +var _ Element = (*optionElement)(nil) + +func (o *optionElement) Kind() ElementKind { + return KindOption +} + +func (o *optionElement) Name() string { + if strings.HasPrefix(o.name, "(") { + // remove parentheses + return o.name[1 : len(o.name)-1] + } + return o.name +} + +func (o *optionElement) Number() int32 { + return 0 +} + +func (o *optionElement) NumberRange() (int32, int32) { + return 0, 0 +} + +func (o *optionElement) Extendee() string { + return "" +} + +func (o *optionElement) IsCustomOption() bool { + return strings.HasPrefix(o.name, "(") +} + +type resvdRangeElement reservedRange + +var _ Element = resvdRangeElement{} + +func (r resvdRangeElement) Kind() ElementKind { + return KindReservedRange +} + +func (r resvdRangeElement) Name() string { + return "" +} + +func (r resvdRangeElement) Number() int32 { + return 0 +} + +func (r resvdRangeElement) NumberRange() (int32, int32) { + return r.start, r.end +} + +func (r resvdRangeElement) Extendee() string { + return "" +} + +func (r resvdRangeElement) IsCustomOption() bool { + return false +} + +type resvdNameElement string + +var _ Element = resvdNameElement("") + +func (r resvdNameElement) Kind() ElementKind { + return KindReservedName +} + +func (r resvdNameElement) Name() string { + return string(r) +} + +func (r resvdNameElement) Number() int32 { + return 0 +} + +func (r resvdNameElement) NumberRange() (int32, int32) { + return 0, 0 +} + +func (r resvdNameElement) Extendee() string { + return "" 
+}
+
+func (r resvdNameElement) IsCustomOption() bool {
+	return false
+}
+
+type fieldElement desc.FieldDescriptor
+
+var _ Element = (*fieldElement)(nil)
+
+func (f *fieldElement) Kind() ElementKind {
+	if (*desc.FieldDescriptor)(f).IsExtension() {
+		return KindExtension
+	}
+	return KindField
+}
+
+func (f *fieldElement) Name() string {
+	return (*desc.FieldDescriptor)(f).GetName()
+}
+
+func (f *fieldElement) Number() int32 {
+	return (*desc.FieldDescriptor)(f).GetNumber()
+}
+
+func (f *fieldElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (f *fieldElement) Extendee() string {
+	fd := (*desc.FieldDescriptor)(f)
+	if fd.IsExtension() {
+		return fd.GetOwner().GetFullyQualifiedName()
+	}
+	return ""
+}
+
+func (f *fieldElement) IsCustomOption() bool {
+	return false
+}
+
+type msgElement desc.MessageDescriptor
+
+var _ Element = (*msgElement)(nil)
+
+func (m *msgElement) Kind() ElementKind {
+	return KindMessage
+}
+
+func (m *msgElement) Name() string {
+	return (*desc.MessageDescriptor)(m).GetName()
+}
+
+func (m *msgElement) Number() int32 {
+	return 0
+}
+
+func (m *msgElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (m *msgElement) Extendee() string {
+	return ""
+}
+
+func (m *msgElement) IsCustomOption() bool {
+	return false
+}
+
+type enumElement desc.EnumDescriptor
+
+var _ Element = (*enumElement)(nil)
+
+func (e *enumElement) Kind() ElementKind {
+	return KindEnum
+}
+
+func (e *enumElement) Name() string {
+	return (*desc.EnumDescriptor)(e).GetName()
+}
+
+func (e *enumElement) Number() int32 {
+	return 0
+}
+
+func (e *enumElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (e *enumElement) Extendee() string {
+	return ""
+}
+
+func (e *enumElement) IsCustomOption() bool {
+	return false
+}
+
+type enumValElement desc.EnumValueDescriptor
+
+var _ Element = (*enumValElement)(nil)
+
+func (e *enumValElement) Kind() ElementKind {
+	return KindEnumValue
+}
+
+func (e *enumValElement) Name() string {
+	return (*desc.EnumValueDescriptor)(e).GetName()
+}
+
+func (e *enumValElement) Number() int32 {
+	return (*desc.EnumValueDescriptor)(e).GetNumber()
+}
+
+func (e *enumValElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (e *enumValElement) Extendee() string {
+	return ""
+}
+
+func (e *enumValElement) IsCustomOption() bool {
+	return false
+}
+
+type svcElement desc.ServiceDescriptor
+
+var _ Element = (*svcElement)(nil)
+
+func (s *svcElement) Kind() ElementKind {
+	return KindService
+}
+
+func (s *svcElement) Name() string {
+	return (*desc.ServiceDescriptor)(s).GetName()
+}
+
+func (s *svcElement) Number() int32 {
+	return 0
+}
+
+func (s *svcElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (s *svcElement) Extendee() string {
+	return ""
+}
+
+func (s *svcElement) IsCustomOption() bool {
+	return false
+}
+
+type methodElement desc.MethodDescriptor
+
+var _ Element = (*methodElement)(nil)
+
+func (m *methodElement) Kind() ElementKind {
+	return KindMethod
+}
+
+func (m *methodElement) Name() string {
+	return (*desc.MethodDescriptor)(m).GetName()
+}
+
+func (m *methodElement) Number() int32 {
+	return 0
+}
+
+func (m *methodElement) NumberRange() (int32, int32) {
+	return 0, 0
+}
+
+func (m *methodElement) Extendee() string {
+	return ""
+}
+
+func (m *methodElement) IsCustomOption() bool {
+	return false
+}
+
+type extRangeElement dpb.DescriptorProto_ExtensionRange
+
+var _ Element = (*extRangeElement)(nil)
+
+func (e *extRangeElement) Kind() ElementKind {
+	return KindExtensionRange
+}
+
+func (e
*extRangeElement) Name() string { + return "" +} + +func (e *extRangeElement) Number() int32 { + return 0 +} + +func (e *extRangeElement) NumberRange() (int32, int32) { + ext := (*dpb.DescriptorProto_ExtensionRange)(e) + return ext.GetStart(), ext.GetEnd() +} + +func (e *extRangeElement) Extendee() string { + return "" +} + +func (e *extRangeElement) IsCustomOption() bool { + return false +} diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go new file mode 100644 index 0000000000..20d2d7a0dd --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go @@ -0,0 +1,207 @@ +package sourceinfo + +import ( + "math" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "github.com/jhump/protoreflect/desc/internal" +) + +// NB: forked from google.golang.org/protobuf/internal/filedesc +type sourceLocations struct { + protoreflect.SourceLocations + + orig []*descriptorpb.SourceCodeInfo_Location + // locs is a list of sourceLocations. + // The SourceLocation.Next field does not need to be populated + // as it will be lazily populated upon first need. + locs []protoreflect.SourceLocation + + // fd is the parent file descriptor that these locations are relative to. + // If non-nil, ByDescriptor verifies that the provided descriptor + // is a child of this file descriptor. + fd protoreflect.FileDescriptor + + once sync.Once + byPath map[pathKey]int +} + +func (p *sourceLocations) Len() int { return len(p.orig) } +func (p *sourceLocations) Get(i int) protoreflect.SourceLocation { + return p.lazyInit().locs[i] +} +func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation { + if i, ok := p.lazyInit().byPath[k]; ok { + return p.locs[i] + } + return protoreflect.SourceLocation{} +} +func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { + return p.byKey(newPathKey(path)) +} +func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { + if p.fd != nil && desc != nil && p.fd != desc.ParentFile() { + return protoreflect.SourceLocation{} // mismatching parent imports + } + var pathArr [16]int32 + path := pathArr[:0] + for { + switch desc.(type) { + case protoreflect.FileDescriptor: + // Reverse the path since it was constructed in reverse. 
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { + path[i], path[j] = path[j], path[i] + } + return p.byKey(newPathKey(path)) + case protoreflect.MessageDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_messagesTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_nestedMessagesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() + path = append(path, int32(desc.Index())) + desc = desc.Parent() + if isExtension { + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_extensionsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_extensionsTag)) + default: + return protoreflect.SourceLocation{} + } + } else { + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_fieldsTag)) + default: + return protoreflect.SourceLocation{} + } + } + case protoreflect.OneofDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_oneOfsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_enumsTag)) + case protoreflect.MessageDescriptor: + path = append(path, int32(internal.Message_enumsTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.EnumValueDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.EnumDescriptor: + path = append(path, int32(internal.Enum_valuesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.ServiceDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.FileDescriptor: + path = append(path, int32(internal.File_servicesTag)) + default: + return protoreflect.SourceLocation{} + } + case protoreflect.MethodDescriptor: + path = append(path, int32(desc.Index())) + desc = desc.Parent() + switch desc.(type) { + case protoreflect.ServiceDescriptor: + path = append(path, int32(internal.Service_methodsTag)) + default: + return protoreflect.SourceLocation{} + } + default: + return protoreflect.SourceLocation{} + } + } +} +func (p *sourceLocations) lazyInit() *sourceLocations { + p.once.Do(func() { + if len(p.orig) > 0 { + p.locs = make([]protoreflect.SourceLocation, len(p.orig)) + // Collect all the indexes for a given path. + pathIdxs := make(map[pathKey][]int, len(p.locs)) + for i := range p.orig { + l := asSourceLocation(p.orig[i]) + p.locs[i] = l + k := newPathKey(l.Path) + pathIdxs[k] = append(pathIdxs[k], i) + } + + // Update the next index for all locations. 
+ p.byPath = make(map[pathKey]int, len(p.locs)) + for k, idxs := range pathIdxs { + for i := 0; i < len(idxs)-1; i++ { + p.locs[idxs[i]].Next = idxs[i+1] + } + p.locs[idxs[len(idxs)-1]].Next = 0 + p.byPath[k] = idxs[0] // record the first location for this path + } + } + }) + return p +} + +func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation { + endLine := l.Span[0] + endCol := l.Span[2] + if len(l.Span) > 3 { + endLine = l.Span[2] + endCol = l.Span[3] + } + return protoreflect.SourceLocation{ + Path: l.Path, + StartLine: int(l.Span[0]), + StartColumn: int(l.Span[1]), + EndLine: int(endLine), + EndColumn: int(endCol), + LeadingDetachedComments: l.LeadingDetachedComments, + LeadingComments: l.GetLeadingComments(), + TrailingComments: l.GetTrailingComments(), + } +} + +// pathKey is a comparable representation of protoreflect.SourcePath. +type pathKey struct { + arr [16]uint8 // first n-1 path segments; last element is the length + str string // used if the path does not fit in arr +} + +func newPathKey(p protoreflect.SourcePath) (k pathKey) { + if len(p) < len(k.arr) { + for i, ps := range p { + if ps < 0 || math.MaxUint8 <= ps { + return pathKey{str: p.String()} + } + k.arr[i] = uint8(ps) + } + k.arr[len(k.arr)-1] = uint8(len(p)) + return k + } + return pathKey{str: p.String()} +} diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go new file mode 100644 index 0000000000..a1efef62c4 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go @@ -0,0 +1,178 @@ +// Package sourceinfo provides the ability to register and query source code info +// for file descriptors that are compiled into the binary. This data is registered +// by code generated from the protoc-gen-gosrcinfo plugin. +// +// The standard descriptors bundled into the compiled binary are stripped of source +// code info, to reduce binary size and reduce runtime memory footprint. However, +// the source code info can be very handy and worth the size cost when used with +// gRPC services and the server reflection service. Without source code info, the +// descriptors that a client downloads from the reflection service have no comments. +// But the presence of comments, and the ability to show them to humans, can greatly +// improve the utility of user agents that use the reflection service. +// +// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load +// descriptors for compiled-in elements, will automatically include source code +// info, using the data registered with this package. +// +// In order to make the reflection service use this functionality, you will need to +// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The +// following snippet demonstrates how to do this in your server. 
Do this instead of
+// using the reflection.Register function:
+//
+//	refSvr := reflection.NewServer(reflection.ServerOptions{
+//		Services:           grpcServer,
+//		DescriptorResolver: sourceinfo.GlobalFiles,
+//		ExtensionResolver:  sourceinfo.GlobalFiles,
+//	})
+//	grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+//
+package sourceinfo
+
+import (
+	"fmt"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+	// GlobalFiles is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+	// can include source code info in the returned descriptors.
+	GlobalFiles Resolver = registry{}
+
+	mu               sync.RWMutex
+	sourceInfoByFile = map[string]*descriptorpb.SourceCodeInfo{}
+	fileDescriptors  = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+)
+
+type Resolver interface {
+	protodesc.Resolver
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+	mu.Lock()
+	defer mu.Unlock()
+	sourceInfoByFile[file] = srcInfo
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
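+//
+// For example (an illustrative sketch; the file name and srcInfo value are
+// hypothetical, not part of this package), generated code registers the info,
+// typically from an init function, and it can later be queried by path:
+//
+//	func init() {
+//		sourceinfo.RegisterSourceInfo("example/v1/greeting.proto", srcInfo)
+//	}
+//
+//	// elsewhere:
+//	si := sourceinfo.SourceInfoForFile("example/v1/greeting.proto")
+//	if si == nil {
+//		// nothing was registered for this file
+//	}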
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo { + mu.RLock() + defer mu.RUnlock() + return sourceInfoByFile[file] +} + +func getFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor { + if fd == nil { + return nil + } + + mu.RLock() + result := fileDescriptors[fd] + mu.RUnlock() + + if result != nil { + return result + } + + mu.Lock() + defer mu.Unlock() + // double-check, in case it was added to map while upgrading lock + result = fileDescriptors[fd] + if result != nil { + return result + } + + srcInfo := sourceInfoByFile[fd.Path()] + if len(srcInfo.GetLocation()) > 0 { + result = &fileDescriptor{ + FileDescriptor: fd, + locs: &sourceLocations{ + orig: srcInfo.Location, + }, + } + } else { + // nothing to do; don't bother wrapping + result = fd + } + fileDescriptors[fd] = result + return result +} + +type registry struct{} + +var _ protodesc.Resolver = ®istry{} + +func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + fd, err := protoregistry.GlobalFiles.FindFileByPath(path) + if err != nil { + return nil, err + } + return getFile(fd), nil +} + +func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + d, err := protoregistry.GlobalFiles.FindDescriptorByName(name) + if err != nil { + return nil, err + } + switch d := d.(type) { + case protoreflect.FileDescriptor: + return getFile(d), nil + case protoreflect.MessageDescriptor: + return messageDescriptor{d}, nil + case protoreflect.ExtensionTypeDescriptor: + return extensionDescriptor{d}, nil + case protoreflect.FieldDescriptor: + return fieldDescriptor{d}, nil + case protoreflect.OneofDescriptor: + return oneOfDescriptor{d}, nil + case protoreflect.EnumDescriptor: + return enumDescriptor{d}, nil + case protoreflect.EnumValueDescriptor: + return enumValueDescriptor{d}, nil + case protoreflect.ServiceDescriptor: + return serviceDescriptor{d}, nil + case protoreflect.MethodDescriptor: + return methodDescriptor{d}, nil + default: + return nil, fmt.Errorf("unrecognized descriptor type: %T", d) + } +} + +func (r registry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + xt, err := protoregistry.GlobalTypes.FindExtensionByName(field) + if err != nil { + return nil, err + } + return extensionType{xt}, nil +} + +func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field) + if err != nil { + return nil, err + } + return extensionType{xt}, nil +} + +func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) { + protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool { + return fn(extensionType{xt}) + }) +} diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go new file mode 100644 index 0000000000..134dadf407 --- /dev/null +++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/wrappers.go @@ -0,0 +1,508 @@ +package sourceinfo + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// These are wrappers around the various interfaces in the +// google.golang.org/protobuf/reflect/protoreflect that all +// make sure to return a FileDescriptor that includes source +// code info. 
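+//
+// A minimal sketch of the intended effect (the message name is hypothetical):
+// a descriptor resolved through sourceinfo.GlobalFiles is wrapped so that
+// walking to its parent file exposes any registered source code info:
+//
+//	d, err := sourceinfo.GlobalFiles.FindDescriptorByName("example.v1.Greeting")
+//	if err != nil {
+//		// handle unknown name
+//	}
+//	loc := d.ParentFile().SourceLocations().ByDescriptor(d)
+//	fmt.Println(loc.LeadingComments)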
+ +type fileDescriptor struct { + protoreflect.FileDescriptor + locs protoreflect.SourceLocations +} + +func (f fileDescriptor) ParentFile() protoreflect.FileDescriptor { + return f +} + +func (f fileDescriptor) Parent() protoreflect.Descriptor { + return nil +} + +func (f fileDescriptor) Imports() protoreflect.FileImports { + return imports{f.FileDescriptor.Imports()} +} + +func (f fileDescriptor) Messages() protoreflect.MessageDescriptors { + return messages{f.FileDescriptor.Messages()} +} + +func (f fileDescriptor) Enums() protoreflect.EnumDescriptors { + return enums{f.FileDescriptor.Enums()} +} + +func (f fileDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return extensions{f.FileDescriptor.Extensions()} +} + +func (f fileDescriptor) Services() protoreflect.ServiceDescriptors { + return services{f.FileDescriptor.Services()} +} + +func (f fileDescriptor) SourceLocations() protoreflect.SourceLocations { + return f.locs +} + +type imports struct { + protoreflect.FileImports +} + +func (im imports) Get(i int) protoreflect.FileImport { + fi := im.FileImports.Get(i) + return protoreflect.FileImport{ + FileDescriptor: getFile(fi.FileDescriptor), + IsPublic: fi.IsPublic, + IsWeak: fi.IsWeak, + } +} + +type messages struct { + protoreflect.MessageDescriptors +} + +func (m messages) Get(i int) protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageDescriptors.Get(i)} +} + +func (m messages) ByName(n protoreflect.Name) protoreflect.MessageDescriptor { + return messageDescriptor{m.MessageDescriptors.ByName(n)} +} + +type enums struct { + protoreflect.EnumDescriptors +} + +func (e enums) Get(i int) protoreflect.EnumDescriptor { + return enumDescriptor{e.EnumDescriptors.Get(i)} +} + +func (e enums) ByName(n protoreflect.Name) protoreflect.EnumDescriptor { + return enumDescriptor{e.EnumDescriptors.ByName(n)} +} + +type extensions struct { + protoreflect.ExtensionDescriptors +} + +func (e extensions) Get(i int) protoreflect.ExtensionDescriptor { + d := e.ExtensionDescriptors.Get(i) + if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +func (e extensions) ByName(n protoreflect.Name) protoreflect.ExtensionDescriptor { + d := e.ExtensionDescriptors.ByName(n) + if ed, ok := d.(protoreflect.ExtensionTypeDescriptor); ok { + return extensionDescriptor{ed} + } + return fieldDescriptor{d} +} + +type services struct { + protoreflect.ServiceDescriptors +} + +func (s services) Get(i int) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.Get(i)} +} + +func (s services) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor { + return serviceDescriptor{s.ServiceDescriptors.ByName(n)} +} + +type messageDescriptor struct { + protoreflect.MessageDescriptor +} + +func (m messageDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MessageDescriptor.ParentFile()) +} + +func (m messageDescriptor) Parent() protoreflect.Descriptor { + d := m.MessageDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m messageDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{m.MessageDescriptor.Fields()} +} + +func (m messageDescriptor) Oneofs() protoreflect.OneofDescriptors { + return oneOfs{m.MessageDescriptor.Oneofs()} +} + +func (m 
messageDescriptor) Enums() protoreflect.EnumDescriptors { + return enums{m.MessageDescriptor.Enums()} +} + +func (m messageDescriptor) Messages() protoreflect.MessageDescriptors { + return messages{m.MessageDescriptor.Messages()} +} + +func (m messageDescriptor) Extensions() protoreflect.ExtensionDescriptors { + return extensions{m.MessageDescriptor.Extensions()} +} + +type fields struct { + protoreflect.FieldDescriptors +} + +func (f fields) Get(i int) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.Get(i)} +} + +func (f fields) ByName(n protoreflect.Name) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByName(n)} +} + +func (f fields) ByJSONName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByJSONName(n)} +} + +func (f fields) ByTextName(n string) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByTextName(n)} +} + +func (f fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return fieldDescriptor{f.FieldDescriptors.ByNumber(n)} +} + +type oneOfs struct { + protoreflect.OneofDescriptors +} + +func (o oneOfs) Get(i int) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.Get(i)} +} + +func (o oneOfs) ByName(n protoreflect.Name) protoreflect.OneofDescriptor { + return oneOfDescriptor{o.OneofDescriptors.ByName(n)} +} + +type fieldDescriptor struct { + protoreflect.FieldDescriptor +} + +func (f fieldDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(f.FieldDescriptor.ParentFile()) +} + +func (f fieldDescriptor) Parent() protoreflect.Descriptor { + d := f.FieldDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (f fieldDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := f.FieldDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := f.FieldDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (f fieldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := f.FieldDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (f fieldDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := f.FieldDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (f fieldDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{f.FieldDescriptor.ContainingMessage()} +} + +func (f fieldDescriptor) Enum() protoreflect.EnumDescriptor { + ed := f.FieldDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + +func (f fieldDescriptor) Message() protoreflect.MessageDescriptor { + md := f.FieldDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +type oneOfDescriptor struct { + protoreflect.OneofDescriptor +} + +func (o oneOfDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(o.OneofDescriptor.ParentFile()) +} + +func (o oneOfDescriptor) Parent() protoreflect.Descriptor { + d := o.OneofDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case nil: + 
return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (o oneOfDescriptor) Fields() protoreflect.FieldDescriptors { + return fields{o.OneofDescriptor.Fields()} +} + +type enumDescriptor struct { + protoreflect.EnumDescriptor +} + +func (e enumDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumDescriptor.ParentFile()) +} + +func (e enumDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e enumDescriptor) Values() protoreflect.EnumValueDescriptors { + return enumValues{e.EnumDescriptor.Values()} +} + +type enumValues struct { + protoreflect.EnumValueDescriptors +} + +func (e enumValues) Get(i int) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.Get(i)} +} + +func (e enumValues) ByName(n protoreflect.Name) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByName(n)} +} + +func (e enumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return enumValueDescriptor{e.EnumValueDescriptors.ByNumber(n)} +} + +type enumValueDescriptor struct { + protoreflect.EnumValueDescriptor +} + +func (e enumValueDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.EnumValueDescriptor.ParentFile()) +} + +func (e enumValueDescriptor) Parent() protoreflect.Descriptor { + d := e.EnumValueDescriptor.Parent() + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return enumDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +type extensionDescriptor struct { + protoreflect.ExtensionTypeDescriptor +} + +func (e extensionDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(e.ExtensionTypeDescriptor.ParentFile()) +} + +func (e extensionDescriptor) Parent() protoreflect.Descriptor { + d := e.ExtensionTypeDescriptor.Parent() + switch d := d.(type) { + case protoreflect.MessageDescriptor: + return messageDescriptor{d} + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (e extensionDescriptor) MapKey() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapKey() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) MapValue() protoreflect.FieldDescriptor { + fd := e.ExtensionTypeDescriptor.MapValue() + if fd == nil { + return nil + } + return fieldDescriptor{fd} +} + +func (e extensionDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor { + ed := e.ExtensionTypeDescriptor.DefaultEnumValue() + if ed == nil { + return nil + } + return enumValueDescriptor{ed} +} + +func (e extensionDescriptor) ContainingOneof() protoreflect.OneofDescriptor { + od := e.ExtensionTypeDescriptor.ContainingOneof() + if od == nil { + return nil + } + return oneOfDescriptor{od} +} + +func (e extensionDescriptor) ContainingMessage() protoreflect.MessageDescriptor { + return messageDescriptor{e.ExtensionTypeDescriptor.ContainingMessage()} +} + +func (e extensionDescriptor) Enum() protoreflect.EnumDescriptor { + ed := e.ExtensionTypeDescriptor.Enum() + if ed == nil { + return nil + } + return enumDescriptor{ed} +} + 
+func (e extensionDescriptor) Message() protoreflect.MessageDescriptor { + md := e.ExtensionTypeDescriptor.Message() + if md == nil { + return nil + } + return messageDescriptor{md} +} + +func (e extensionDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return e +} + +var _ protoreflect.ExtensionTypeDescriptor = extensionDescriptor{} + +type serviceDescriptor struct { + protoreflect.ServiceDescriptor +} + +func (s serviceDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(s.ServiceDescriptor.ParentFile()) +} + +func (s serviceDescriptor) Parent() protoreflect.Descriptor { + d := s.ServiceDescriptor.Parent() + switch d := d.(type) { + case protoreflect.FileDescriptor: + return getFile(d) + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (s serviceDescriptor) Methods() protoreflect.MethodDescriptors { + return methods{s.ServiceDescriptor.Methods()} +} + +type methods struct { + protoreflect.MethodDescriptors +} + +func (m methods) Get(i int) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.Get(i)} +} + +func (m methods) ByName(n protoreflect.Name) protoreflect.MethodDescriptor { + return methodDescriptor{m.MethodDescriptors.ByName(n)} +} + +type methodDescriptor struct { + protoreflect.MethodDescriptor +} + +func (m methodDescriptor) ParentFile() protoreflect.FileDescriptor { + return getFile(m.MethodDescriptor.ParentFile()) +} + +func (m methodDescriptor) Parent() protoreflect.Descriptor { + d := m.MethodDescriptor.Parent() + switch d := d.(type) { + case protoreflect.ServiceDescriptor: + return serviceDescriptor{d} + case nil: + return nil + default: + panic(fmt.Sprintf("unexpected descriptor type %T", d)) + } +} + +func (m methodDescriptor) Input() protoreflect.MessageDescriptor { + return messageDescriptor{m.MethodDescriptor.Input()} +} + +func (m methodDescriptor) Output() protoreflect.MessageDescriptor { + return messageDescriptor{m.MethodDescriptor.Output()} +} + +type extensionType struct { + protoreflect.ExtensionType +} + +func (e extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return extensionDescriptor{e.ExtensionType.TypeDescriptor()} +} diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go index 1eaedfa004..6fca393707 100644 --- a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go +++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go @@ -4,11 +4,11 @@ package grpcdynamic import ( + "context" "fmt" "io" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -27,12 +27,7 @@ type Stub struct { // type used to construct Stubs. But the use of this interface allows // construction of stubs that use alternate concrete types as the transport for // RPC operations. -type Channel interface { - Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error - NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) -} - -var _ Channel = (*grpc.ClientConn)(nil) +type Channel = grpc.ClientConnInterface // NewStub creates a new RPC stub that uses the given channel for dispatching RPCs. 
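// Since Channel is an alias for grpc.ClientConnInterface, a *grpc.ClientConn
// can be passed directly. For example (the endpoint and method descriptor are
// placeholders, not part of this package):
//
//	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		// handle dial error
//	}
//	stub := grpcdynamic.NewStub(conn)
//	resp, err := stub.InvokeRpc(ctx, methodDesc, request)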
func NewStub(channel Channel) Stub { @@ -79,6 +74,7 @@ func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDesc ClientStreams: method.IsClientStreaming(), } if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() return nil, err } else { err = cs.SendMsg(request) @@ -91,6 +87,11 @@ func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDesc cancel() return nil, err } + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() return &ServerStream{cs, method.GetOutputType(), s.mf}, nil } } @@ -108,8 +109,14 @@ func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDesc ClientStreams: method.IsClientStreaming(), } if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil { + cancel() return nil, err } else { + go func() { + // when the new stream is finished, also cleanup the parent context + <-cs.Context().Done() + cancel() + }() return &ClientStream{cs, method, s.mf, cancel}, nil } } diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go index 38e5632e11..02c8298b36 100644 --- a/vendor/github.com/jhump/protoreflect/dynamic/json.go +++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go @@ -368,7 +368,7 @@ func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.Fi default: return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type()) } - err := writeString(b, strkey) + err := writeJsonString(b, strkey) if err != nil { return err } @@ -674,8 +674,10 @@ func isWellKnownValue(fd *desc.FieldDescriptor) bool { } func isWellKnownListValue(fd *desc.FieldDescriptor) bool { + // we look for ListValue; but we also look for Value, which can be assigned a ListValue return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE && - fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" + (fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" || + fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value") } func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) { diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go index 43dce67e6c..03162a4de1 100644 --- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go @@ -1,4 +1,5 @@ -//+build !go1.12 +//go:build !go1.12 +// +build !go1.12 package dynamic diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go index 52eaa82e21..ef1b37050f 100644 --- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go +++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go @@ -1,4 +1,5 @@ -//+build go1.12 +//go:build go1.12 +// +build go1.12 package dynamic diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go index 3fca3eb0f0..70dc6ad1c5 100644 --- a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go @@ -2,6 +2,7 @@ package grpcreflect import ( "bytes" + "context" "fmt" "io" "reflect" @@ -10,7 +11,6 @@ import ( "github.com/golang/protobuf/proto" dpb 
"github.com/golang/protobuf/protoc-gen-go/descriptor" - "golang.org/x/net/context" "google.golang.org/grpc/codes" rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" "google.golang.org/grpc/status" @@ -159,7 +159,11 @@ func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) FileByFilename: filename, }, } - fd, err := cr.getAndCacheFileDescriptors(req, filename, "") + accept := func(fd *desc.FileDescriptor) bool { + return fd.GetName() == filename + } + + fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept) if isNotFound(err) { // file not found? see if we can look up via alternate name if alternate, ok := internal.StdFileAliases[filename]; ok { @@ -168,7 +172,7 @@ func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) FileByFilename: alternate, }, } - fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename) + fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept) if isNotFound(err) { err = fileNotFound(filename, nil) } @@ -197,7 +201,10 @@ func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, err FileContainingSymbol: symbol, }, } - fd, err := cr.getAndCacheFileDescriptors(req, "", "") + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindSymbol(symbol) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) if isNotFound(err) { err = symbolNotFound(symbol, symbolTypeUnknown, nil) } else if e, ok := err.(*elementNotFoundError); ok { @@ -226,7 +233,10 @@ func (cr *Client) FileContainingExtension(extendedMessageName string, extensionN }, }, } - fd, err := cr.getAndCacheFileDescriptors(req, "", "") + accept := func(fd *desc.FileDescriptor) bool { + return fd.FindExtension(extendedMessageName, extensionNumber) != nil + } + fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept) if isNotFound(err) { err = extensionNotFound(extendedMessageName, extensionNumber, nil) } else if e, ok := err.(*elementNotFoundError); ok { @@ -235,7 +245,7 @@ func (cr *Client) FileContainingExtension(extendedMessageName string, extensionN return fd, err } -func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) { +func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) { resp, err := cr.send(req) if err != nil { return nil, err @@ -253,7 +263,7 @@ func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, e // should be the answer). If we're looking for a file by name, we can be // smarter and make sure to grab one by name instead of just grabbing the // first one. 
- var firstFd *dpb.FileDescriptorProto + var fds []*dpb.FileDescriptorProto for _, fdBytes := range fdResp.FileDescriptorProto { fd := &dpb.FileDescriptorProto{} if err = proto.Unmarshal(fdBytes, fd); err != nil { @@ -266,13 +276,6 @@ func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, e } cr.cacheMu.Lock() - // see if this file was created and cached concurrently - if firstFd == nil { - if d, ok := cr.filesByName[fd.GetName()]; ok { - cr.cacheMu.Unlock() - return d, nil - } - } // store in cache of raw descriptor protos, but don't overwrite existing protos if existingFd, ok := cr.protosByName[fd.GetName()]; ok { fd = existingFd @@ -280,15 +283,22 @@ func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, e cr.protosByName[fd.GetName()] = fd } cr.cacheMu.Unlock() - if firstFd == nil { - firstFd = fd - } + + fds = append(fds, fd) } - if firstFd == nil { - return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()} + + // find the right result from the files returned + for _, fd := range fds { + result, err := cr.descriptorFromProto(fd) + if err != nil { + return nil, err + } + if accept(result) { + return result, nil + } } - return cr.descriptorFromProto(firstFd) + return nil, status.Errorf(codes.NotFound, "response does not include expected file") } func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) { diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go index c9ef6192ee..7ff1912785 100644 --- a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go +++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go @@ -4,13 +4,19 @@ import ( "fmt" "google.golang.org/grpc" + "google.golang.org/grpc/reflection" "github.com/jhump/protoreflect/desc" ) +// GRPCServer is the interface provided by a gRPC server. In addition to being a +// service registrar (for registering services and handlers), it also has an +// accessor for retrieving metadata about all registered services. +type GRPCServer = reflection.GRPCServer + // LoadServiceDescriptors loads the service descriptors for all services exposed by the // given GRPC server. -func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) { +func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) { descs := map[string]*desc.ServiceDescriptor{} for name, info := range s.GetServiceInfo() { file, ok := info.Metadata.(string) diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore index b35f8449bf..d31b378152 100644 --- a/vendor/github.com/klauspost/compress/.gitignore +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -23,3 +23,10 @@ _testmain.go *.test *.prof /s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 0e2dc116ad..c7cf1a20c3 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,62 @@ This package provides various compression algorithms. 
# changelog +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. 
[#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
+	* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
+	* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
+	* Minimum version is Go 1.16, added CI test on 1.18.
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
 * Mar 3, 2022 (v1.15.0)
 	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
 	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
@@ -60,6 +116,9 @@ While the release has been extensively tested, it is recommended to testing when
 * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
 * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+<details>
+	<summary>See changes to v1.13.x</summary>
+
 * Aug 30, 2021 (v1.13.5)
 	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
 	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -88,6 +147,8 @@ While the release has been extensively tested, it is recommended to testing when
 * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors.
 * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+</details>
+
See changes to v1.12.x diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go deleted file mode 100644 index ff2c69d60c..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/autogen.go +++ /dev/null @@ -1,5 +0,0 @@ -package huff0 - -//go:generate go run generate.go -//go:generate asmfmt -w decompress_amd64.s -//go:generate asmfmt -w decompress_8b_amd64.s diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 451160edda..504a7be9da 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { return uint16(b.value >> ((64 - n) & 63)) } -// peekTopBits(n) is equvialent to peekBitFast(64 - n) -func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { - return uint16(b.value >> n) -} - func (b *bitReaderShifted) advance(n uint8) { b.bitsRead += n b.value <<= n & 63 @@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() { } } -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderShifted) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - func (b *bitReaderShifted) remaining() uint { return b.off*8 + uint(64-b.bitsRead) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 6bce4e87d4..ec71f7a349 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -5,8 +5,6 @@ package huff0 -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} /* up to 16 bits */ -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - return - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - b.bitContainer >>= 1 << 3 - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - b.bitContainer >>= 2 << 3 - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - b.bitContainer >>= 3 << 3 - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - b.bitContainer >>= 4 << 3 - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - b.bitContainer >>= 5 << 3 - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - b.bitContainer >>= 6 << 3 - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - b.bitContainer >>= 7 << 3 - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - b.bitContainer = 0 - b.nBits = 0 - return - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -201,10 +93,3 @@ func (b *bitWriter) close() error { b.flushAlign() return nil } - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go index 50bcdf6ea9..4dcab8d232 100644 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { b.off = 0 } -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - // Int32 returns a little endian int32 starting at current offset. func (b byteReader) Int32() int32 { v3 := int32(b.b[b.off+3]) @@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 } -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - // remain will return the number of bytes remaining. 
func (b byteReader) remain() int { return len(b.b) - b.off diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index bc95ac623b..4d14542fac 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { return true } +//lint:ignore U1000 used for debugging func (s *Scratch) validateTable(c cTable) bool { if len(c) < int(s.symbolLen) { return false diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 04f6529955..c0c48bd707 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -11,7 +11,6 @@ import ( type dTable struct { single []dEntrySingle - double []dEntryDouble } // single-symbols decoding @@ -19,13 +18,6 @@ type dEntrySingle struct { entry uint16 } -// double-symbols decoding -type dEntryDouble struct { - seq [4]byte - nBits uint8 - len uint8 -} - // Uses special code for all tables that are < 8 bits. const use8BitTables = true @@ -35,7 +27,7 @@ const use8BitTables = true // If no Scratch is provided a new one is allocated. // The returned Scratch can be used for encoding or decoding input using this table. func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(in) + s, err = s.prepare(nil) if err != nil { return s, nil, err } @@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte { return &[4][256]byte{} } -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - // decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. // The cap of the output buffer will be the maximum decompressed size. // The length of the supplied input must match the end of a block exactly. @@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { const shift = 56 const tlSize = 1 << 8 - const tlMask = tlSize - 1 single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s deleted file mode 100644 index 0d6cb1a962..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s +++ /dev/null @@ -1,488 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -#define bufoff 256 // see decompress.go, we're using [4][256]byte table - -// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, -// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) -TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 -#define off R8 -#define buffer DI -#define table SI - -#define br_bits_read R9 -#define br_value R10 -#define br_offset R11 -#define peek_bits R12 -#define exhausted DX - -#define br0 R13 -#define br1 R14 -#define br2 R15 -#define br3 BP - - MOVQ BP, 0(SP) - - XORQ exhausted, exhausted // exhausted = false - XORQ off, off // off = 0 - - MOVBQZX peekBits+32(FP), peek_bits - MOVQ buf+40(FP), buffer - MOVQ tbl+48(FP), table - - MOVQ pbr0+0(FP), br0 - MOVQ pbr1+8(FP), br1 - MOVQ pbr2+16(FP), br2 - MOVQ pbr3+24(FP), br3 - -main_loop: - - // const stream = 0 - // br0.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read - MOVQ bitReaderShifted_value(br0), br_value - MOVQ bitReaderShifted_off(br0), br_offset - - // if b.bitsRead >= 32 { - CMPQ br_bits_read, $32 - JB skip_fill0 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br0), AX - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVQ br_bits_read, CX - SHLQ CL, AX - ORQ AX, br_value - - // exhausted = exhausted || (br0.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - - // } -skip_fill0: - - // val0 := br0.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // 
br0.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val1 := br0.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br0.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 0(buffer)(off*1) - - // SECOND PART: - // val2 := br0.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v2 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br0.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val3 := br0.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v3 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br0.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off+2] = uint8(v2.entry >> 8) - // buf[stream][off+3] = uint8(v3.entry >> 8) - MOVW BX, 0+2(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br0) - MOVQ br_value, bitReaderShifted_value(br0) - MOVQ br_offset, bitReaderShifted_off(br0) - - // const stream = 1 - // br1.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read - MOVQ bitReaderShifted_value(br1), br_value - MOVQ bitReaderShifted_off(br1), br_offset - - // if b.bitsRead >= 32 { - CMPQ br_bits_read, $32 - JB skip_fill1 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br1), AX - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVQ br_bits_read, CX - SHLQ CL, AX - ORQ AX, br_value - - // exhausted = exhausted || (br1.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - - // } -skip_fill1: - - // val0 := br1.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br1.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val1 := br1.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br1.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 256(buffer)(off*1) - - // SECOND PART: - // val2 := 
br1.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v2 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br1.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val3 := br1.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v3 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br1.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off+2] = uint8(v2.entry >> 8) - // buf[stream][off+3] = uint8(v3.entry >> 8) - MOVW BX, 256+2(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br1) - MOVQ br_value, bitReaderShifted_value(br1) - MOVQ br_offset, bitReaderShifted_off(br1) - - // const stream = 2 - // br2.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read - MOVQ bitReaderShifted_value(br2), br_value - MOVQ bitReaderShifted_off(br2), br_offset - - // if b.bitsRead >= 32 { - CMPQ br_bits_read, $32 - JB skip_fill2 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br2), AX - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVQ br_bits_read, CX - SHLQ CL, AX - ORQ AX, br_value - - // exhausted = exhausted || (br2.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - - // } -skip_fill2: - - // val0 := br2.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br2.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val1 := br2.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br2.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 512(buffer)(off*1) - - // SECOND PART: - // val2 := br2.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v2 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br2.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val3 := br2.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v3 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br2.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - 
// these two writes get coalesced - // buf[stream][off+2] = uint8(v2.entry >> 8) - // buf[stream][off+3] = uint8(v3.entry >> 8) - MOVW BX, 512+2(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br2) - MOVQ br_value, bitReaderShifted_value(br2) - MOVQ br_offset, bitReaderShifted_off(br2) - - // const stream = 3 - // br3.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read - MOVQ bitReaderShifted_value(br3), br_value - MOVQ bitReaderShifted_off(br3), br_offset - - // if b.bitsRead >= 32 { - CMPQ br_bits_read, $32 - JB skip_fill3 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br3), AX - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVQ br_bits_read, CX - SHLQ CL, AX - ORQ AX, br_value - - // exhausted = exhausted || (br3.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - - // } -skip_fill3: - - // val0 := br3.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br3.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val1 := br3.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br3.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 768(buffer)(off*1) - - // SECOND PART: - // val2 := br3.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v2 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br3.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val3 := br3.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v3 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br3.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // these two writes get coalesced - // buf[stream][off+2] = uint8(v2.entry >> 8) - // buf[stream][off+3] = uint8(v3.entry >> 8) - MOVW BX, 768+2(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br3) - MOVQ br_value, bitReaderShifted_value(br3) - MOVQ br_offset, bitReaderShifted_off(br3) - - ADDQ $4, off // off += 2 - - TESTB DH, DH // any br[i].ofs < 4? 
- JNZ end - - CMPQ off, $bufoff - JL main_loop - -end: - MOVQ 0(SP), BP - - MOVB off, ret+56(FP) - RET - -#undef off -#undef buffer -#undef table - -#undef br_bits_read -#undef br_value -#undef br_offset -#undef peek_bits -#undef exhausted - -#undef br0 -#undef br1 -#undef br2 -#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in deleted file mode 100644 index 6d477a2c11..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in +++ /dev/null @@ -1,197 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - - -#define bufoff 256 // see decompress.go, we're using [4][256]byte table - -//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, -// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) -TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 -#define off R8 -#define buffer DI -#define table SI - -#define br_bits_read R9 -#define br_value R10 -#define br_offset R11 -#define peek_bits R12 -#define exhausted DX - -#define br0 R13 -#define br1 R14 -#define br2 R15 -#define br3 BP - - MOVQ BP, 0(SP) - - XORQ exhausted, exhausted // exhausted = false - XORQ off, off // off = 0 - - MOVBQZX peekBits+32(FP), peek_bits - MOVQ buf+40(FP), buffer - MOVQ tbl+48(FP), table - - MOVQ pbr0+0(FP), br0 - MOVQ pbr1+8(FP), br1 - MOVQ pbr2+16(FP), br2 - MOVQ pbr3+24(FP), br3 - -main_loop: -{{ define "decode_2_values_x86" }} - // const stream = {{ var "id" }} - // br{{ var "id"}}.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read - MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value - MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset - - // if b.bitsRead >= 32 { - CMPQ br_bits_read, $32 - JB skip_fill{{ var "id" }} - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br{{ var "id" }}), AX - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVQ br_bits_read, CX - SHLQ CL, AX - ORQ AX, br_value - - // exhausted = exhausted || (br{{ var "id"}}.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - // } -skip_fill{{ var "id" }}: - - // val0 := br{{ var "id"}}.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br{{ var "id"}}.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val1 := br{{ var "id"}}.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br{{ var "id"}}.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, {{ var "bufofs" }}(buffer)(off*1) - - // SECOND PART: - // val2 := br{{ var "id"}}.peekTopBits(peekBits) - MOVQ br_value, AX - MOVQ 
peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v2 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br{{ var "id"}}.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - // val3 := br{{ var "id"}}.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - - // v3 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br{{ var "id"}}.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - MOVBQZX AL, CX - SHLQ CX, br_value // value <<= n - ADDQ CX, br_bits_read // bits_read += n - - - // these two writes get coalesced - // buf[stream][off+2] = uint8(v2.entry >> 8) - // buf[stream][off+3] = uint8(v3.entry >> 8) - MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) - MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) - MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) -{{ end }} - - {{ set "id" "0" }} - {{ set "ofs" "0" }} - {{ set "bufofs" "0" }} {{/* id * bufoff */}} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "1" }} - {{ set "ofs" "8" }} - {{ set "bufofs" "256" }} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "2" }} - {{ set "ofs" "16" }} - {{ set "bufofs" "512" }} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "3" }} - {{ set "ofs" "24" }} - {{ set "bufofs" "768" }} - {{ template "decode_2_values_x86" . }} - - ADDQ $4, off // off += 2 - - TESTB DH, DH // any br[i].ofs < 4? - JNZ end - - CMPQ off, $bufoff - JL main_loop -end: - MOVQ 0(SP), BP - - MOVB off, ret+56(FP) - RET -#undef off -#undef buffer -#undef table - -#undef br_bits_read -#undef br_value -#undef br_offset -#undef peek_bits -#undef exhausted - -#undef br0 -#undef br1 -#undef br2 -#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index d47f6644f3..9f3e9f79e2 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -2,30 +2,40 @@ // +build amd64,!appengine,!noasm,gc // This file contains the specialisation of Decoder.Decompress4X -// that uses an asm implementation of its main loop. +// and Decoder.Decompress1X that use an asm implementation of their main loops. package huff0 import ( "errors" "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" ) // decompress4x_main_loop_x86 is an x86 assembler implementation // of Decompress4X when tablelog > 8. -// go:noescape -func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, - peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) // decompress4x_8b_loop_x86 is an x86 assembler implementation // of Decompress4X when tablelog <= 8 which decodes 4 entries // per loop. -// go:noescape -func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, - peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) // fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800 +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + // Decompress4X will decompress a 4X encoded stream. // The length of the supplied input must match the end of a block exactly. // The *capacity* of the dst slice must match the destination size of @@ -42,6 +52,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { if cap(dst) < fallback8BitSize && use8BitTables { return d.decompress4X8bit(dst, src) } + var br [4]bitReaderShifted // Decode "jump table" start := 6 @@ -71,70 +82,25 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { const tlMask = tlSize - 1 single := d.dt.single[:tlSize] - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 var decoded int - const debug = false - - // see: bitReaderShifted.peekBitsFast() - peekBits := uint8((64 - d.actualTableLog) & 63) - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. } - if use8BitTables { - off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) + decompress4x_8b_main_loop_amd64(&ctx) } else { - off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) - } - if debug { - fmt.Print("DEBUG: ") - fmt.Printf("off=%d,", off) - for i := 0; i < 4; i++ { - fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}", - i, br[i].bitsRead, br[i].value, br[i].off) - } - fmt.Println("") - } - - if off != 0 { - break + decompress4x_main_loop_amd64(&ctx) } - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - copy(out, buf[0][:]) - copy(out[dstEvery:], buf[1][:]) - copy(out[dstEvery*2:], buf[2][:]) - copy(out[dstEvery*3:], buf[3][:]) - out = out[bufoff:] - decoded += bufoff * 4 - // There must at least be 3 buffers left. - if len(out) < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] + decoded = ctx.decoded + out = out[decoded/4:] } // Decode remaining. 
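The key change in the hunk above is the calling convention: instead of passing four bit readers plus scalars and getting an offset back, the Go side now fills a single decompress4xContext and the assembly loop mutates it in place, reporting progress through ctx.decoded. A minimal pure-Go sketch of that contract follows; the names here are illustrative stand-ins, not the library's own API:

```go
package main

import "fmt"

// decodeContext loosely mirrors the shape of decompress4xContext: the
// caller preloads input, output, and limits; the hot loop updates the
// decoded counter before returning.
type decodeContext struct {
	in      []byte
	out     []byte
	decoded int
}

// mainLoop stands in for decompress4x_main_loop_amd64. The real loop does
// table-driven Huffman decoding; a copy is used here only to show how the
// context struct carries state in and progress out.
func mainLoop(ctx *decodeContext) {
	n := copy(ctx.out, ctx.in)
	ctx.decoded = n
}

func main() {
	ctx := decodeContext{in: []byte("payload"), out: make([]byte, 32)}
	mainLoop(&ctx)
	fmt.Printf("decoded %d bytes: %q\n", ctx.decoded, ctx.out[:ctx.decoded])
}
```

Passing one pointer keeps the assembly ABI trivial and lets the loop write all of its results back without multiple return values, which is presumably why the generated code (see the gen.go marker below) adopts it.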
@@ -150,7 +116,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { for bitsLeft > 0 { br.fill() if offset >= endsAt { - d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -164,7 +129,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { offset++ } if offset != endsAt { - d.bufs.Put(buf) return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) } decoded += offset - dstEvery*i @@ -173,9 +137,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { return nil, err } } - d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } return dst, nil } + +// decompress1x_main_loop_amd64 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 2edad3ea5a..dd1a5aecd6 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -1,506 +1,847 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -#ifdef GOAMD64_v4 -#ifndef GOAMD64_v3 -#define GOAMD64_v3 -#endif -#endif - -#define bufoff 256 // see decompress.go, we're using [4][256]byte table - -// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, -// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) -TEXT ·decompress4x_main_loop_x86(SB),
NOSPLIT, $8 -#define off R8 -#define buffer DI -#define table SI - -#define br_bits_read R9 -#define br_value R10 -#define br_offset R11 -#define peek_bits R12 -#define exhausted DX - -#define br0 R13 -#define br1 R14 -#define br2 R15 -#define br3 BP - - MOVQ BP, 0(SP) - - XORQ exhausted, exhausted // exhausted = false - XORQ off, off // off = 0 - - MOVBQZX peekBits+32(FP), peek_bits - MOVQ buf+40(FP), buffer - MOVQ tbl+48(FP), table - - MOVQ pbr0+0(FP), br0 - MOVQ pbr1+8(FP), br1 - MOVQ pbr2+16(FP), br2 - MOVQ pbr3+24(FP), br3 +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. -main_loop: - - // const stream = 0 - // br0.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read - MOVQ bitReaderShifted_value(br0), br_value - MOVQ bitReaderShifted_off(br0), br_offset +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc - // We must have at least 2 * max tablelog left - CMPQ br_bits_read, $64-22 - JBE skip_fill0 +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br0), AX + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 // b.value |= uint64(low) << (b.bitsRead & 63) -#ifdef GOAMD64_v3 - SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) - -#else - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - MOVQ br_bits_read, CX - SHLQ CL, AX - -#endif - - ORQ AX, br_value + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 // exhausted = exhausted || (br0.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL - // } skip_fill0: - // val0 := br0.peekTopBits(peekBits) -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask - -#else - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br0.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - -#endif - - ADDQ CX, br_bits_read // bits_read += n + MOVW (R10)(R14*2), CX -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 -#else // val1 := br0.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 + MOVW (R10)(R14*2), CX // br0.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, 
br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - -#endif - - ADDQ CX, br_bits_read // bits_read += n + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 0(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br0) - MOVQ br_value, bitReaderShifted_value(br0) - MOVQ br_offset, bitReaderShifted_off(br0) - - // const stream = 1 - // br1.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read - MOVQ bitReaderShifted_value(br1), br_value - MOVQ bitReaderShifted_off(br1), br_offset - - // We must have at least 2 * max tablelog left - CMPQ br_bits_read, $64-22 - JBE skip_fill1 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br1), AX + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 // b.value |= uint64(low) << (b.bitsRead & 63) -#ifdef GOAMD64_v3 - SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) - -#else - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - MOVQ br_bits_read, CX - SHLQ CL, AX - -#endif - - ORQ AX, br_value + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 // exhausted = exhausted || (br1.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL - // } skip_fill1: - // val0 := br1.peekTopBits(peekBits) -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask - -#else - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br1.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - -#endif - - ADDQ CX, br_bits_read // bits_read += n + MOVW (R10)(R14*2), CX -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 -#else // val1 := br1.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 + MOVW (R10)(R14*2), CX // br1.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - -#endif - - ADDQ CX, br_bits_read // bits_read += n + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 
256(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br1) - MOVQ br_value, bitReaderShifted_value(br1) - MOVQ br_offset, bitReaderShifted_off(br1) - - // const stream = 2 - // br2.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read - MOVQ bitReaderShifted_value(br2), br_value - MOVQ bitReaderShifted_off(br2), br_offset - - // We must have at least 2 * max tablelog left - CMPQ br_bits_read, $64-22 - JBE skip_fill2 - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br2), AX + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 // b.value |= uint64(low) << (b.bitsRead & 63) -#ifdef GOAMD64_v3 - SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) - -#else - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - MOVQ br_bits_read, CX - SHLQ CL, AX - -#endif - - ORQ AX, br_value + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 // exhausted = exhausted || (br2.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL - // } skip_fill2: - // val0 := br2.peekTopBits(peekBits) -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask - -#else - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br2.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n - -#endif + MOVW (R10)(R14*2), CX - ADDQ CX, br_bits_read // bits_read += n + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask - -#else // val1 := br2.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask - -#endif + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 + MOVW (R10)(R14*2), CX // br2.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n - -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 -#endif + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ 
R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 - ADDQ CX, br_bits_read // bits_read += n + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 512(buffer)(off*1) +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br2) - MOVQ br_value, bitReaderShifted_value(br2) - MOVQ br_offset, bitReaderShifted_off(br2) + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX - // const stream = 3 - // br3.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read - MOVQ bitReaderShifted_value(br3), br_value - MOVQ bitReaderShifted_off(br3), br_offset + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 - // We must have at least 2 * max tablelog left - CMPQ br_bits_read, $64-22 - JBE skip_fill3 + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br3), AX + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 - // b.value |= uint64(low) << (b.bitsRead & 63) -#ifdef GOAMD64_v3 - SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET -#else - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - MOVQ br_bits_read, CX - SHLQ CL, AX +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX -#endif + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 - ORQ AX, br_value + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 - // exhausted = exhausted || (br3.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 - // } -skip_fill3: + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL - // val0 := br3.peekTopBits(peekBits) -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 -#else - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX -#endif + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 + 
// val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 48(R11), R15 - // br3.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 -#endif + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX - ADDQ CX, br_bits_read // bits_read += n + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 -#else - // val1 := br3.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 -#endif + // exhausted = exhausted || (br2.off < 4) + 
CMPQ R14, $0x04 + SETLT AL + ORB AL, DL - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 - // br3.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 -#endif + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 - ADDQ CX, br_bits_read // bits_read += n + // exhausted = exhausted || (br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, 768(buffer)(off*1) +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br3) - MOVQ br_value, bitReaderShifted_value(br3) - MOVQ br_offset, bitReaderShifted_off(br3) + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX - ADDQ $2, off // off += 2 + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 - TESTB DH, DH // any br[i].ofs < 4? 
- JNZ end + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET - CMPQ off, $bufoff - JL main_loop +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition -end: - MOVQ 0(SP), BP +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET - MOVB off, ret+56(FP) + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) RET -#undef off -#undef buffer -#undef table +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition -#undef br_bits_read 
-#undef br_value -#undef br_offset -#undef peek_bits -#undef exhausted +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET -#undef br0 -#undef br1 -#undef br2 -#undef br3 + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in deleted file mode 100644 index 330d86ae15..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in +++ /dev/null @@ -1,195 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -#ifdef GOAMD64_v4 -#ifndef GOAMD64_v3 -#define GOAMD64_v3 -#endif -#endif - -#define bufoff 256 // see decompress.go, we're using [4][256]byte table - -//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, -// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) -TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 -#define off R8 -#define buffer DI -#define table SI - -#define br_bits_read R9 -#define br_value R10 -#define br_offset R11 -#define peek_bits R12 -#define exhausted DX - -#define br0 R13 -#define br1 R14 -#define br2 R15 -#define br3 BP - - MOVQ BP, 0(SP) - - XORQ exhausted, exhausted // exhausted = false - XORQ off, off // off = 0 - - MOVBQZX peekBits+32(FP), peek_bits - MOVQ buf+40(FP), buffer - MOVQ tbl+48(FP), table - - MOVQ pbr0+0(FP), br0 - MOVQ pbr1+8(FP), br1 - MOVQ pbr2+16(FP), br2 - MOVQ pbr3+24(FP), br3 - -main_loop: -{{ define "decode_2_values_x86" }} - // const stream = {{ var "id" }} - // br{{ var "id"}}.fillFast() - MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read - MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value - MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset - - // We must have at least 2 * max tablelog left - CMPQ br_bits_read, $64-22 - JBE skip_fill{{ var "id" }} - - SUBQ $32, br_bits_read // b.bitsRead -= 32 - SUBQ $4, br_offset // b.off -= 4 - - // v := b.in[b.off-4 : b.off] - // v = v[:4] - // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - MOVQ bitReaderShifted_in(br{{ var "id" }}), AX - - // b.value |= uint64(low) << (b.bitsRead & 63) -#ifdef GOAMD64_v3 - SHLXQ br_bits_read, 
0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) -#else - MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) - MOVQ br_bits_read, CX - SHLQ CL, AX -#endif - - ORQ AX, br_value - - // exhausted = exhausted || (br{{ var "id"}}.off < 4) - CMPQ br_offset, $4 - SETLT DL - ORB DL, DH - // } -skip_fill{{ var "id" }}: - - // val0 := br{{ var "id"}}.peekTopBits(peekBits) -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask -#else - MOVQ br_value, AX - MOVQ peek_bits, CX - SHRQ CL, AX // AX = (value >> peek_bits) & mask -#endif - - // v0 := table[val0&mask] - MOVW 0(table)(AX*2), AX // AX - v0 - - // br{{ var "id"}}.advance(uint8(v0.entry)) - MOVB AH, BL // BL = uint8(v0.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n -#endif - - ADDQ CX, br_bits_read // bits_read += n - - -#ifdef GOAMD64_v3 - SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask -#else - // val1 := br{{ var "id"}}.peekTopBits(peekBits) - MOVQ peek_bits, CX - MOVQ br_value, AX - SHRQ CL, AX // AX = (value >> peek_bits) & mask -#endif - - // v1 := table[val1&mask] - MOVW 0(table)(AX*2), AX // AX - v1 - - // br{{ var "id"}}.advance(uint8(v1.entry)) - MOVB AH, BH // BH = uint8(v1.entry >> 8) - -#ifdef GOAMD64_v3 - MOVBQZX AL, CX - SHLXQ AX, br_value, br_value // value <<= n -#else - MOVBQZX AL, CX - SHLQ CL, br_value // value <<= n -#endif - - ADDQ CX, br_bits_read // bits_read += n - - - // these two writes get coalesced - // buf[stream][off] = uint8(v0.entry >> 8) - // buf[stream][off+1] = uint8(v1.entry >> 8) - MOVW BX, {{ var "bufofs" }}(buffer)(off*1) - - // update the bitrader reader structure - MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) - MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) - MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) -{{ end }} - - {{ set "id" "0" }} - {{ set "ofs" "0" }} - {{ set "bufofs" "0" }} {{/* id * bufoff */}} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "1" }} - {{ set "ofs" "8" }} - {{ set "bufofs" "256" }} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "2" }} - {{ set "ofs" "16" }} - {{ set "bufofs" "512" }} - {{ template "decode_2_values_x86" . }} - - {{ set "id" "3" }} - {{ set "ofs" "24" }} - {{ set "bufofs" "768" }} - {{ template "decode_2_values_x86" . }} - - ADDQ $2, off // off += 2 - - TESTB DH, DH // any br[i].ofs < 4? - JNZ end - - CMPQ off, $bufoff - JL main_loop -end: - MOVQ 0(SP), BP - - MOVB off, ret+56(FP) - RET -#undef off -#undef buffer -#undef table - -#undef br_bits_read -#undef br_value -#undef br_offset -#undef peek_bits -#undef exhausted - -#undef br0 -#undef br1 -#undef br2 -#undef br3 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go index 126b4d68a9..4f6f37cb2c 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { } return dst, nil } + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. 
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 0000000000..3954c51219 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For a more versatile solution, check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call the returned function to restore the previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 0000000000..e802579c4f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 0000000000..4465fbe9e9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index e3445ac194..beb7fa8720 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co ### Benchmarks -These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd). - The first two are streaming decodes and the last are smaller inputs. - + +Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used. 
+ ``` -BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op -BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op - -BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op -BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op - -Concurrent performance: - -BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op - -BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op -BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op +BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op +BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op + +Concurrent blocks, performance: + +BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op 
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op +BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op ``` -This reflects the performance around May 2020, but this may be out of date. +This reflects the performance around May 2022, but this may be out of date. ## Zstd inside ZIP files diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index d7cd15ba29..97299d499c 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 { return v } -func (b *bitReader) get16BitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - // fillFast() will make sure at least 32 bits are available. // There must be at least 4 bytes available. func (b *bitReader) fillFast() { diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index b366182850..78b3c61be3 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -5,8 +5,6 @@ package zstd -import "fmt" - // bitWriter will write bits. // First bit will be LSB of the first byte of output. type bitWriter struct { @@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { b.nBits += bits } -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing. 
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 7d567a54a0..7eed729be2 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,9 +5,14 @@ package zstd import ( + "bytes" + "encoding/binary" "errors" "fmt" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -38,14 +43,14 @@ const ( // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) maxCompressedBlockSize = 128 << 10 + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + // Maximum possible block size (all Raw+Uncompressed). maxBlockSize = (1 << 21) - 1 - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header - maxCompressedLiteralSize = 1 << 18 - maxRLELiteralSize = 1 << 20 - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff // We support slightly less than the reference decoder to be able to // use ints on 32 bit archs. @@ -97,7 +102,6 @@ type blockDec struct { // Block is RLE, this is the size. RLESize uint32 - tmp [4]byte Type blockType @@ -136,7 +140,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { b.Type = blockType((bh >> 1) & 3) // find size. 
cSize := int(bh >> 3) - maxSize := maxBlockSize + maxSize := maxCompressedBlockSizeAlloc switch b.Type { case blockTypeReserved: return ErrReservedBlockType @@ -157,9 +161,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { println("Data size on stream:", cSize) } b.RLESize = 0 - maxSize = maxCompressedBlockSize + maxSize = maxCompressedBlockSizeAlloc if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + maxSize = int(windowSize) + compressedBlockOverAlloc } if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { if debugDecoder { @@ -190,9 +194,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { // Read block data. if cap(b.dataStorage) < cSize { if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize) + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSize) + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } if cap(b.dst) <= maxSize { @@ -360,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, litRegenSize) + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) } else { - if litRegenSize > maxCompressedLiteralSize { - // Exceptional - b.literalBuf = make([]byte, litRegenSize) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) } } literals = b.literalBuf[:litRegenSize] @@ -397,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } var err error // Use our out buffer. - huff.MaxDecodedSize = maxCompressedBlockSize + huff.MaxDecodedSize = litRegenSize if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) } else { @@ -429,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err // Ensure we have space to store it. if cap(b.literalBuf) < litRegenSize { if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize) + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) } } huff := hist.huffTree @@ -448,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err return in, err } hist.huffTree = huff - huff.MaxDecodedSize = maxCompressedBlockSize + huff.MaxDecodedSize = litRegenSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -463,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err if len(literals) != litRegenSize { return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } + // Re-cap to get extra size. 
+ literals = b.literalBuf[:len(literals)] if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } @@ -486,10 +487,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.dst = append(b.dst, hist.decoders.literals...) return nil } - err = hist.decoders.decodeSync(hist) + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) if err != nil { return err } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } b.dst = hist.decoders.out hist.recentOffsets = hist.decoders.prevOffset return nil @@ -632,6 +638,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + return nil } @@ -650,6 +672,7 @@ func (b *blockDec) decodeSequences(hist *history) error { } hist.decoders.windowSize = hist.windowSize hist.decoders.prevOffset = hist.recentOffsets + err := hist.decoders.decode(b.sequence) hist.recentOffsets = hist.decoders.prevOffset return err diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index b80191e4b1..4493baa756 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { return r, nil } -func (b *byteBuf) remain() []byte { - return *b -} - func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go index 2c4fca17fa..0e59a242d8 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -13,12 +13,6 @@ type byteReader struct { off int } -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - // advance the stream b n bytes. 
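The `compressedBlockOverAlloc` constant introduced above gives literal and output buffers a little spare capacity, so the new wide copy loops may write slightly past the logical end, and the "Re-cap to get extra size" re-slice makes that slack visible again after a capacity-clamped decode. A standalone sketch of the pattern, assuming a 16-byte over-allocation as in the hunk:

```go
package main

import "fmt"

// overAlloc mirrors compressedBlockOverAlloc: tail slack so a wide copy
// loop may overshoot the logical end without touching foreign memory.
const overAlloc = 16

func main() {
	n := 100
	backing := make([]byte, n, n+overAlloc)

	// Decoding hands out a capacity-clamped view (three-index slice),
	// in the spirit of Decompress4X(b.literalBuf[:0:litRegenSize], ...).
	decoded := backing[:80:80]
	fmt.Println(cap(decoded)) // 80: the slack is hidden

	// ...so the caller "re-caps" from the backing buffer to restore it.
	decoded = backing[:len(decoded)]
	fmt.Println(cap(decoded)) // 116
}
```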
func (b *byteReader) advance(n uint) { b.off += int(n) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 9fcdaac1dc..286c8f9d71 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.history.setDict(&dict) } - - if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { - return dst, ErrDecoderSizeExceeded + if frame.WindowSize > d.o.maxWindowSize { + return dst, ErrWindowSizeExceeded } - if frame.FrameContentSize < 1<<30 { - // Never preallocate more than 1 GB up front. + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { + return dst, ErrDecoderSizeExceeded + } if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) copy(dst2, dst) dst = dst2 } } + if cap(dst) == 0 { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. @@ -437,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } - if len(next.b) > 0 { + if !d.o.ignoreChecksum && len(next.b) > 0 { n, err := d.current.crc.Write(next.b) if err == nil { if n != len(next.b) { @@ -449,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { got := d.current.crc.Sum64() var tmp [4]byte binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { if debugDecoder { println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") } @@ -533,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) { // Update/Check CRC if d.frame.HasCheckSum { - d.frame.crc.Write(d.current.b) + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } if d.current.d.Last { - d.current.err = d.frame.checkCRC() + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } if d.current.err != nil { println("CRC error:", d.current.err) return false @@ -629,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error { // Create Decoder: // ASYNC: -// Spawn 4 go routines. -// 0: Read frames and decode blocks. -// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. -// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. -// 3: Wait for stream history, execute sequences, send stream history. +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() br := readerWrapper{r: r} - var seqPrepare = make(chan *blockDec, d.o.concurrent) var seqDecode = make(chan *blockDec, d.o.concurrent) var seqExecute = make(chan *blockDec, d.o.concurrent) - // Async 1: Prepare blocks... 
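The decoder.go hunk above collapses the old four-goroutine stream decoder into three stages by folding literal decoding into the frame-reading loop (via the `decodeBlock` closure added further down). A schematic of the new shape, with invented types and stage bodies, purely to show the channel topology:

```go
package main

import "fmt"

type block struct {
	id        int
	literals  string
	sequences string
}

func main() {
	const concurrent = 4
	seqDecode := make(chan *block, concurrent)  // stage 0 -> stage 1
	seqExecute := make(chan *block, concurrent) // stage 1 -> stage 2
	output := make(chan string, concurrent)

	// Stage 1: decode sequences (the old "Async 2").
	go func() {
		for b := range seqDecode {
			b.sequences = fmt.Sprintf("seqs(%d)", b.id)
			seqExecute <- b
		}
		close(seqExecute)
	}()

	// Stage 2: execute sequences against history (the old "Async 3").
	go func() {
		for b := range seqExecute {
			output <- b.literals + "+" + b.sequences
		}
		close(output)
	}()

	// Stage 0: read blocks and decode literals inline, replacing the
	// dedicated "Async 1: Prepare blocks" goroutine removed above.
	go func() {
		for i := 0; i < 3; i++ {
			seqDecode <- &block{id: i, literals: fmt.Sprintf("lits(%d)", i)}
		}
		close(seqDecode)
	}()

	for s := range output {
		fmt.Println(s)
	}
}
```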
- go func() { - var hist history - var hasErr bool - for block := range seqPrepare { - if hasErr { - if block != nil { - seqDecode <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history") - } - hist.reset() - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - continue - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - close(seqDecode) - }() - - // Async 2: Decode sequences... + // Async 1: Decode sequences... go func() { var hist history var hasErr bool @@ -696,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch } if block.async.newHist != nil { if debugDecoder { - println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) } hist.decoders = block.async.newHist.decoders hist.recentOffsets = block.async.newHist.recentOffsets @@ -750,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch } if block.async.newHist != nil { if debugDecoder { - println("Async 3: new history") + println("Async 2: new history") } hist.windowSize = block.async.newHist.windowSize hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer @@ -837,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch decodeStream: for { + var hist history + var hasErr bool + + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } frame := d.frame if debugDecoder { println("New frame...") @@ -863,7 +856,7 @@ decodeStream: case <-ctx.Done(): case dec := <-d.decoders: dec.sendErr(err) - seqPrepare <- dec + decodeBlock(dec) } break decodeStream } @@ -883,6 +876,10 @@ decodeStream: if debugDecoder { println("Alloc History:", h.allocFrameBuffer) } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } dec.async.newHist = &h dec.async.fcs = frame.FrameContentSize historySent = true @@ -909,7 +906,7 @@ decodeStream: } err = dec.err last := dec.Last - seqPrepare <- dec + decodeBlock(dec) if err != nil { break decodeStream } @@ -918,7 +915,7 @@ decodeStream: } } } - close(seqPrepare) + close(seqDecode) wg.Wait() d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index fd05c9bb01..c70e6fa0f7 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -19,6 +19,7 @@ type decoderOptions struct { maxDecodedSize uint64 maxWindowSize uint64 dicts []dict + ignoreChecksum bool } func (o *decoderOptions) 
setDefault() { @@ -31,7 +32,7 @@ func (o *decoderOptions) setDefault() { if o.concurrent > 4 { o.concurrent = 4 } - o.maxDecodedSize = 1 << 63 + o.maxDecodedSize = 64 << 30 } // WithDecoderLowmem will set whether to use a lower amount of memory, @@ -66,7 +67,7 @@ func WithDecoderConcurrency(n int) DOption { // WithDecoderMaxMemory allows to set a maximum decoded size for in-memory // non-streaming operations or maximum window size for streaming operations. // This can be used to control memory usage of potentially hostile content. -// Maximum and default is 1 << 63 bytes. +// Maximum is 1 << 63 bytes. Default is 64GiB. func WithDecoderMaxMemory(n uint64) DOption { return func(o *decoderOptions) error { if n == 0 { @@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption { return nil } } + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 602c05ee0c..c769f6941d 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -156,8 +156,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -518,8 +518,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match @@ -674,8 +674,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -1047,8 +1047,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) // We have at least 4 byte match. // No need to check backwards. 
We come straight from a match diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d6b3104240..7ff0c64fa3 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -127,8 +127,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -439,8 +439,8 @@ encodeLoop: var t int32 for { - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -785,8 +785,8 @@ encodeLoop: panic("offset0 was 0") } - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] @@ -969,7 +969,7 @@ encodeLoop: te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) e.longTable[longHash1] = te0 e.longTable[longHash2] = te1 e.markLongShardDirty(longHash1) @@ -1002,8 +1002,8 @@ encodeLoop: } // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) // We have at least 4 byte match. // No need to check backwards. We come straight from a match diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index dcc987a7cb..e6b1d01cf6 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } // If we can do everything in one block, prefer that. - if len(src) <= maxCompressedBlockSize { + if len(src) <= e.o.blockSize { enc.Reset(e.o.dict, true) // Slightly faster with no history and everything in one block. if e.o.crc { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 11089d2232..fa0a633f38 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error { return ErrWindowSizeTooSmall } d.history.windowSize = int(d.WindowSize) - if d.o.lowMem && d.history.windowSize < maxBlockSize { + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or very small window size. 
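Two of the changes in this bump are user-visible at the API level: the default `maxDecodedSize` drops from effectively unlimited to 64GiB, and the new `IgnoreChecksum` option makes the frame decoder consume the 4 checksum bytes without verifying them (the `consumeCRC` path added below). A hedged sketch of opting in from the caller's side:

```go
package main

import "github.com/klauspost/compress/zstd"

func main() {
	dec, err := zstd.NewReader(nil,
		// Set an explicit ceiling rather than relying on the new 64GiB
		// default; this bounds memory for hostile input.
		zstd.WithDecoderMaxMemory(1<<30), // 1 GiB cap for DecodeAll output
		// Skip checksum verification; the checksum bytes are still read.
		zstd.IgnoreChecksum(true),
	)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
}
```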
d.history.allocFrameBuffer = d.history.windowSize * 2 - // TODO: Maybe use FrameContent size } else { + // Alloc with one additional block d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } @@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error { if !d.HasCheckSum { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) // We can overwrite upper tmp now want, err := d.rawInput.readSmall(4) @@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) && !ignoreCRC { + if d.o.ignoreChecksum { + return nil + } + + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + if !bytes.Equal(tmp[:], want) { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error { return nil } +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + // runDecoder will create a sync decoder that will decode a block of data. func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b @@ -326,6 +345,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.FrameContentSize != fcsUnknown { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } var err error for { err = dec.reset(d.rawInput, d.WindowSize) @@ -360,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { err = ErrFrameSizeMismatch } else if d.HasCheckSum { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go index bb3d4fd6c3..2f8860a722 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go @@ -5,8 +5,10 @@ package zstd import ( + "encoding/binary" "errors" "fmt" + "io" ) const ( @@ -178,10 +180,32 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<> 3) - // println(s.norm[:s.symbolLen], s.symbolLen) return s.buildDtable() } +func (s *fseDecoder) mustReadFrom(r io.Reader) { + fatalErr := func(err error) { + if err != nil 
{ + panic(err) + } + } + // dt [maxTablesize]decSymbol // Decompression table. + // symbolLen uint16 // Length of active part of the symbol table. + // actualTableLog uint8 // Selected tablelog. + // maxBits uint8 // Maximum number of additional bits + // // used for table creation to avoid allocations. + // stateTable [256]uint16 + // norm [maxSymbolValue + 1]int16 + // preDefined bool + fatalErr(binary.Read(r, binary.LittleEndian, &s.dt)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.norm)) + fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined)) +} + // decSymbol contains information about a state entry, // Including the state offset base, the output symbol and // the number of bits to read for the low part of the destination state. @@ -204,18 +228,10 @@ func (d decSymbol) newState() uint16 { return uint16(d >> 16) } -func (d decSymbol) baseline() uint32 { - return uint32(d >> 32) -} - func (d decSymbol) baselineInt() int { return int(d >> 32) } -func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { - *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) -} - func (d *decSymbol) setNBits(nBits uint8) { const mask = 0xffffffffffffff00 *d = (*d & mask) | decSymbol(nBits) @@ -231,11 +247,6 @@ func (d *decSymbol) setNewState(state uint16) { *d = (*d & mask) | decSymbol(state)<<16 } -func (d *decSymbol) setBaseline(baseline uint32) { - const mask = 0xffffffff - *d = (*d & mask) | decSymbol(baseline)<<32 -} - func (d *decSymbol) setExt(addBits uint8, baseline uint32) { const mask = 0xffff00ff *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) @@ -257,68 +268,6 @@ func (s *fseDecoder) setRLE(symbol decSymbol) { s.dt[0] = symbol } -// buildDtable will build the decoding table. 
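For readers following the `decSymbol` accessors above: a whole FSE state entry is one packed uint64. A standalone re-derivation of the layout, mirroring (but not being) the vendored type:

```go
package main

import "fmt"

// Layout taken from the accessors above: nbits in byte 0, addBits in
// byte 1, newState in bytes 2-3, baseline in bytes 4-7.
type sym uint64

func pack(nbits, addBits uint8, newState uint16, baseline uint32) sym {
	return sym(nbits) | sym(addBits)<<8 | sym(newState)<<16 | sym(baseline)<<32
}

func (d sym) nbBits() uint8    { return uint8(d) }
func (d sym) addBits() uint8   { return uint8(d >> 8) }
func (d sym) newState() uint16 { return uint16(d >> 16) }
func (d sym) baseline() uint32 { return uint32(d >> 32) }

func main() {
	s := pack(5, 3, 1024, 1<<20)
	fmt.Println(s.nbBits(), s.addBits(), s.newState(), s.baseline())
	// Output: 5 3 1024 1048576
}
```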
-func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} - // transform will transform the decoder table into a table usable for // decoding without having to apply the transformation while decoding. // The state will contain the base value and the number of bits to read. @@ -352,34 +301,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { s.state = dt[br.getBits(tableLog)] } -// next returns the current symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (s *fseState) next(br *bitReader) { - lowBits := uint16(br.getBits(s.state.nbBits())) - s.state = s.dt[s.state.newState()+lowBits] -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (s *fseState) finished(br *bitReader) bool { - return br.finished() && s.state.nbBits() > 0 -} - -// final returns the current state symbol without decoding the next. -func (s *fseState) final() (int, uint8) { - return s.state.baselineInt(), s.state.addBits() -} - // final returns the current state symbol without decoding the next. func (s decSymbol) final() (int, uint8) { return s.baselineInt(), s.addBits() } - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. 
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { - lowBits := br.get16BitsFast(s.state.nbBits()) - s.state = s.dt[s.state.newState()+lowBits] - return s.state.baseline(), s.state.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 0000000000..e74df436cf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,64 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: (*uint16)(&s.stateTable[0]), + norm: (*int16)(&s.norm[0]), + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 0000000000..da32b4420e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,127 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. 
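The new fse_decoder_amd64.go/.s pair above and fse_decoder_generic.go below use complementary build tags so that exactly one `buildDtable` is compiled per platform. A compilable skeleton of that dispatch pattern; the package and function bodies are invented stand-ins (in the vendored code the amd64 body is assembly):

```go
// fse_fast_amd64.go: chosen on amd64 builds unless the noasm tag is set.
//go:build amd64 && !appengine && !noasm && gc

package fsedemo

// In the vendored code this forwards to buildDtable_asm; a plain Go
// body keeps this skeleton compilable.
func buildDtable(tableLog uint8) int { return 1 << tableLog }
```

```go
// fse_fast_generic.go: the exact negation, so every platform gets one.
//go:build !amd64 || appengine || !gc || noasm

package fsedemo

func buildDtable(tableLog uint8) int { return 1 << tableLog }
```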
+ +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 0000000000..332e51fe44 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index 5442061b18..ab26326a8f 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.clearCount = maxCount != 0 } -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *fseEncoder) prepare() (*fseEncoder, error) { - if s == nil { - s = &fseEncoder{} - } - s.useRLE = false - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - return s, nil -} - // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { @@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.state = c.stateTable[lu] } -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go deleted file mode 100644 index 7f2210e053..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fuzz.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build ignorecrc -// +build ignorecrc - -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// ignoreCRC can be used for fuzz testing to ignore CRC values... -const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go deleted file mode 100644 index 6811c68a89..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !ignorecrc -// +build !ignorecrc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// ignoreCRC can be used for fuzz testing to ignore CRC values... -const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go index cf33f29a1b..5d73c21ebd 100644 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 { return (uint32(u) * prime4bytes) >> (32 - length) } } - -// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash3(u uint32, h uint8) uint32 { - return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 819f1461b7..df04472030 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -73,6 +73,7 @@ type sequenceDecs struct { seqSize int windowSize int maxBits uint8 + maxSyncLen uint64 } // initialize all 3 decoders from the stream input. @@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro return nil } -// decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. 
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - // execute will execute the decoded sequence with the provided history. // The sequence must be evaluated before being sent. func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + // Ensure we have enough output size... if len(s.out)+s.seqSize > cap(s.out) { addBytes := s.seqSize + len(s.out) @@ -327,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { } } } + // Add final literals copy(out[t:], s.literals) if debugDecoder { @@ -341,14 +203,18 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { } // decode sequences from the stream with the provided history. 
-func (s *sequenceDecs) decodeSync(history *history) error { +func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + br := s.br seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - hist := history.b[history.ignoreBuffer:] out := s.out maxBlockSize := maxCompressedBlockSize if s.windowSize < maxBlockSize { @@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) + return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize) } if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions @@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error { if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } // we may be in dictionary. dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) } end := dictO + ml if end > len(s.dict) { @@ -530,6 +396,7 @@ func (s *sequenceDecs) decodeSync(history *history) error { ofState = ofTable[ofState.newState()&maxTableMask] } else { bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) llState = llTable[(llState.newState()+lowBits)&maxTableMask] @@ -543,8 +410,8 @@ func (s *sequenceDecs) decodeSync(history *history) error { } // Check if space for literals - if len(s.literals)+len(s.out)-startSize > maxBlockSize { - return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize) + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize) } // Add final literals @@ -552,16 +419,6 @@ func (s *sequenceDecs) decodeSync(history *history) error { return br.close() } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) update(br *bitReader) { - // Max 8 bits - s.litLengths.state.next(br) - // Max 9 bits - s.matchLengths.state.next(br) - // Max 8 bits - s.offsets.state.next(br) -} - var bitMask [16]uint16 func init() { @@ -570,87 +427,6 @@ func init() { } } -// update states, at least 27 bits must be available. -func (s *sequenceDecs) updateAlt(br *bitReader) { - // Update all 3 states at once. Approx 20% faster. 
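The long inlined branch being deleted above implements zstd's repeat-offset rule (offset codes 1-3 select recent offsets, shifted by one when the sequence has no literals). Pulled out as a standalone helper with an invented signature; the logic follows the removed code:

```go
package main

import "fmt"

// adjustOffset mirrors the inlined repeat-offset handling removed above.
// prev holds the three most recent offsets; mo is the decoded offset
// value, ll the literal length, moBits the number of offset bits.
func adjustOffset(prev *[3]int, mo, ll int, moBits uint8) int {
	if moBits > 1 {
		// A real offset: rotate it into the recent-offset history.
		prev[2], prev[1], prev[0] = prev[1], prev[0], mo
		return mo
	}
	if ll == 0 {
		// literals_length == 0 shifts the mapping: 1 means
		// Repeated_Offset2, 2 means Repeated_Offset3, and 3 means
		// Repeated_Offset1 - 1.
		mo++
	}
	if mo == 0 {
		return prev[0] // Repeated_Offset1, history unchanged
	}
	var temp int
	if mo == 3 {
		temp = prev[0] - 1
	} else {
		temp = prev[mo]
	}
	if temp == 0 {
		temp = 1 // 0 is invalid; corrupted input is forced to offset 1
	}
	if mo != 1 {
		prev[2] = prev[1]
	}
	prev[1] = prev[0]
	prev[0] = temp
	return temp
}

func main() {
	prev := [3]int{1, 4, 8}
	// Offset code 2 picks the third recent offset; history rotates.
	fmt.Println(adjustOffset(&prev, 2, 5, 0), prev) // 8 [8 1 4]
}
```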
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - - nBits := a.nbBits() + b.nbBits() + c.nbBits() - if nBits == 0 { - s.litLengths.state.state = s.litLengths.state.dt[a.newState()] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] - s.offsets.state.state = s.offsets.state.dt[c.newState()] - return - } - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) - s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] - - lowBits = uint16(bits >> (c.nbBits() & 31)) - lowBits &= bitMask[b.nbBits()&15] - s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] - - lowBits = uint16(bits) & bitMask[c.nbBits()&15] - s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] -} - -// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. -func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { - // Final will not read from stream. - ll, llB := llState.final() - ml, mlB := mlState.final() - mo, moB := ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - return - } - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - return - } - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - return -} - func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { // Final will not read from stream. ll, llB := llState.final() diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 0000000000..847b322ae3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,362 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. 
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//go:noescape
+func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//go:noescape
+func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
+
+// decode sequences from the stream with the provided history but without a dictionary.
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
+	if len(s.dict) > 0 {
+		return false, nil
+	}
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
+		return false, nil
+	}
+	useSafe := false
+	if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
+		useSafe = true
+	}
+	if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
+		useSafe = true
+	}
+	if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
+		useSafe = true
+	}
+
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeSyncAsmContext{
+		llTable:     s.litLengths.fse.dt[:maxTablesize],
+		mlTable:     s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:     s.offsets.fse.dt[:maxTablesize],
+		llState:     uint64(s.litLengths.state.state),
+		mlState:     uint64(s.matchLengths.state.state),
+		ofState:     uint64(s.offsets.state.state),
+		iteration:   s.nSeqs - 1,
+		litRemain:   len(s.literals),
+		out:         s.out,
+		outPosition: len(s.out),
+		literals:    s.literals,
+		windowSize:  s.windowSize,
+		history:     hist,
+	}
+
+	s.seqSize = 0
+	startSize := len(s.out)
+
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
+		}
+	} else {
+		if useSafe {
+			errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
+		}
+	}
+	switch errCode {
+	case noError:
+		break
+
+	case errorMatchLenOfsMismatch:
+		return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
+
+	case errorMatchLenTooBig:
+		return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
+
+	case errorMatchOffTooBig:
+		return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
+			ctx.mo, ctx.outPosition+len(hist)-startSize)
+
+	case errorNotEnoughLiterals:
+		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
+			ctx.ll, ctx.litRemain+ctx.ll)
+
+	case errorNotEnoughSpace:
+		size := ctx.outPosition + ctx.ll + ctx.ml
+		if debugDecoder {
+			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
+		}
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+
+	default:
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+		return true, err
+	}
+
+	s.literals = s.literals[ctx.litPosition:]
+	t := ctx.outPosition
+	s.out = s.out[:t]
+
+	// Add final literals
+	s.out = append(s.out, s.literals...)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
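Since assembly routines cannot conveniently return Go errors, the kernels above report an int status that the Go wrappers translate; the constants just defined are that contract. A toy version of the convention, with invented names and status values:

```go
package main

import "fmt"

// Status codes in the style of the constants above (values invented).
const (
	stOK             = 0
	stLenOfsMismatch = 1
	stMatchLenTooBig = 2
)

// kernel stands in for an assembler routine such as
// sequenceDecs_decode_amd64: no error values, just a status int.
func kernel() int { return stMatchLenTooBig }

// statusToError maps a kernel status plus context back to a Go error,
// as the decode wrappers above do with their switch statements.
func statusToError(code, ml int) error {
	switch code {
	case stOK:
		return nil
	case stLenOfsMismatch:
		return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
	case stMatchLenTooBig:
		return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
	default:
		return fmt.Errorf("unhandled status %d", code)
	}
}

func main() {
	fmt.Println(statusToError(kernel(), 131075))
}
```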
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+	br := s.br
+
+	maxBlockSize := maxCompressedBlockSize
+	if s.windowSize < maxBlockSize {
+		maxBlockSize = s.windowSize
+	}
+
+	ctx := decodeAsmContext{
+		llTable:   s.litLengths.fse.dt[:maxTablesize],
+		mlTable:   s.matchLengths.fse.dt[:maxTablesize],
+		ofTable:   s.offsets.fse.dt[:maxTablesize],
+		llState:   uint64(s.litLengths.state.state),
+		mlState:   uint64(s.matchLengths.state.state),
+		ofState:   uint64(s.offsets.state.state),
+		seqs:      seqs,
+		iteration: len(seqs) - 1,
+		litRemain: len(s.literals),
+	}
+
+	s.seqSize = 0
+	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
+	var errCode int
+	if cpuinfo.HasBMI2() {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
+		}
+	} else {
+		if lte56bits {
+			errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
+		} else {
+			errCode = sequenceDecs_decode_amd64(s, br, &ctx)
+		}
+	}
+	if errCode != 0 {
+		i := len(seqs) - ctx.iteration - 1
+		switch errCode {
+		case errorMatchLenOfsMismatch:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+
+		case errorMatchLenTooBig:
+			ml := ctx.seqs[i].ml
+			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+
+		case errorNotEnoughLiterals:
+			ll := ctx.seqs[i].ll
+			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d are available", ll, ctx.litRemain+ll)
+		}
+
+		return fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
+	}
+
+	if ctx.litRemain < 0 {
+		return fmt.Errorf("literal count is too big: total available %d, total requested %d",
+			len(s.literals), len(s.literals)-ctx.litRemain)
+	}
+
+	s.seqSize += ctx.litRemain
+	if s.seqSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+	}
+	err := br.close()
+	if err != nil {
+		printf("Closing sequences: %v, %+v\n", err, *br)
+	}
+	return err
+}
+
+// --------------------------------------------------------------------------------
+
+type executeAsmContext struct {
+	seqs        []seqVals
+	seqIndex    int
+	out         []byte
+	history     []byte
+	literals    []byte
+	outPosition int
+	litPosition int
+	windowSize  int
+}
+
+// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
+//
+// Returns false if a match offset is too big.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//go:noescape
+func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
+
+// Same as above, but with safe memory copies.
+//go:noescape
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
+
+// executeSimple handles the case when no dictionary is used.
+func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output capacity.
+	if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
+		addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
+		s.out = append(s.out, make([]byte, addBytes)...)
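+		// The append above only grows the capacity; shrink the length back so
+		// the assembly can write the decoded output into the spare capacity.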
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 0000000000..71e64e0612 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4016 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm +// +build !appengine,!noasm,gc,!noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
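	// Fast path: when at least 8 bytes remain in the input, refill with a
	// single unaligned 8-byte load; otherwise fall back to the byte-by-byte
	// loop below.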
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ 
sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_adjust_end + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R15 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R15 + ADDQ R15, AX + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_adjust_end + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP 
sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_adjust_end: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_adjust_end + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + 
CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_adjust_end + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_adjust_end: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + 
MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB 
copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_adjust_end + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 
+ MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_adjust_end + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_end + 
+sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + 
+copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + ADDQ CX, BX + NEGL CX + SHRQ CL, R14 + SHRQ $0x20, AX + TESTQ CX, CX + CMOVQEQ CX, R14 + ADDQ R14, AX + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE 
sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_end + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB 
copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error 
+sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_end + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + LEAQ 144(CX), R15 + ADDQ (R15)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_adjust_end: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + 
MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ 
$0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 0000000000..c3452bc3a9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. 
+ start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at a time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index ffffcbc254..29c15c8c4e 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -18,26 +18,44 @@ const ZipMethodWinZip = 93 // See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT const ZipMethodPKWare = 20 -var zipReaderPool sync.Pool +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} // newZipReader creates a pooled zip decompressor. -func newZipReader(r io.Reader) io.ReadCloser { - dec, ok := zipReaderPool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true)) - if err != nil { - panic(err) +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) + if err != nil { + panic(err) + } + dec = d } - dec = d + return &pooledZipReader{dec: dec, pool: pool} } - return &pooledZipReader{dec: dec} } type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - dec *Decoder + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder } func (r *pooledZipReader) Read(p []byte) (n int, err error) { @@ -48,8 +66,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) { } dec, err := r.dec.Read(p) if err == io.EOF { - err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil } return dec, err @@ -61,7 +79,7 @@ func (r *pooledZipReader) Close() error { var err error if r.dec != nil { err = r.dec.Reset(nil) - zipReaderPool.Put(r.dec) + r.pool.Put(r.dec) r.dec = nil } return err @@ -115,6 +133,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { // ZipDecompressor returns a decompressor that can be registered with zip libraries. // See ZipCompressor for example. 
-func ZipDecompressor() func(r io.Reader) io.ReadCloser { - return newZipReader +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) } diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index c1c90b4a07..3eb3f1c826 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) { } } -// matchLenFast does matching, but will not match the last up to 7 bytes. -func matchLenFast(a, b []byte) int { - endI := len(a) & (math.MaxInt32 - 7) - for i := 0; i < endI; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + bits.TrailingZeros64(diff)>>3 - } - } - return endI -} - // matchLen returns the maximum length. // a must be the shortest of the two. // The function also returns whether all bytes matched. diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go index 3a9cc1036e..02b3515bdc 100644 --- a/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go +++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.28.0 +// protoc v3.20.1 // source: core.proto package proto @@ -807,6 +807,69 @@ func (x *Order) GetV2Authorizations() []int64 { return nil } +type CRLEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + Reason int32 `protobuf:"varint,2,opt,name=reason,proto3" json:"reason,omitempty"` + RevokedAt int64 `protobuf:"varint,3,opt,name=revokedAt,proto3" json:"revokedAt,omitempty"` // Unix timestamp (nanoseconds) +} + +func (x *CRLEntry) Reset() { + *x = CRLEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_core_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CRLEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CRLEntry) ProtoMessage() {} + +func (x *CRLEntry) ProtoReflect() protoreflect.Message { + mi := &file_core_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CRLEntry.ProtoReflect.Descriptor instead. 
+func (*CRLEntry) Descriptor() ([]byte, []int) { + return file_core_proto_rawDescGZIP(), []int{8} +} + +func (x *CRLEntry) GetSerial() string { + if x != nil { + return x.Serial + } + return "" +} + +func (x *CRLEntry) GetReason() int32 { + if x != nil { + return x.Reason + } + return 0 +} + +func (x *CRLEntry) GetRevokedAt() int64 { + if x != nil { + return x.RevokedAt + } + return 0 +} + var File_core_proto protoreflect.FileDescriptor var file_core_proto_rawDesc = []byte{ @@ -935,10 +998,16 @@ var file_core_proto_rawDesc = []byte{ 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, - 0x06, 0x10, 0x07, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, - 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x06, 0x10, 0x07, 0x22, 0x58, 0x0a, 0x08, 0x43, 0x52, 0x4c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x41, 0x74, 0x42, 0x2b, 0x5a, + 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x65, 0x74, 0x73, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x2f, 0x62, 0x6f, 0x75, 0x6c, 0x64, 0x65, 0x72, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -953,7 +1022,7 @@ func file_core_proto_rawDescGZIP() []byte { return file_core_proto_rawDescData } -var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_core_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_core_proto_goTypes = []interface{}{ (*Challenge)(nil), // 0: core.Challenge (*ValidationRecord)(nil), // 1: core.ValidationRecord @@ -963,6 +1032,7 @@ var file_core_proto_goTypes = []interface{}{ (*Registration)(nil), // 5: core.Registration (*Authorization)(nil), // 6: core.Authorization (*Order)(nil), // 7: core.Order + (*CRLEntry)(nil), // 8: core.CRLEntry } var file_core_proto_depIdxs = []int32{ 1, // 0: core.Challenge.validationrecords:type_name -> core.ValidationRecord @@ -1078,6 +1148,18 @@ func file_core_proto_init() { return nil } } + file_core_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CRLEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1085,7 +1167,7 @@ func file_core_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_core_proto_rawDesc, NumEnums: 0, - NumMessages: 8, + NumMessages: 9, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto index 06abe5e99e..946bb16c87 100644 --- 
a/vendor/github.com/letsencrypt/boulder/core/proto/core.proto +++ b/vendor/github.com/letsencrypt/boulder/core/proto/core.proto @@ -93,3 +93,9 @@ message Order { int64 created = 10; repeated int64 v2Authorizations = 11; } + +message CRLEntry { + string serial = 1; + int32 reason = 2; + int64 revokedAt = 3; // Unix timestamp (nanoseconds) +} diff --git a/vendor/github.com/letsencrypt/boulder/errors/errors.go b/vendor/github.com/letsencrypt/boulder/errors/errors.go index 3ca9988a6b..861d54ad17 100644 --- a/vendor/github.com/letsencrypt/boulder/errors/errors.go +++ b/vendor/github.com/letsencrypt/boulder/errors/errors.go @@ -1,3 +1,13 @@ +// Package errors provides internal-facing error types for use in Boulder. Many +// of these are transformed directly into Problem Details documents by the WFE. +// Some, like NotFound, may be handled internally. We avoid using Problem +// Details documents as part of our internal error system to avoid layering +// confusions. +// +// These errors are specifically for use in errors that cross RPC boundaries. +// An error type that does not need to be passed through an RPC can use a plain +// Go type locally. Our gRPC code is aware of these error types and will +// serialize and deserialize them automatically. package errors import ( @@ -12,7 +22,10 @@ import ( // BoulderError wrapping one of these types. type ErrorType int +// These numeric constants are used when sending berrors through gRPC. const ( + // InternalServer is deprecated. Instead, pass a plain Go error. That will get + // turned into a probs.InternalServerError by the WFE. InternalServer ErrorType = iota _ Malformed @@ -101,6 +114,20 @@ func RateLimitError(msg string, args ...interface{}) error { } } +func DuplicateCertificateError(msg string, args ...interface{}) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/duplicate-certificate-limit/", args...), + } +} + +func FailedValidationError(msg string, args ...interface{}) error { + return &BoulderError{ + Type: RateLimit, + Detail: fmt.Sprintf(msg+": see https://letsencrypt.org/docs/failed-validation-limit/", args...), + } +} + func RejectedIdentifierError(msg string, args ...interface{}) error { return New(RejectedIdentifier, msg, args...) 
} diff --git a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go index b3b68b7059..b8f4250901 100644 --- a/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go +++ b/vendor/github.com/letsencrypt/boulder/features/featureflag_string.go @@ -15,27 +15,35 @@ func _() { _ = x[StoreIssuerInfo-4] _ = x[StreamlineOrderAndAuthzs-5] _ = x[V1DisableNewValidations-6] - _ = x[CAAValidationMethods-7] - _ = x[CAAAccountURI-8] - _ = x[EnforceMultiVA-9] - _ = x[MultiVAFullResults-10] - _ = x[MandatoryPOSTAsGET-11] - _ = x[AllowV1Registration-12] - _ = x[StoreRevokerInfo-13] - _ = x[RestrictRSAKeySizes-14] - _ = x[FasterNewOrdersRateLimit-15] - _ = x[ECDSAForAll-16] - _ = x[ServeRenewalInfo-17] - _ = x[GetAuthzReadOnly-18] - _ = x[GetAuthzUseIndex-19] - _ = x[CheckFailedAuthorizationsFirst-20] - _ = x[AllowReRevocation-21] - _ = x[MozRevocationReasons-22] + _ = x[ExpirationMailerDontLookTwice-7] + _ = x[CAAValidationMethods-8] + _ = x[CAAAccountURI-9] + _ = x[EnforceMultiVA-10] + _ = x[MultiVAFullResults-11] + _ = x[MandatoryPOSTAsGET-12] + _ = x[AllowV1Registration-13] + _ = x[StoreRevokerInfo-14] + _ = x[RestrictRSAKeySizes-15] + _ = x[FasterNewOrdersRateLimit-16] + _ = x[ECDSAForAll-17] + _ = x[ServeRenewalInfo-18] + _ = x[GetAuthzReadOnly-19] + _ = x[GetAuthzUseIndex-20] + _ = x[CheckFailedAuthorizationsFirst-21] + _ = x[AllowReRevocation-22] + _ = x[MozRevocationReasons-23] + _ = x[OldTLSOutbound-24] + _ = x[OldTLSInbound-25] + _ = x[SHA1CSRs-26] + _ = x[AllowUnrecognizedFeatures-27] + _ = x[RejectDuplicateCSRExtensions-28] + _ = x[ROCSPStage1-29] + _ = x[ROCSPStage2-30] } -const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasons" +const _FeatureFlag_name = "unusedPrecertificateRevocationStripDefaultSchemePortNonCFSSLSignerStoreIssuerInfoStreamlineOrderAndAuthzsV1DisableNewValidationsExpirationMailerDontLookTwiceCAAValidationMethodsCAAAccountURIEnforceMultiVAMultiVAFullResultsMandatoryPOSTAsGETAllowV1RegistrationStoreRevokerInfoRestrictRSAKeySizesFasterNewOrdersRateLimitECDSAForAllServeRenewalInfoGetAuthzReadOnlyGetAuthzUseIndexCheckFailedAuthorizationsFirstAllowReRevocationMozRevocationReasonsOldTLSOutboundOldTLSInboundSHA1CSRsAllowUnrecognizedFeaturesRejectDuplicateCSRExtensionsROCSPStage1ROCSPStage2" -var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 148, 161, 175, 193, 211, 230, 246, 265, 289, 300, 316, 332, 348, 378, 395, 415} +var _FeatureFlag_index = [...]uint16{0, 6, 30, 52, 66, 81, 105, 128, 157, 177, 190, 204, 222, 240, 259, 275, 294, 318, 329, 345, 361, 377, 407, 424, 444, 458, 471, 479, 504, 532, 543, 554} func (i FeatureFlag) String() string { if i < 0 || i >= FeatureFlag(len(_FeatureFlag_index)-1) { diff --git a/vendor/github.com/letsencrypt/boulder/features/features.go b/vendor/github.com/letsencrypt/boulder/features/features.go index 4608d1d63f..ca4be39ab5 100644 --- a/vendor/github.com/letsencrypt/boulder/features/features.go +++ b/vendor/github.com/letsencrypt/boulder/features/features.go @@ -4,6 +4,7 @@ package features import ( "fmt" + "strings" "sync" ) @@ -18,6 +19,7 @@ const ( 
StoreIssuerInfo StreamlineOrderAndAuthzs V1DisableNewValidations + ExpirationMailerDontLookTwice // Currently in-use features // Check CAA and respect validationmethods parameter. @@ -77,6 +79,32 @@ const ( // with the certificate's keypair, the cert will be revoked with reason // keyCompromise, regardless of what revocation reason they request. MozRevocationReasons + // OldTLSOutbound allows the VA to negotiate TLS 1.0 and TLS 1.1 during + // HTTPS redirects. When it is set to false, the VA will only connect to + // HTTPS servers that support TLS 1.2 or above. + OldTLSOutbound + // OldTLSInbound controls whether the WFE rejects inbound requests using + // TLS 1.0 and TLS 1.1. Because WFE does not terminate TLS in production, + // we rely on the TLS-Version header (set by our reverse proxy). + OldTLSInbound + // SHA1CSRs controls whether the /acme/finalize endpoint rejects CSRs that + // are self-signed using SHA1. + SHA1CSRs + // AllowUnrecognizedFeatures is internal to the features package: if true, + // skip error when unrecognized feature flag names are passed. + AllowUnrecognizedFeatures + // RejectDuplicateCSRExtensions enables verification that submitted CSRs do + // not contain duplicate extensions. This behavior will be on by default in + // go1.19. + RejectDuplicateCSRExtensions + + // ROCSPStage1 enables querying Redis, live-signing response, and storing + // to Redis, but doesn't serve responses from Redis. + ROCSPStage1 + // ROCSPStage2 enables querying Redis, live-signing a response, and storing + // to Redis, and does serve responses from Redis when appropriate (when + // they are fresh, and agree with MariaDB's status for the certificate). + ROCSPStage2 ) // List of features and their default value, protected by fMu @@ -104,6 +132,14 @@ var features = map[FeatureFlag]bool{ CheckFailedAuthorizationsFirst: false, AllowReRevocation: false, MozRevocationReasons: false, + OldTLSOutbound: true, + OldTLSInbound: true, + SHA1CSRs: true, + AllowUnrecognizedFeatures: false, + ExpirationMailerDontLookTwice: false, + RejectDuplicateCSRExtensions: false, + ROCSPStage1: false, + ROCSPStage2: false, } var fMu = new(sync.RWMutex) @@ -120,17 +156,24 @@ func init() { } // Set accepts a list of features and whether they should -// be enabled or disabled, it will return a error if passed -// a feature name that it doesn't know +// be enabled or disabled. In the presence of unrecognized +// flags, it will return an error or not depending on the +// value of AllowUnrecognizedFeatures. 
func Set(featureSet map[string]bool) error { fMu.Lock() defer fMu.Unlock() + var unknown []string for n, v := range featureSet { f, present := nameToFeature[n] - if !present { - return fmt.Errorf("feature '%s' doesn't exist", n) + if present { + features[f] = v + } else { + unknown = append(unknown, n) } - features[f] = v + } + if len(unknown) > 0 && !features[AllowUnrecognizedFeatures] { + return fmt.Errorf("unrecognized feature flag names: %s", + strings.Join(unknown, ", ")) } return nil } diff --git a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go index 3457f5b12b..acaab25227 100644 --- a/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go +++ b/vendor/github.com/letsencrypt/boulder/goodkey/blocked.go @@ -10,7 +10,7 @@ import ( "github.com/letsencrypt/boulder/core" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // blockedKeys is a type for maintaining a map of SHA256 hashes diff --git a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go index a5b3f0807a..50f556be01 100644 --- a/vendor/github.com/letsencrypt/boulder/revocation/reasons.go +++ b/vendor/github.com/letsencrypt/boulder/revocation/reasons.go @@ -31,7 +31,6 @@ var ReasonToString = map[Reason]string{ var UserAllowedReasons = map[Reason]struct{}{ ocsp.Unspecified: {}, ocsp.KeyCompromise: {}, - ocsp.AffiliationChanged: {}, ocsp.Superseded: {}, ocsp.CessationOfOperation: {}, } @@ -42,7 +41,6 @@ var UserAllowedReasons = map[Reason]struct{}{ var AdminAllowedReasons = map[Reason]struct{}{ ocsp.Unspecified: {}, ocsp.KeyCompromise: {}, - ocsp.AffiliationChanged: {}, ocsp.Superseded: {}, ocsp.CessationOfOperation: {}, ocsp.PrivilegeWithdrawn: {}, diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go index b88df399a3..6bb28e0f29 100644 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go +++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.6 +// protoc-gen-go v1.28.0 +// protoc v3.20.1 // source: sa.proto package proto @@ -565,6 +565,53 @@ func (x *Count) GetCount() int64 { return 0 } +type Timestamps struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamps []int64 `protobuf:"varint,1,rep,packed,name=timestamps,proto3" json:"timestamps,omitempty"` // Unix timestamp (nanoseconds) +} + +func (x *Timestamps) Reset() { + *x = Timestamps{} + if protoimpl.UnsafeEnabled { + mi := &file_sa_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamps) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamps) ProtoMessage() {} + +func (x *Timestamps) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamps.ProtoReflect.Descriptor instead. 
+func (*Timestamps) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{10} +} + +func (x *Timestamps) GetTimestamps() []int64 { + if x != nil { + return x.Timestamps + } + return nil +} + type CountCertificatesByNamesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -577,7 +624,7 @@ type CountCertificatesByNamesRequest struct { func (x *CountCertificatesByNamesRequest) Reset() { *x = CountCertificatesByNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[10] + mi := &file_sa_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -590,7 +637,7 @@ func (x *CountCertificatesByNamesRequest) String() string { func (*CountCertificatesByNamesRequest) ProtoMessage() {} func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[10] + mi := &file_sa_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -603,7 +650,7 @@ func (x *CountCertificatesByNamesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CountCertificatesByNamesRequest.ProtoReflect.Descriptor instead. func (*CountCertificatesByNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{10} + return file_sa_proto_rawDescGZIP(), []int{11} } func (x *CountCertificatesByNamesRequest) GetRange() *Range { @@ -631,7 +678,7 @@ type CountByNames struct { func (x *CountByNames) Reset() { *x = CountByNames{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[11] + mi := &file_sa_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -644,7 +691,7 @@ func (x *CountByNames) String() string { func (*CountByNames) ProtoMessage() {} func (x *CountByNames) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[11] + mi := &file_sa_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -657,7 +704,7 @@ func (x *CountByNames) ProtoReflect() protoreflect.Message { // Deprecated: Use CountByNames.ProtoReflect.Descriptor instead. func (*CountByNames) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{11} + return file_sa_proto_rawDescGZIP(), []int{12} } func (x *CountByNames) GetCounts() map[string]int64 { @@ -679,7 +726,7 @@ type CountRegistrationsByIPRequest struct { func (x *CountRegistrationsByIPRequest) Reset() { *x = CountRegistrationsByIPRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[12] + mi := &file_sa_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -692,7 +739,7 @@ func (x *CountRegistrationsByIPRequest) String() string { func (*CountRegistrationsByIPRequest) ProtoMessage() {} func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[12] + mi := &file_sa_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -705,7 +752,7 @@ func (x *CountRegistrationsByIPRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CountRegistrationsByIPRequest.ProtoReflect.Descriptor instead. 
func (*CountRegistrationsByIPRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{12} + return file_sa_proto_rawDescGZIP(), []int{13} } func (x *CountRegistrationsByIPRequest) GetIp() []byte { @@ -736,7 +783,7 @@ type CountInvalidAuthorizationsRequest struct { func (x *CountInvalidAuthorizationsRequest) Reset() { *x = CountInvalidAuthorizationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[13] + mi := &file_sa_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -749,7 +796,7 @@ func (x *CountInvalidAuthorizationsRequest) String() string { func (*CountInvalidAuthorizationsRequest) ProtoMessage() {} func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[13] + mi := &file_sa_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -762,7 +809,7 @@ func (x *CountInvalidAuthorizationsRequest) ProtoReflect() protoreflect.Message // Deprecated: Use CountInvalidAuthorizationsRequest.ProtoReflect.Descriptor instead. func (*CountInvalidAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{13} + return file_sa_proto_rawDescGZIP(), []int{14} } func (x *CountInvalidAuthorizationsRequest) GetRegistrationID() int64 { @@ -798,7 +845,7 @@ type CountOrdersRequest struct { func (x *CountOrdersRequest) Reset() { *x = CountOrdersRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[14] + mi := &file_sa_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -811,7 +858,7 @@ func (x *CountOrdersRequest) String() string { func (*CountOrdersRequest) ProtoMessage() {} func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[14] + mi := &file_sa_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -824,7 +871,7 @@ func (x *CountOrdersRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CountOrdersRequest.ProtoReflect.Descriptor instead. func (*CountOrdersRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{14} + return file_sa_proto_rawDescGZIP(), []int{15} } func (x *CountOrdersRequest) GetAccountID() int64 { @@ -853,7 +900,7 @@ type CountFQDNSetsRequest struct { func (x *CountFQDNSetsRequest) Reset() { *x = CountFQDNSetsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[15] + mi := &file_sa_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -866,7 +913,7 @@ func (x *CountFQDNSetsRequest) String() string { func (*CountFQDNSetsRequest) ProtoMessage() {} func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[15] + mi := &file_sa_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -879,7 +926,7 @@ func (x *CountFQDNSetsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CountFQDNSetsRequest.ProtoReflect.Descriptor instead. 
func (*CountFQDNSetsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{15} + return file_sa_proto_rawDescGZIP(), []int{16} } func (x *CountFQDNSetsRequest) GetWindow() int64 { @@ -907,7 +954,7 @@ type FQDNSetExistsRequest struct { func (x *FQDNSetExistsRequest) Reset() { *x = FQDNSetExistsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[16] + mi := &file_sa_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -920,7 +967,7 @@ func (x *FQDNSetExistsRequest) String() string { func (*FQDNSetExistsRequest) ProtoMessage() {} func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[16] + mi := &file_sa_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -933,7 +980,7 @@ func (x *FQDNSetExistsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FQDNSetExistsRequest.ProtoReflect.Descriptor instead. func (*FQDNSetExistsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{16} + return file_sa_proto_rawDescGZIP(), []int{17} } func (x *FQDNSetExistsRequest) GetDomains() []string { @@ -955,7 +1002,7 @@ type PreviousCertificateExistsRequest struct { func (x *PreviousCertificateExistsRequest) Reset() { *x = PreviousCertificateExistsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[17] + mi := &file_sa_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -968,7 +1015,7 @@ func (x *PreviousCertificateExistsRequest) String() string { func (*PreviousCertificateExistsRequest) ProtoMessage() {} func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[17] + mi := &file_sa_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -981,7 +1028,7 @@ func (x *PreviousCertificateExistsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PreviousCertificateExistsRequest.ProtoReflect.Descriptor instead. func (*PreviousCertificateExistsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{17} + return file_sa_proto_rawDescGZIP(), []int{18} } func (x *PreviousCertificateExistsRequest) GetDomain() string { @@ -1009,7 +1056,7 @@ type Exists struct { func (x *Exists) Reset() { *x = Exists{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[18] + mi := &file_sa_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1022,7 +1069,7 @@ func (x *Exists) String() string { func (*Exists) ProtoMessage() {} func (x *Exists) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[18] + mi := &file_sa_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1035,7 +1082,7 @@ func (x *Exists) ProtoReflect() protoreflect.Message { // Deprecated: Use Exists.ProtoReflect.Descriptor instead. 
func (*Exists) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{18} + return file_sa_proto_rawDescGZIP(), []int{19} } func (x *Exists) GetExists() bool { @@ -1059,7 +1106,7 @@ type AddSerialRequest struct { func (x *AddSerialRequest) Reset() { *x = AddSerialRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[19] + mi := &file_sa_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1072,7 +1119,7 @@ func (x *AddSerialRequest) String() string { func (*AddSerialRequest) ProtoMessage() {} func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[19] + mi := &file_sa_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1085,7 +1132,7 @@ func (x *AddSerialRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddSerialRequest.ProtoReflect.Descriptor instead. func (*AddSerialRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{19} + return file_sa_proto_rawDescGZIP(), []int{20} } func (x *AddSerialRequest) GetRegID() int64 { @@ -1136,7 +1183,7 @@ type AddCertificateRequest struct { func (x *AddCertificateRequest) Reset() { *x = AddCertificateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[20] + mi := &file_sa_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1149,7 +1196,7 @@ func (x *AddCertificateRequest) String() string { func (*AddCertificateRequest) ProtoMessage() {} func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[20] + mi := &file_sa_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1162,7 +1209,7 @@ func (x *AddCertificateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCertificateRequest.ProtoReflect.Descriptor instead. func (*AddCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{20} + return file_sa_proto_rawDescGZIP(), []int{21} } func (x *AddCertificateRequest) GetDer() []byte { @@ -1211,7 +1258,7 @@ type AddCertificateResponse struct { func (x *AddCertificateResponse) Reset() { *x = AddCertificateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[21] + mi := &file_sa_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1224,7 +1271,7 @@ func (x *AddCertificateResponse) String() string { func (*AddCertificateResponse) ProtoMessage() {} func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[21] + mi := &file_sa_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1237,7 +1284,7 @@ func (x *AddCertificateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCertificateResponse.ProtoReflect.Descriptor instead. 
func (*AddCertificateResponse) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{21} + return file_sa_proto_rawDescGZIP(), []int{22} } func (x *AddCertificateResponse) GetDigest() string { @@ -1258,7 +1305,7 @@ type OrderRequest struct { func (x *OrderRequest) Reset() { *x = OrderRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[22] + mi := &file_sa_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1271,7 +1318,7 @@ func (x *OrderRequest) String() string { func (*OrderRequest) ProtoMessage() {} func (x *OrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[22] + mi := &file_sa_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1284,7 +1331,7 @@ func (x *OrderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use OrderRequest.ProtoReflect.Descriptor instead. func (*OrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{22} + return file_sa_proto_rawDescGZIP(), []int{23} } func (x *OrderRequest) GetId() int64 { @@ -1308,7 +1355,7 @@ type NewOrderRequest struct { func (x *NewOrderRequest) Reset() { *x = NewOrderRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[23] + mi := &file_sa_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1321,7 +1368,7 @@ func (x *NewOrderRequest) String() string { func (*NewOrderRequest) ProtoMessage() {} func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[23] + mi := &file_sa_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1334,7 +1381,7 @@ func (x *NewOrderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use NewOrderRequest.ProtoReflect.Descriptor instead. func (*NewOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{23} + return file_sa_proto_rawDescGZIP(), []int{24} } func (x *NewOrderRequest) GetRegistrationID() int64 { @@ -1377,7 +1424,7 @@ type NewOrderAndAuthzsRequest struct { func (x *NewOrderAndAuthzsRequest) Reset() { *x = NewOrderAndAuthzsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[24] + mi := &file_sa_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1390,7 +1437,7 @@ func (x *NewOrderAndAuthzsRequest) String() string { func (*NewOrderAndAuthzsRequest) ProtoMessage() {} func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[24] + mi := &file_sa_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1403,7 +1450,7 @@ func (x *NewOrderAndAuthzsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use NewOrderAndAuthzsRequest.ProtoReflect.Descriptor instead. 
func (*NewOrderAndAuthzsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{24} + return file_sa_proto_rawDescGZIP(), []int{25} } func (x *NewOrderAndAuthzsRequest) GetNewOrder() *NewOrderRequest { @@ -1432,7 +1479,7 @@ type SetOrderErrorRequest struct { func (x *SetOrderErrorRequest) Reset() { *x = SetOrderErrorRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[25] + mi := &file_sa_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1445,7 +1492,7 @@ func (x *SetOrderErrorRequest) String() string { func (*SetOrderErrorRequest) ProtoMessage() {} func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[25] + mi := &file_sa_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1458,7 +1505,7 @@ func (x *SetOrderErrorRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetOrderErrorRequest.ProtoReflect.Descriptor instead. func (*SetOrderErrorRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{25} + return file_sa_proto_rawDescGZIP(), []int{26} } func (x *SetOrderErrorRequest) GetId() int64 { @@ -1487,7 +1534,7 @@ type GetValidOrderAuthorizationsRequest struct { func (x *GetValidOrderAuthorizationsRequest) Reset() { *x = GetValidOrderAuthorizationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[26] + mi := &file_sa_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1500,7 +1547,7 @@ func (x *GetValidOrderAuthorizationsRequest) String() string { func (*GetValidOrderAuthorizationsRequest) ProtoMessage() {} func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[26] + mi := &file_sa_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1513,7 +1560,7 @@ func (x *GetValidOrderAuthorizationsRequest) ProtoReflect() protoreflect.Message // Deprecated: Use GetValidOrderAuthorizationsRequest.ProtoReflect.Descriptor instead. func (*GetValidOrderAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{26} + return file_sa_proto_rawDescGZIP(), []int{27} } func (x *GetValidOrderAuthorizationsRequest) GetId() int64 { @@ -1542,7 +1589,7 @@ type GetOrderForNamesRequest struct { func (x *GetOrderForNamesRequest) Reset() { *x = GetOrderForNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[27] + mi := &file_sa_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1555,7 +1602,7 @@ func (x *GetOrderForNamesRequest) String() string { func (*GetOrderForNamesRequest) ProtoMessage() {} func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[27] + mi := &file_sa_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1568,7 +1615,7 @@ func (x *GetOrderForNamesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetOrderForNamesRequest.ProtoReflect.Descriptor instead. 
func (*GetOrderForNamesRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{27} + return file_sa_proto_rawDescGZIP(), []int{28} } func (x *GetOrderForNamesRequest) GetAcctID() int64 { @@ -1597,7 +1644,7 @@ type FinalizeOrderRequest struct { func (x *FinalizeOrderRequest) Reset() { *x = FinalizeOrderRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[28] + mi := &file_sa_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1610,7 +1657,7 @@ func (x *FinalizeOrderRequest) String() string { func (*FinalizeOrderRequest) ProtoMessage() {} func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[28] + mi := &file_sa_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1623,7 +1670,7 @@ func (x *FinalizeOrderRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FinalizeOrderRequest.ProtoReflect.Descriptor instead. func (*FinalizeOrderRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{28} + return file_sa_proto_rawDescGZIP(), []int{29} } func (x *FinalizeOrderRequest) GetId() int64 { @@ -1653,7 +1700,7 @@ type GetAuthorizationsRequest struct { func (x *GetAuthorizationsRequest) Reset() { *x = GetAuthorizationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[29] + mi := &file_sa_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1666,7 +1713,7 @@ func (x *GetAuthorizationsRequest) String() string { func (*GetAuthorizationsRequest) ProtoMessage() {} func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[29] + mi := &file_sa_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1679,7 +1726,7 @@ func (x *GetAuthorizationsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAuthorizationsRequest.ProtoReflect.Descriptor instead. func (*GetAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{29} + return file_sa_proto_rawDescGZIP(), []int{30} } func (x *GetAuthorizationsRequest) GetRegistrationID() int64 { @@ -1714,7 +1761,7 @@ type Authorizations struct { func (x *Authorizations) Reset() { *x = Authorizations{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[30] + mi := &file_sa_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1727,7 +1774,7 @@ func (x *Authorizations) String() string { func (*Authorizations) ProtoMessage() {} func (x *Authorizations) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[30] + mi := &file_sa_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1740,7 +1787,7 @@ func (x *Authorizations) ProtoReflect() protoreflect.Message { // Deprecated: Use Authorizations.ProtoReflect.Descriptor instead. 
func (*Authorizations) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{30} + return file_sa_proto_rawDescGZIP(), []int{31} } func (x *Authorizations) GetAuthz() []*Authorizations_MapElement { @@ -1761,7 +1808,7 @@ type AddPendingAuthorizationsRequest struct { func (x *AddPendingAuthorizationsRequest) Reset() { *x = AddPendingAuthorizationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[31] + mi := &file_sa_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1774,7 +1821,7 @@ func (x *AddPendingAuthorizationsRequest) String() string { func (*AddPendingAuthorizationsRequest) ProtoMessage() {} func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[31] + mi := &file_sa_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1787,7 +1834,7 @@ func (x *AddPendingAuthorizationsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddPendingAuthorizationsRequest.ProtoReflect.Descriptor instead. func (*AddPendingAuthorizationsRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{31} + return file_sa_proto_rawDescGZIP(), []int{32} } func (x *AddPendingAuthorizationsRequest) GetAuthz() []*proto.Authorization { @@ -1808,7 +1855,7 @@ type AuthorizationIDs struct { func (x *AuthorizationIDs) Reset() { *x = AuthorizationIDs{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[32] + mi := &file_sa_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1821,7 +1868,7 @@ func (x *AuthorizationIDs) String() string { func (*AuthorizationIDs) ProtoMessage() {} func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[32] + mi := &file_sa_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1834,7 +1881,7 @@ func (x *AuthorizationIDs) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthorizationIDs.ProtoReflect.Descriptor instead. func (*AuthorizationIDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{32} + return file_sa_proto_rawDescGZIP(), []int{33} } func (x *AuthorizationIDs) GetIds() []string { @@ -1855,7 +1902,7 @@ type AuthorizationID2 struct { func (x *AuthorizationID2) Reset() { *x = AuthorizationID2{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[33] + mi := &file_sa_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1868,7 +1915,7 @@ func (x *AuthorizationID2) String() string { func (*AuthorizationID2) ProtoMessage() {} func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[33] + mi := &file_sa_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1881,7 +1928,7 @@ func (x *AuthorizationID2) ProtoReflect() protoreflect.Message { // Deprecated: Use AuthorizationID2.ProtoReflect.Descriptor instead. 
func (*AuthorizationID2) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{33} + return file_sa_proto_rawDescGZIP(), []int{34} } func (x *AuthorizationID2) GetId() int64 { @@ -1902,7 +1949,7 @@ type Authorization2IDs struct { func (x *Authorization2IDs) Reset() { *x = Authorization2IDs{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[34] + mi := &file_sa_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1915,7 +1962,7 @@ func (x *Authorization2IDs) String() string { func (*Authorization2IDs) ProtoMessage() {} func (x *Authorization2IDs) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[34] + mi := &file_sa_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1928,7 +1975,7 @@ func (x *Authorization2IDs) ProtoReflect() protoreflect.Message { // Deprecated: Use Authorization2IDs.ProtoReflect.Descriptor instead. func (*Authorization2IDs) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{34} + return file_sa_proto_rawDescGZIP(), []int{35} } func (x *Authorization2IDs) GetIds() []int64 { @@ -1948,12 +1995,13 @@ type RevokeCertificateRequest struct { Date int64 `protobuf:"varint,3,opt,name=date,proto3" json:"date,omitempty"` // Unix timestamp (nanoseconds) Backdate int64 `protobuf:"varint,5,opt,name=backdate,proto3" json:"backdate,omitempty"` // Unix timestamp (nanoseconds) Response []byte `protobuf:"bytes,4,opt,name=response,proto3" json:"response,omitempty"` + IssuerID int64 `protobuf:"varint,6,opt,name=issuerID,proto3" json:"issuerID,omitempty"` } func (x *RevokeCertificateRequest) Reset() { *x = RevokeCertificateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[35] + mi := &file_sa_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1966,7 +2014,7 @@ func (x *RevokeCertificateRequest) String() string { func (*RevokeCertificateRequest) ProtoMessage() {} func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[35] + mi := &file_sa_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1979,7 +2027,7 @@ func (x *RevokeCertificateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RevokeCertificateRequest.ProtoReflect.Descriptor instead. 
func (*RevokeCertificateRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{35} + return file_sa_proto_rawDescGZIP(), []int{36} } func (x *RevokeCertificateRequest) GetSerial() string { @@ -2017,6 +2065,13 @@ func (x *RevokeCertificateRequest) GetResponse() []byte { return nil } +func (x *RevokeCertificateRequest) GetIssuerID() int64 { + if x != nil { + return x.IssuerID + } + return 0 +} + type FinalizeAuthorizationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2034,7 +2089,7 @@ type FinalizeAuthorizationRequest struct { func (x *FinalizeAuthorizationRequest) Reset() { *x = FinalizeAuthorizationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[36] + mi := &file_sa_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2047,7 +2102,7 @@ func (x *FinalizeAuthorizationRequest) String() string { func (*FinalizeAuthorizationRequest) ProtoMessage() {} func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[36] + mi := &file_sa_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2060,7 +2115,7 @@ func (x *FinalizeAuthorizationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FinalizeAuthorizationRequest.ProtoReflect.Descriptor instead. func (*FinalizeAuthorizationRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{36} + return file_sa_proto_rawDescGZIP(), []int{37} } func (x *FinalizeAuthorizationRequest) GetId() int64 { @@ -2127,7 +2182,7 @@ type AddBlockedKeyRequest struct { func (x *AddBlockedKeyRequest) Reset() { *x = AddBlockedKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[37] + mi := &file_sa_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2140,7 +2195,7 @@ func (x *AddBlockedKeyRequest) String() string { func (*AddBlockedKeyRequest) ProtoMessage() {} func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[37] + mi := &file_sa_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2153,7 +2208,7 @@ func (x *AddBlockedKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddBlockedKeyRequest.ProtoReflect.Descriptor instead. 
func (*AddBlockedKeyRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{37} + return file_sa_proto_rawDescGZIP(), []int{38} } func (x *AddBlockedKeyRequest) GetKeyHash() []byte { @@ -2202,7 +2257,7 @@ type KeyBlockedRequest struct { func (x *KeyBlockedRequest) Reset() { *x = KeyBlockedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[38] + mi := &file_sa_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2215,7 +2270,7 @@ func (x *KeyBlockedRequest) String() string { func (*KeyBlockedRequest) ProtoMessage() {} func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[38] + mi := &file_sa_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2228,7 +2283,7 @@ func (x *KeyBlockedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyBlockedRequest.ProtoReflect.Descriptor instead. func (*KeyBlockedRequest) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{38} + return file_sa_proto_rawDescGZIP(), []int{39} } func (x *KeyBlockedRequest) GetKeyHash() []byte { @@ -2238,32 +2293,35 @@ func (x *KeyBlockedRequest) GetKeyHash() []byte { return nil } -type ValidAuthorizations_MapElement struct { +type Incident struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + SerialTable string `protobuf:"bytes,2,opt,name=serialTable,proto3" json:"serialTable,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + RenewBy int64 `protobuf:"varint,4,opt,name=renewBy,proto3" json:"renewBy,omitempty"` // Unix timestamp (nanoseconds) + Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"` } -func (x *ValidAuthorizations_MapElement) Reset() { - *x = ValidAuthorizations_MapElement{} +func (x *Incident) Reset() { + *x = Incident{} if protoimpl.UnsafeEnabled { - mi := &file_sa_proto_msgTypes[39] + mi := &file_sa_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ValidAuthorizations_MapElement) String() string { +func (x *Incident) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidAuthorizations_MapElement) ProtoMessage() {} +func (*Incident) ProtoMessage() {} -func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[39] +func (x *Incident) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2274,36 +2332,56 @@ func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead. -func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{5, 0} +// Deprecated: Use Incident.ProtoReflect.Descriptor instead. 
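// The new Incident message above follows the same convention as Timestamps,
// RevokeCertificateRequest.date, and the other messages in this file: time
// fields such as renewBy are int64 Unix timestamps in nanoseconds. A small
// sketch of round-tripping such a field through time.Time; renewBy here is a
// stand-in variable, not a real field access on the generated type:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Encode: time.Time -> int64 nanoseconds, as the proto fields expect.
	renewBy := time.Now().Add(30 * 24 * time.Hour).UnixNano()

	// Decode: nanoseconds go in time.Unix's second argument.
	t := time.Unix(0, renewBy)
	fmt.Println(t.UTC().Format(time.RFC3339))
}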
+func (*Incident) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{40} } -func (x *ValidAuthorizations_MapElement) GetDomain() string { +func (x *Incident) GetId() int64 { if x != nil { - return x.Domain + return x.Id + } + return 0 +} + +func (x *Incident) GetSerialTable() string { + if x != nil { + return x.SerialTable } return "" } -func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization { +func (x *Incident) GetUrl() string { if x != nil { - return x.Authz + return x.Url } - return nil + return "" } -type Authorizations_MapElement struct { +func (x *Incident) GetRenewBy() int64 { + if x != nil { + return x.RenewBy + } + return 0 +} + +func (x *Incident) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +type SerialsForIncidentRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` - Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` + IncidentTable string `protobuf:"bytes,1,opt,name=incidentTable,proto3" json:"incidentTable,omitempty"` } -func (x *Authorizations_MapElement) Reset() { - *x = Authorizations_MapElement{} +func (x *SerialsForIncidentRequest) Reset() { + *x = SerialsForIncidentRequest{} if protoimpl.UnsafeEnabled { mi := &file_sa_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2311,14 +2389,259 @@ func (x *Authorizations_MapElement) Reset() { } } -func (x *Authorizations_MapElement) String() string { +func (x *SerialsForIncidentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SerialsForIncidentRequest) ProtoMessage() {} + +func (x *SerialsForIncidentRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SerialsForIncidentRequest.ProtoReflect.Descriptor instead. 
+func (*SerialsForIncidentRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{41} +} + +func (x *SerialsForIncidentRequest) GetIncidentTable() string { + if x != nil { + return x.IncidentTable + } + return "" +} + +type IncidentSerial struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Serial string `protobuf:"bytes,1,opt,name=serial,proto3" json:"serial,omitempty"` + RegistrationID int64 `protobuf:"varint,2,opt,name=registrationID,proto3" json:"registrationID,omitempty"` + OrderID int64 `protobuf:"varint,3,opt,name=orderID,proto3" json:"orderID,omitempty"` + LastNoticeSent int64 `protobuf:"varint,4,opt,name=lastNoticeSent,proto3" json:"lastNoticeSent,omitempty"` // Unix timestamp (nanoseconds) +} + +func (x *IncidentSerial) Reset() { + *x = IncidentSerial{} + if protoimpl.UnsafeEnabled { + mi := &file_sa_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IncidentSerial) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IncidentSerial) ProtoMessage() {} + +func (x *IncidentSerial) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IncidentSerial.ProtoReflect.Descriptor instead. +func (*IncidentSerial) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{42} +} + +func (x *IncidentSerial) GetSerial() string { + if x != nil { + return x.Serial + } + return "" +} + +func (x *IncidentSerial) GetRegistrationID() int64 { + if x != nil { + return x.RegistrationID + } + return 0 +} + +func (x *IncidentSerial) GetOrderID() int64 { + if x != nil { + return x.OrderID + } + return 0 +} + +func (x *IncidentSerial) GetLastNoticeSent() int64 { + if x != nil { + return x.LastNoticeSent + } + return 0 +} + +type GetRevokedCertsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IssuerNameID int64 `protobuf:"varint,1,opt,name=issuerNameID,proto3" json:"issuerNameID,omitempty"` + ExpiresAfter int64 `protobuf:"varint,2,opt,name=expiresAfter,proto3" json:"expiresAfter,omitempty"` // Unix timestamp (nanoseconds), inclusive + ExpiresBefore int64 `protobuf:"varint,3,opt,name=expiresBefore,proto3" json:"expiresBefore,omitempty"` // Unix timestamp (nanoseconds), exclusive + RevokedBefore int64 `protobuf:"varint,4,opt,name=revokedBefore,proto3" json:"revokedBefore,omitempty"` // Unix timestamp (nanoseconds) +} + +func (x *GetRevokedCertsRequest) Reset() { + *x = GetRevokedCertsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_sa_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRevokedCertsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRevokedCertsRequest) ProtoMessage() {} + +func (x *GetRevokedCertsRequest) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetRevokedCertsRequest.ProtoReflect.Descriptor instead. 
+func (*GetRevokedCertsRequest) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{43} +} + +func (x *GetRevokedCertsRequest) GetIssuerNameID() int64 { + if x != nil { + return x.IssuerNameID + } + return 0 +} + +func (x *GetRevokedCertsRequest) GetExpiresAfter() int64 { + if x != nil { + return x.ExpiresAfter + } + return 0 +} + +func (x *GetRevokedCertsRequest) GetExpiresBefore() int64 { + if x != nil { + return x.ExpiresBefore + } + return 0 +} + +func (x *GetRevokedCertsRequest) GetRevokedBefore() int64 { + if x != nil { + return x.RevokedBefore + } + return 0 +} + +type ValidAuthorizations_MapElement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` +} + +func (x *ValidAuthorizations_MapElement) Reset() { + *x = ValidAuthorizations_MapElement{} + if protoimpl.UnsafeEnabled { + mi := &file_sa_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidAuthorizations_MapElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidAuthorizations_MapElement) ProtoMessage() {} + +func (x *ValidAuthorizations_MapElement) ProtoReflect() protoreflect.Message { + mi := &file_sa_proto_msgTypes[44] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidAuthorizations_MapElement.ProtoReflect.Descriptor instead. +func (*ValidAuthorizations_MapElement) Descriptor() ([]byte, []int) { + return file_sa_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *ValidAuthorizations_MapElement) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ValidAuthorizations_MapElement) GetAuthz() *proto.Authorization { + if x != nil { + return x.Authz + } + return nil +} + +type Authorizations_MapElement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty"` + Authz *proto.Authorization `protobuf:"bytes,2,opt,name=authz,proto3" json:"authz,omitempty"` +} + +func (x *Authorizations_MapElement) Reset() { + *x = Authorizations_MapElement{} + if protoimpl.UnsafeEnabled { + mi := &file_sa_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Authorizations_MapElement) String() string { return protoimpl.X.MessageStringOf(x) } func (*Authorizations_MapElement) ProtoMessage() {} func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message { - mi := &file_sa_proto_msgTypes[41] + mi := &file_sa_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2331,7 +2654,7 @@ func (x *Authorizations_MapElement) ProtoReflect() protoreflect.Message { // Deprecated: Use Authorizations_MapElement.ProtoReflect.Descriptor instead. 
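// GetRevokedCertsRequest above documents expiresAfter as inclusive and
// expiresBefore as exclusive, so adjacent windows tile without overlap when
// one window's expiresBefore equals the next window's expiresAfter. A hedged
// sketch of the bounds check a consumer might apply; the window type and
// inWindow helper are illustrative, not part of the generated API, and the
// strict comparison against revokedBefore is an assumption:

package main

import (
	"fmt"
	"time"
)

// window mirrors the three GetRevokedCertsRequest bounds (Unix nanoseconds).
type window struct {
	expiresAfter  int64 // inclusive
	expiresBefore int64 // exclusive
	revokedBefore int64
}

// inWindow reports whether a certificate's notAfter falls in
// [expiresAfter, expiresBefore) and it was revoked before revokedBefore.
func inWindow(w window, notAfter, revokedAt time.Time) bool {
	exp := notAfter.UnixNano()
	return exp >= w.expiresAfter &&
		exp < w.expiresBefore &&
		revokedAt.UnixNano() < w.revokedBefore
}

func main() {
	now := time.Now()
	w := window{
		expiresAfter:  now.UnixNano(),
		expiresBefore: now.Add(24 * time.Hour).UnixNano(),
		revokedBefore: now.UnixNano(),
	}
	// Expiring exactly at expiresBefore is excluded by the exclusive bound.
	fmt.Println(inWindow(w, now.Add(24*time.Hour), now.Add(-time.Hour))) // false
	fmt.Println(inWindow(w, now.Add(time.Hour), now.Add(-time.Hour)))   // true
}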
func (*Authorizations_MapElement) Descriptor() ([]byte, []int) { - return file_sa_proto_rawDescGZIP(), []int{30, 0} + return file_sa_proto_rawDescGZIP(), []int{31, 0} } func (x *Authorizations_MapElement) GetDomain() string { @@ -2407,227 +2730,269 @@ var file_sa_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x22, 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7f, 0x0a, 0x0c, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, - 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, - 0x01, 0x0a, 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x61, 0x63, 
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, - 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, - 0x0a, 0x14, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, - 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, - 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, - 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, - 0x0a, 0x10, 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, - 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, - 0x0a, 0x03, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, - 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, - 0x0a, 0x16, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 
0x64, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x22, 0x1e, 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x95, 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, - 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, - 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x6e, 0x65, 0x77, - 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, - 0x7a, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, - 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, - 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, + 0x74, 0x22, 0x2c, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, + 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x22, + 0x58, 0x0a, 0x1f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x7f, 0x0a, 0x0c, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x63, 
0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x73, 0x61, 0x2e, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x1a, + 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1d, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x70, 0x12, 0x1f, 0x0a, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x88, 0x01, 0x0a, + 0x21, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, - 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, - 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, - 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, - 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, - 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, - 0x2e, 0x41, 
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, - 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, - 0x7a, 0x1a, 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, - 0x68, 0x7a, 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, + 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x53, 0x0a, 0x12, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x1f, 0x0a, 0x05, 0x72, + 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x73, 0x61, 0x2e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x48, 0x0a, 0x14, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x0a, 0x07, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x30, 0x0a, 0x14, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, + 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x20, 0x50, 0x72, 0x65, 0x76, + 0x69, 0x6f, 0x75, 0x73, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x22, 0x20, 0x0a, 0x06, 0x45, 0x78, + 0x69, 0x73, 0x74, 0x73, 0x12, 0x16, 
0x0a, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x10, + 0x41, 0x64, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x72, 0x65, 0x67, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x72, + 0x65, 0x67, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x6f, 0x63, 0x73, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x49, 0x44, 0x22, 0x30, 0x0a, 0x16, + 0x41, 0x64, 0x64, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x1e, + 0x0a, 0x0c, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x95, + 0x01, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x76, 0x32, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x03, 0x52, 0x10, 0x76, 0x32, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x7e, 0x0a, 0x18, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x41, 0x6e, 0x64, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x61, 0x2e, 0x4e, 0x65, 0x77, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 
0x08, 0x6e, 0x65, 0x77, 0x4f, 0x72, + 0x64, 0x65, 0x72, 0x12, 0x31, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6e, 0x65, 0x77, + 0x41, 0x75, 0x74, 0x68, 0x7a, 0x73, 0x22, 0x52, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x4c, 0x0a, 0x22, 0x47, 0x65, + 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x46, 0x6f, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x61, 0x63, 0x63, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x22, 0x54, 0x0a, 0x14, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x22, 0x6e, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x64, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x96, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x61, 0x75, + 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x61, + 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x1a, + 0x4f, 0x0a, 0x0a, 0x4d, 0x61, 0x70, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 
0x16, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, - 0x22, 0x24, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, - 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, - 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, - 0x73, 0x22, 0x96, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa6, 0x02, 0x0a, 0x1c, 0x46, - 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, - 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x52, 0x0f, 
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, - 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, - 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x22, 0x2d, 0x0a, 0x11, - 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x32, 0xcd, 0x15, 0x0a, 0x10, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a, - 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57, - 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x12, 0x35, 0x0a, 0x11, 0x47, - 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, - 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, - 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, + 0x22, 0x4c, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x05, 0x61, 0x75, 0x74, 0x68, 0x7a, 0x22, 0x24, + 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x03, 0x69, 0x64, 0x73, 0x22, 0x22, 0x0a, 0x10, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, + 0xb2, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, + 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x73, 0x73, 0x75, + 0x65, 0x72, 0x49, 0x44, 0x22, 0xa6, 0x02, 0x0a, 0x1c, 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x65, 0x6d, + 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x65, 0x64, 0x12, 0x44, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x3e, 0x0a, 0x0f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x62, + 0x6c, 0x65, 0x6d, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x61, + 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 
0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x96, 0x01, + 0x0a, 0x14, 0x41, 0x64, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x6f, + 0x6b, 0x65, 0x64, 0x42, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x79, 0x22, 0x2d, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6b, + 0x65, 0x79, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, + 0x79, 0x48, 0x61, 0x73, 0x68, 0x22, 0x82, 0x01, 0x0a, 0x08, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x41, 0x0a, 0x19, 0x53, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x69, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x92, 0x01, + 0x0a, 0x0e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, + 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x6c, 0x61, + 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x63, 0x65, 0x53, 0x65, + 0x6e, 0x74, 0x22, 0xac, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 
0x6b, 0x65, + 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, + 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0c, 0x69, 0x73, 0x73, 0x75, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x49, + 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x72, + 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x32, 0xa7, 0x17, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x1a, 0x12, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x2e, 0x73, 0x61, + 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x57, 0x65, 0x62, 0x4b, 0x65, 0x79, 0x1a, 0x12, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x00, 0x12, 0x35, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x1a, 0x12, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, - 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, - 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, - 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, - 0x74, 0x69, 
0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, - 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, - 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x32, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, - 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x00, 0x12, 0x34, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x50, 0x72, 0x65, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x11, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, + 0x00, 0x12, 0x3d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x1a, 0x17, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, + 0x12, 0x53, 0x0a, 0x18, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x73, + 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x73, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x79, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x16, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x12, + 0x21, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, + 0x4d, 0x0a, 0x1b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x21, + 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 
0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x79, 0x49, 0x50, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x32, + 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, + 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x22, 0x00, 0x12, 0x36, 0x0a, 0x0d, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, + 0x65, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x51, + 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x09, 0x2e, + 0x73, 0x61, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x1a, 0x46, 0x51, + 0x44, 0x4e, 0x53, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x46, + 0x6f, 0x72, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x73, 0x61, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x22, 0x00, 0x12, 0x37, 0x0a, 0x0d, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x73, 0x61, 0x2e, 0x46, 0x51, 0x44, 0x4e, 0x53, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4f, 0x0a, @@ -2673,7 +3038,16 @@ var file_sa_proto_rawDesc = []byte{ 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x00, 0x12, 0x31, 0x0a, 0x0a, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x15, 0x2e, 0x73, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0a, 0x2e, 0x73, 0x61, - 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, + 0x2e, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x12, 0x53, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, + 0x1d, 0x2e, 0x73, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x6f, 0x72, 0x49, + 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, + 0x2e, 0x73, 0x61, 0x2e, 0x49, 0x6e, 0x63, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x22, 0x00, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x65, 0x76, + 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x73, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x76, 0x6f, 0x6b, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x52, 0x4c, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, 0x3b, 0x0a, 0x0f, 0x4e, 0x65, 0x77, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x12, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x61, @@ -2773,7 +3147,7 @@ func file_sa_proto_rawDescGZIP() []byte { return file_sa_proto_rawDescData } -var 
file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_sa_proto_msgTypes = make([]protoimpl.MessageInfo, 47) var file_sa_proto_goTypes = []interface{}{ (*RegistrationID)(nil), // 0: sa.RegistrationID (*JSONWebKey)(nil), // 1: sa.JSONWebKey @@ -2785,145 +3159,157 @@ var file_sa_proto_goTypes = []interface{}{ (*SerialMetadata)(nil), // 7: sa.SerialMetadata (*Range)(nil), // 8: sa.Range (*Count)(nil), // 9: sa.Count - (*CountCertificatesByNamesRequest)(nil), // 10: sa.CountCertificatesByNamesRequest - (*CountByNames)(nil), // 11: sa.CountByNames - (*CountRegistrationsByIPRequest)(nil), // 12: sa.CountRegistrationsByIPRequest - (*CountInvalidAuthorizationsRequest)(nil), // 13: sa.CountInvalidAuthorizationsRequest - (*CountOrdersRequest)(nil), // 14: sa.CountOrdersRequest - (*CountFQDNSetsRequest)(nil), // 15: sa.CountFQDNSetsRequest - (*FQDNSetExistsRequest)(nil), // 16: sa.FQDNSetExistsRequest - (*PreviousCertificateExistsRequest)(nil), // 17: sa.PreviousCertificateExistsRequest - (*Exists)(nil), // 18: sa.Exists - (*AddSerialRequest)(nil), // 19: sa.AddSerialRequest - (*AddCertificateRequest)(nil), // 20: sa.AddCertificateRequest - (*AddCertificateResponse)(nil), // 21: sa.AddCertificateResponse - (*OrderRequest)(nil), // 22: sa.OrderRequest - (*NewOrderRequest)(nil), // 23: sa.NewOrderRequest - (*NewOrderAndAuthzsRequest)(nil), // 24: sa.NewOrderAndAuthzsRequest - (*SetOrderErrorRequest)(nil), // 25: sa.SetOrderErrorRequest - (*GetValidOrderAuthorizationsRequest)(nil), // 26: sa.GetValidOrderAuthorizationsRequest - (*GetOrderForNamesRequest)(nil), // 27: sa.GetOrderForNamesRequest - (*FinalizeOrderRequest)(nil), // 28: sa.FinalizeOrderRequest - (*GetAuthorizationsRequest)(nil), // 29: sa.GetAuthorizationsRequest - (*Authorizations)(nil), // 30: sa.Authorizations - (*AddPendingAuthorizationsRequest)(nil), // 31: sa.AddPendingAuthorizationsRequest - (*AuthorizationIDs)(nil), // 32: sa.AuthorizationIDs - (*AuthorizationID2)(nil), // 33: sa.AuthorizationID2 - (*Authorization2IDs)(nil), // 34: sa.Authorization2IDs - (*RevokeCertificateRequest)(nil), // 35: sa.RevokeCertificateRequest - (*FinalizeAuthorizationRequest)(nil), // 36: sa.FinalizeAuthorizationRequest - (*AddBlockedKeyRequest)(nil), // 37: sa.AddBlockedKeyRequest - (*KeyBlockedRequest)(nil), // 38: sa.KeyBlockedRequest - (*ValidAuthorizations_MapElement)(nil), // 39: sa.ValidAuthorizations.MapElement - nil, // 40: sa.CountByNames.CountsEntry - (*Authorizations_MapElement)(nil), // 41: sa.Authorizations.MapElement - (*proto.Authorization)(nil), // 42: core.Authorization - (*proto.ProblemDetails)(nil), // 43: core.ProblemDetails - (*proto.ValidationRecord)(nil), // 44: core.ValidationRecord - (*proto.Registration)(nil), // 45: core.Registration - (*proto.Certificate)(nil), // 46: core.Certificate - (*proto.CertificateStatus)(nil), // 47: core.CertificateStatus - (*emptypb.Empty)(nil), // 48: google.protobuf.Empty - (*proto.Order)(nil), // 49: core.Order + (*Timestamps)(nil), // 10: sa.Timestamps + (*CountCertificatesByNamesRequest)(nil), // 11: sa.CountCertificatesByNamesRequest + (*CountByNames)(nil), // 12: sa.CountByNames + (*CountRegistrationsByIPRequest)(nil), // 13: sa.CountRegistrationsByIPRequest + (*CountInvalidAuthorizationsRequest)(nil), // 14: sa.CountInvalidAuthorizationsRequest + (*CountOrdersRequest)(nil), // 15: sa.CountOrdersRequest + (*CountFQDNSetsRequest)(nil), // 16: sa.CountFQDNSetsRequest + (*FQDNSetExistsRequest)(nil), // 17: sa.FQDNSetExistsRequest + (*PreviousCertificateExistsRequest)(nil), // 18: 
sa.PreviousCertificateExistsRequest + (*Exists)(nil), // 19: sa.Exists + (*AddSerialRequest)(nil), // 20: sa.AddSerialRequest + (*AddCertificateRequest)(nil), // 21: sa.AddCertificateRequest + (*AddCertificateResponse)(nil), // 22: sa.AddCertificateResponse + (*OrderRequest)(nil), // 23: sa.OrderRequest + (*NewOrderRequest)(nil), // 24: sa.NewOrderRequest + (*NewOrderAndAuthzsRequest)(nil), // 25: sa.NewOrderAndAuthzsRequest + (*SetOrderErrorRequest)(nil), // 26: sa.SetOrderErrorRequest + (*GetValidOrderAuthorizationsRequest)(nil), // 27: sa.GetValidOrderAuthorizationsRequest + (*GetOrderForNamesRequest)(nil), // 28: sa.GetOrderForNamesRequest + (*FinalizeOrderRequest)(nil), // 29: sa.FinalizeOrderRequest + (*GetAuthorizationsRequest)(nil), // 30: sa.GetAuthorizationsRequest + (*Authorizations)(nil), // 31: sa.Authorizations + (*AddPendingAuthorizationsRequest)(nil), // 32: sa.AddPendingAuthorizationsRequest + (*AuthorizationIDs)(nil), // 33: sa.AuthorizationIDs + (*AuthorizationID2)(nil), // 34: sa.AuthorizationID2 + (*Authorization2IDs)(nil), // 35: sa.Authorization2IDs + (*RevokeCertificateRequest)(nil), // 36: sa.RevokeCertificateRequest + (*FinalizeAuthorizationRequest)(nil), // 37: sa.FinalizeAuthorizationRequest + (*AddBlockedKeyRequest)(nil), // 38: sa.AddBlockedKeyRequest + (*KeyBlockedRequest)(nil), // 39: sa.KeyBlockedRequest + (*Incident)(nil), // 40: sa.Incident + (*SerialsForIncidentRequest)(nil), // 41: sa.SerialsForIncidentRequest + (*IncidentSerial)(nil), // 42: sa.IncidentSerial + (*GetRevokedCertsRequest)(nil), // 43: sa.GetRevokedCertsRequest + (*ValidAuthorizations_MapElement)(nil), // 44: sa.ValidAuthorizations.MapElement + nil, // 45: sa.CountByNames.CountsEntry + (*Authorizations_MapElement)(nil), // 46: sa.Authorizations.MapElement + (*proto.Authorization)(nil), // 47: core.Authorization + (*proto.ProblemDetails)(nil), // 48: core.ProblemDetails + (*proto.ValidationRecord)(nil), // 49: core.ValidationRecord + (*proto.Registration)(nil), // 50: core.Registration + (*proto.Certificate)(nil), // 51: core.Certificate + (*proto.CertificateStatus)(nil), // 52: core.CertificateStatus + (*proto.CRLEntry)(nil), // 53: core.CRLEntry + (*emptypb.Empty)(nil), // 54: google.protobuf.Empty + (*proto.Order)(nil), // 55: core.Order } var file_sa_proto_depIdxs = []int32{ - 39, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement + 44, // 0: sa.ValidAuthorizations.valid:type_name -> sa.ValidAuthorizations.MapElement 8, // 1: sa.CountCertificatesByNamesRequest.range:type_name -> sa.Range - 40, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry + 45, // 2: sa.CountByNames.counts:type_name -> sa.CountByNames.CountsEntry 8, // 3: sa.CountRegistrationsByIPRequest.range:type_name -> sa.Range 8, // 4: sa.CountInvalidAuthorizationsRequest.range:type_name -> sa.Range 8, // 5: sa.CountOrdersRequest.range:type_name -> sa.Range - 23, // 6: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest - 42, // 7: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization - 43, // 8: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails - 41, // 9: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement - 42, // 10: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization - 44, // 11: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord - 43, // 12: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails - 42, // 13: 
sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization - 42, // 14: sa.Authorizations.MapElement.authz:type_name -> core.Authorization + 24, // 6: sa.NewOrderAndAuthzsRequest.newOrder:type_name -> sa.NewOrderRequest + 47, // 7: sa.NewOrderAndAuthzsRequest.newAuthzs:type_name -> core.Authorization + 48, // 8: sa.SetOrderErrorRequest.error:type_name -> core.ProblemDetails + 46, // 9: sa.Authorizations.authz:type_name -> sa.Authorizations.MapElement + 47, // 10: sa.AddPendingAuthorizationsRequest.authz:type_name -> core.Authorization + 49, // 11: sa.FinalizeAuthorizationRequest.validationRecords:type_name -> core.ValidationRecord + 48, // 12: sa.FinalizeAuthorizationRequest.validationError:type_name -> core.ProblemDetails + 47, // 13: sa.ValidAuthorizations.MapElement.authz:type_name -> core.Authorization + 47, // 14: sa.Authorizations.MapElement.authz:type_name -> core.Authorization 0, // 15: sa.StorageAuthority.GetRegistration:input_type -> sa.RegistrationID 1, // 16: sa.StorageAuthority.GetRegistrationByKey:input_type -> sa.JSONWebKey 6, // 17: sa.StorageAuthority.GetSerialMetadata:input_type -> sa.Serial 6, // 18: sa.StorageAuthority.GetCertificate:input_type -> sa.Serial 6, // 19: sa.StorageAuthority.GetPrecertificate:input_type -> sa.Serial 6, // 20: sa.StorageAuthority.GetCertificateStatus:input_type -> sa.Serial - 10, // 21: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest - 12, // 22: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest - 12, // 23: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest - 14, // 24: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest - 15, // 25: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest - 16, // 26: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest - 17, // 27: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest - 33, // 28: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2 - 29, // 29: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest - 3, // 30: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest - 0, // 31: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID - 26, // 32: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest - 13, // 33: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest - 4, // 34: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest - 38, // 35: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest - 45, // 36: sa.StorageAuthority.NewRegistration:input_type -> core.Registration - 45, // 37: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration - 20, // 38: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest - 20, // 39: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest - 19, // 40: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest - 0, // 41: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID - 23, // 42: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest - 24, // 43: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest - 22, // 44: sa.StorageAuthority.SetOrderProcessing:input_type 
-> sa.OrderRequest - 25, // 45: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest - 28, // 46: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest - 22, // 47: sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest - 27, // 48: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest - 35, // 49: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest - 35, // 50: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest - 31, // 51: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest - 36, // 52: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest - 33, // 53: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2 - 37, // 54: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest - 45, // 55: sa.StorageAuthority.GetRegistration:output_type -> core.Registration - 45, // 56: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration - 7, // 57: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata - 46, // 58: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate - 46, // 59: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate - 47, // 60: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus - 11, // 61: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames - 9, // 62: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count - 9, // 63: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count - 9, // 64: sa.StorageAuthority.CountOrders:output_type -> sa.Count - 9, // 65: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count - 18, // 66: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists - 18, // 67: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists - 42, // 68: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization - 30, // 69: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations - 42, // 70: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization - 9, // 71: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count - 30, // 72: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations - 9, // 73: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count - 30, // 74: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations - 18, // 75: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists - 45, // 76: sa.StorageAuthority.NewRegistration:output_type -> core.Registration - 48, // 77: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty - 21, // 78: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse - 48, // 79: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty - 48, // 80: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty - 48, // 81: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty - 49, // 82: sa.StorageAuthority.NewOrder:output_type -> core.Order - 49, // 83: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order - 48, // 84: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty - 48, // 85: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty - 48, // 86: 
sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty - 49, // 87: sa.StorageAuthority.GetOrder:output_type -> core.Order - 49, // 88: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order - 48, // 89: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty - 48, // 90: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty - 34, // 91: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs - 48, // 92: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty - 48, // 93: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty - 48, // 94: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty - 55, // [55:95] is the sub-list for method output_type - 15, // [15:55] is the sub-list for method input_type + 11, // 21: sa.StorageAuthority.CountCertificatesByNames:input_type -> sa.CountCertificatesByNamesRequest + 13, // 22: sa.StorageAuthority.CountRegistrationsByIP:input_type -> sa.CountRegistrationsByIPRequest + 13, // 23: sa.StorageAuthority.CountRegistrationsByIPRange:input_type -> sa.CountRegistrationsByIPRequest + 15, // 24: sa.StorageAuthority.CountOrders:input_type -> sa.CountOrdersRequest + 16, // 25: sa.StorageAuthority.CountFQDNSets:input_type -> sa.CountFQDNSetsRequest + 16, // 26: sa.StorageAuthority.FQDNSetTimestampsForWindow:input_type -> sa.CountFQDNSetsRequest + 17, // 27: sa.StorageAuthority.FQDNSetExists:input_type -> sa.FQDNSetExistsRequest + 18, // 28: sa.StorageAuthority.PreviousCertificateExists:input_type -> sa.PreviousCertificateExistsRequest + 34, // 29: sa.StorageAuthority.GetAuthorization2:input_type -> sa.AuthorizationID2 + 30, // 30: sa.StorageAuthority.GetAuthorizations2:input_type -> sa.GetAuthorizationsRequest + 3, // 31: sa.StorageAuthority.GetPendingAuthorization2:input_type -> sa.GetPendingAuthorizationRequest + 0, // 32: sa.StorageAuthority.CountPendingAuthorizations2:input_type -> sa.RegistrationID + 27, // 33: sa.StorageAuthority.GetValidOrderAuthorizations2:input_type -> sa.GetValidOrderAuthorizationsRequest + 14, // 34: sa.StorageAuthority.CountInvalidAuthorizations2:input_type -> sa.CountInvalidAuthorizationsRequest + 4, // 35: sa.StorageAuthority.GetValidAuthorizations2:input_type -> sa.GetValidAuthorizationsRequest + 39, // 36: sa.StorageAuthority.KeyBlocked:input_type -> sa.KeyBlockedRequest + 41, // 37: sa.StorageAuthority.SerialsForIncident:input_type -> sa.SerialsForIncidentRequest + 43, // 38: sa.StorageAuthority.GetRevokedCerts:input_type -> sa.GetRevokedCertsRequest + 50, // 39: sa.StorageAuthority.NewRegistration:input_type -> core.Registration + 50, // 40: sa.StorageAuthority.UpdateRegistration:input_type -> core.Registration + 21, // 41: sa.StorageAuthority.AddCertificate:input_type -> sa.AddCertificateRequest + 21, // 42: sa.StorageAuthority.AddPrecertificate:input_type -> sa.AddCertificateRequest + 20, // 43: sa.StorageAuthority.AddSerial:input_type -> sa.AddSerialRequest + 0, // 44: sa.StorageAuthority.DeactivateRegistration:input_type -> sa.RegistrationID + 24, // 45: sa.StorageAuthority.NewOrder:input_type -> sa.NewOrderRequest + 25, // 46: sa.StorageAuthority.NewOrderAndAuthzs:input_type -> sa.NewOrderAndAuthzsRequest + 23, // 47: sa.StorageAuthority.SetOrderProcessing:input_type -> sa.OrderRequest + 26, // 48: sa.StorageAuthority.SetOrderError:input_type -> sa.SetOrderErrorRequest + 29, // 49: sa.StorageAuthority.FinalizeOrder:input_type -> sa.FinalizeOrderRequest + 23, // 50: 
sa.StorageAuthority.GetOrder:input_type -> sa.OrderRequest + 28, // 51: sa.StorageAuthority.GetOrderForNames:input_type -> sa.GetOrderForNamesRequest + 36, // 52: sa.StorageAuthority.RevokeCertificate:input_type -> sa.RevokeCertificateRequest + 36, // 53: sa.StorageAuthority.UpdateRevokedCertificate:input_type -> sa.RevokeCertificateRequest + 32, // 54: sa.StorageAuthority.NewAuthorizations2:input_type -> sa.AddPendingAuthorizationsRequest + 37, // 55: sa.StorageAuthority.FinalizeAuthorization2:input_type -> sa.FinalizeAuthorizationRequest + 34, // 56: sa.StorageAuthority.DeactivateAuthorization2:input_type -> sa.AuthorizationID2 + 38, // 57: sa.StorageAuthority.AddBlockedKey:input_type -> sa.AddBlockedKeyRequest + 50, // 58: sa.StorageAuthority.GetRegistration:output_type -> core.Registration + 50, // 59: sa.StorageAuthority.GetRegistrationByKey:output_type -> core.Registration + 7, // 60: sa.StorageAuthority.GetSerialMetadata:output_type -> sa.SerialMetadata + 51, // 61: sa.StorageAuthority.GetCertificate:output_type -> core.Certificate + 51, // 62: sa.StorageAuthority.GetPrecertificate:output_type -> core.Certificate + 52, // 63: sa.StorageAuthority.GetCertificateStatus:output_type -> core.CertificateStatus + 12, // 64: sa.StorageAuthority.CountCertificatesByNames:output_type -> sa.CountByNames + 9, // 65: sa.StorageAuthority.CountRegistrationsByIP:output_type -> sa.Count + 9, // 66: sa.StorageAuthority.CountRegistrationsByIPRange:output_type -> sa.Count + 9, // 67: sa.StorageAuthority.CountOrders:output_type -> sa.Count + 9, // 68: sa.StorageAuthority.CountFQDNSets:output_type -> sa.Count + 10, // 69: sa.StorageAuthority.FQDNSetTimestampsForWindow:output_type -> sa.Timestamps + 19, // 70: sa.StorageAuthority.FQDNSetExists:output_type -> sa.Exists + 19, // 71: sa.StorageAuthority.PreviousCertificateExists:output_type -> sa.Exists + 47, // 72: sa.StorageAuthority.GetAuthorization2:output_type -> core.Authorization + 31, // 73: sa.StorageAuthority.GetAuthorizations2:output_type -> sa.Authorizations + 47, // 74: sa.StorageAuthority.GetPendingAuthorization2:output_type -> core.Authorization + 9, // 75: sa.StorageAuthority.CountPendingAuthorizations2:output_type -> sa.Count + 31, // 76: sa.StorageAuthority.GetValidOrderAuthorizations2:output_type -> sa.Authorizations + 9, // 77: sa.StorageAuthority.CountInvalidAuthorizations2:output_type -> sa.Count + 31, // 78: sa.StorageAuthority.GetValidAuthorizations2:output_type -> sa.Authorizations + 19, // 79: sa.StorageAuthority.KeyBlocked:output_type -> sa.Exists + 42, // 80: sa.StorageAuthority.SerialsForIncident:output_type -> sa.IncidentSerial + 53, // 81: sa.StorageAuthority.GetRevokedCerts:output_type -> core.CRLEntry + 50, // 82: sa.StorageAuthority.NewRegistration:output_type -> core.Registration + 54, // 83: sa.StorageAuthority.UpdateRegistration:output_type -> google.protobuf.Empty + 22, // 84: sa.StorageAuthority.AddCertificate:output_type -> sa.AddCertificateResponse + 54, // 85: sa.StorageAuthority.AddPrecertificate:output_type -> google.protobuf.Empty + 54, // 86: sa.StorageAuthority.AddSerial:output_type -> google.protobuf.Empty + 54, // 87: sa.StorageAuthority.DeactivateRegistration:output_type -> google.protobuf.Empty + 55, // 88: sa.StorageAuthority.NewOrder:output_type -> core.Order + 55, // 89: sa.StorageAuthority.NewOrderAndAuthzs:output_type -> core.Order + 54, // 90: sa.StorageAuthority.SetOrderProcessing:output_type -> google.protobuf.Empty + 54, // 91: sa.StorageAuthority.SetOrderError:output_type -> google.protobuf.Empty + 
54, // 92: sa.StorageAuthority.FinalizeOrder:output_type -> google.protobuf.Empty + 55, // 93: sa.StorageAuthority.GetOrder:output_type -> core.Order + 55, // 94: sa.StorageAuthority.GetOrderForNames:output_type -> core.Order + 54, // 95: sa.StorageAuthority.RevokeCertificate:output_type -> google.protobuf.Empty + 54, // 96: sa.StorageAuthority.UpdateRevokedCertificate:output_type -> google.protobuf.Empty + 35, // 97: sa.StorageAuthority.NewAuthorizations2:output_type -> sa.Authorization2IDs + 54, // 98: sa.StorageAuthority.FinalizeAuthorization2:output_type -> google.protobuf.Empty + 54, // 99: sa.StorageAuthority.DeactivateAuthorization2:output_type -> google.protobuf.Empty + 54, // 100: sa.StorageAuthority.AddBlockedKey:output_type -> google.protobuf.Empty + 58, // [58:101] is the sub-list for method output_type + 15, // [15:58] is the sub-list for method input_type 15, // [15:15] is the sub-list for extension type_name 15, // [15:15] is the sub-list for extension extendee 0, // [0:15] is the sub-list for field type_name @@ -3056,7 +3442,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountCertificatesByNamesRequest); i { + switch v := v.(*Timestamps); i { case 0: return &v.state case 1: @@ -3068,7 +3454,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountByNames); i { + switch v := v.(*CountCertificatesByNamesRequest); i { case 0: return &v.state case 1: @@ -3080,7 +3466,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountRegistrationsByIPRequest); i { + switch v := v.(*CountByNames); i { case 0: return &v.state case 1: @@ -3092,7 +3478,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountInvalidAuthorizationsRequest); i { + switch v := v.(*CountRegistrationsByIPRequest); i { case 0: return &v.state case 1: @@ -3104,7 +3490,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountOrdersRequest); i { + switch v := v.(*CountInvalidAuthorizationsRequest); i { case 0: return &v.state case 1: @@ -3116,7 +3502,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CountFQDNSetsRequest); i { + switch v := v.(*CountOrdersRequest); i { case 0: return &v.state case 1: @@ -3128,7 +3514,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FQDNSetExistsRequest); i { + switch v := v.(*CountFQDNSetsRequest); i { case 0: return &v.state case 1: @@ -3140,7 +3526,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PreviousCertificateExistsRequest); i { + switch v := v.(*FQDNSetExistsRequest); i { case 0: return &v.state case 1: @@ -3152,7 +3538,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Exists); i { + switch v := v.(*PreviousCertificateExistsRequest); i { case 0: return &v.state case 1: @@ -3164,7 +3550,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSerialRequest); i { 
+ switch v := v.(*Exists); i { case 0: return &v.state case 1: @@ -3176,7 +3562,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateRequest); i { + switch v := v.(*AddSerialRequest); i { case 0: return &v.state case 1: @@ -3188,7 +3574,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCertificateResponse); i { + switch v := v.(*AddCertificateRequest); i { case 0: return &v.state case 1: @@ -3200,7 +3586,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OrderRequest); i { + switch v := v.(*AddCertificateResponse); i { case 0: return &v.state case 1: @@ -3212,7 +3598,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderRequest); i { + switch v := v.(*OrderRequest); i { case 0: return &v.state case 1: @@ -3224,7 +3610,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NewOrderAndAuthzsRequest); i { + switch v := v.(*NewOrderRequest); i { case 0: return &v.state case 1: @@ -3236,7 +3622,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetOrderErrorRequest); i { + switch v := v.(*NewOrderAndAuthzsRequest); i { case 0: return &v.state case 1: @@ -3248,7 +3634,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetValidOrderAuthorizationsRequest); i { + switch v := v.(*SetOrderErrorRequest); i { case 0: return &v.state case 1: @@ -3260,7 +3646,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOrderForNamesRequest); i { + switch v := v.(*GetValidOrderAuthorizationsRequest); i { case 0: return &v.state case 1: @@ -3272,7 +3658,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeOrderRequest); i { + switch v := v.(*GetOrderForNamesRequest); i { case 0: return &v.state case 1: @@ -3284,7 +3670,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAuthorizationsRequest); i { + switch v := v.(*FinalizeOrderRequest); i { case 0: return &v.state case 1: @@ -3296,7 +3682,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorizations); i { + switch v := v.(*GetAuthorizationsRequest); i { case 0: return &v.state case 1: @@ -3308,7 +3694,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddPendingAuthorizationsRequest); i { + switch v := v.(*Authorizations); i { case 0: return &v.state case 1: @@ -3320,7 +3706,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AuthorizationIDs); i { + switch v := v.(*AddPendingAuthorizationsRequest); i { case 0: return &v.state case 1: @@ -3332,7 +3718,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v 
:= v.(*AuthorizationID2); i { + switch v := v.(*AuthorizationIDs); i { case 0: return &v.state case 1: @@ -3344,7 +3730,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Authorization2IDs); i { + switch v := v.(*AuthorizationID2); i { case 0: return &v.state case 1: @@ -3356,7 +3742,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RevokeCertificateRequest); i { + switch v := v.(*Authorization2IDs); i { case 0: return &v.state case 1: @@ -3368,7 +3754,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FinalizeAuthorizationRequest); i { + switch v := v.(*RevokeCertificateRequest); i { case 0: return &v.state case 1: @@ -3380,7 +3766,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddBlockedKeyRequest); i { + switch v := v.(*FinalizeAuthorizationRequest); i { case 0: return &v.state case 1: @@ -3392,7 +3778,7 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyBlockedRequest); i { + switch v := v.(*AddBlockedKeyRequest); i { case 0: return &v.state case 1: @@ -3404,7 +3790,19 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidAuthorizations_MapElement); i { + switch v := v.(*KeyBlockedRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sa_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Incident); i { case 0: return &v.state case 1: @@ -3416,6 +3814,54 @@ func file_sa_proto_init() { } } file_sa_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SerialsForIncidentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sa_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IncidentSerial); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sa_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRevokedCertsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sa_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidAuthorizations_MapElement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_sa_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Authorizations_MapElement); i { case 0: return &v.state @@ -3434,7 +3880,7 @@ func file_sa_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_sa_proto_rawDesc, NumEnums: 0, - NumMessages: 42, + NumMessages: 47, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto index 25d2d64348..6eafefbe43 100644 --- 
a/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto +++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa.proto @@ -21,6 +21,7 @@ service StorageAuthority { // Return a count of authorizations with status "invalid" that belong to // a given registration ID and expire in the given time range. rpc CountFQDNSets(CountFQDNSetsRequest) returns (Count) {} + rpc FQDNSetTimestampsForWindow(CountFQDNSetsRequest) returns (Timestamps) {} rpc FQDNSetExists(FQDNSetExistsRequest) returns (Exists) {} rpc PreviousCertificateExists(PreviousCertificateExistsRequest) returns (Exists) {} rpc GetAuthorization2(AuthorizationID2) returns (core.Authorization) {} @@ -31,6 +32,8 @@ service StorageAuthority { rpc CountInvalidAuthorizations2(CountInvalidAuthorizationsRequest) returns (Count) {} rpc GetValidAuthorizations2(GetValidAuthorizationsRequest) returns (Authorizations) {} rpc KeyBlocked(KeyBlockedRequest) returns (Exists) {} + rpc SerialsForIncident (SerialsForIncidentRequest) returns (stream IncidentSerial) {} + rpc GetRevokedCerts(GetRevokedCertsRequest) returns (stream core.CRLEntry) {} // Adders rpc NewRegistration(core.Registration) returns (core.Registration) {} rpc UpdateRegistration(core.Registration) returns (google.protobuf.Empty) {} @@ -107,6 +110,10 @@ message Count { int64 count = 1; } +message Timestamps { + repeated int64 timestamps = 1; // Unix timestamp (nanoseconds) +} + message CountCertificatesByNamesRequest { Range range = 1; repeated string names = 2; @@ -247,6 +254,7 @@ message RevokeCertificateRequest { int64 date = 3; // Unix timestamp (nanoseconds) int64 backdate = 5; // Unix timestamp (nanoseconds) bytes response = 4; + int64 issuerID = 6; } message FinalizeAuthorizationRequest { @@ -270,3 +278,29 @@ message AddBlockedKeyRequest { message KeyBlockedRequest { bytes keyHash = 1; } + +message Incident { + int64 id = 1; + string serialTable = 2; + string url = 3; + int64 renewBy = 4; // Unix timestamp (nanoseconds) + bool enabled = 5; +} + +message SerialsForIncidentRequest { + string incidentTable = 1; +} + +message IncidentSerial { + string serial = 1; + int64 registrationID = 2; + int64 orderID = 3; + int64 lastNoticeSent = 4; // Unix timestamp (nanoseconds) +} + +message GetRevokedCertsRequest { + int64 issuerNameID = 1; + int64 expiresAfter = 2; // Unix timestamp (nanoseconds), inclusive + int64 expiresBefore = 3; // Unix timestamp (nanoseconds), exclusive + int64 revokedBefore = 4; // Unix timestamp (nanoseconds) +} diff --git a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go index 3aae5354b3..7534016eb2 100644 --- a/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go +++ b/vendor/github.com/letsencrypt/boulder/sa/proto/sa_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: sa.proto package proto @@ -34,6 +38,7 @@ type StorageAuthorityClient interface { // Return a count of authorizations with status "invalid" that belong to // a given registration ID and expire in the given time range. 
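The sa.proto changes above add two server-streaming RPCs, SerialsForIncident and GetRevokedCerts, next to the unary FQDNSetTimestampsForWindow. A minimal client sketch of draining the GetRevokedCerts stream follows; the dial target and request values are hypothetical, the CRLEntry field name is assumed, and all timestamps are Unix nanoseconds per the proto comments:

```go
package main

import (
	"context"
	"fmt"
	"io"

	sapb "github.com/letsencrypt/boulder/sa/proto"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical dial target; a real deployment would use TLS credentials.
	conn, err := grpc.Dial("sa.example:9095", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := sapb.NewStorageAuthorityClient(conn)

	// Illustrative values; fields are Unix timestamps in nanoseconds per sa.proto.
	stream, err := client.GetRevokedCerts(context.Background(), &sapb.GetRevokedCertsRequest{
		IssuerNameID:  1,
		ExpiresAfter:  1663027200000000000,
		ExpiresBefore: 1663632000000000000,
	})
	if err != nil {
		panic(err)
	}
	for {
		entry, err := stream.Recv()
		if err == io.EOF {
			break // server closed the stream; all entries received
		}
		if err != nil {
			panic(err)
		}
		// One core.CRLEntry per revoked certificate; Serial assumed as the field name.
		fmt.Println(entry.Serial)
	}
}
```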
CountFQDNSets(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Count, error) + FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) PreviousCertificateExists(ctx context.Context, in *PreviousCertificateExistsRequest, opts ...grpc.CallOption) (*Exists, error) GetAuthorization2(ctx context.Context, in *AuthorizationID2, opts ...grpc.CallOption) (*proto.Authorization, error) @@ -44,6 +49,8 @@ type StorageAuthorityClient interface { CountInvalidAuthorizations2(ctx context.Context, in *CountInvalidAuthorizationsRequest, opts ...grpc.CallOption) (*Count, error) GetValidAuthorizations2(ctx context.Context, in *GetValidAuthorizationsRequest, opts ...grpc.CallOption) (*Authorizations, error) KeyBlocked(ctx context.Context, in *KeyBlockedRequest, opts ...grpc.CallOption) (*Exists, error) + SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) + GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) // Adders NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) UpdateRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*emptypb.Empty, error) @@ -173,6 +180,15 @@ func (c *storageAuthorityClient) CountFQDNSets(ctx context.Context, in *CountFQD return out, nil } +func (c *storageAuthorityClient) FQDNSetTimestampsForWindow(ctx context.Context, in *CountFQDNSetsRequest, opts ...grpc.CallOption) (*Timestamps, error) { + out := new(Timestamps) + err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetTimestampsForWindow", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storageAuthorityClient) FQDNSetExists(ctx context.Context, in *FQDNSetExistsRequest, opts ...grpc.CallOption) (*Exists, error) { out := new(Exists) err := c.cc.Invoke(ctx, "/sa.StorageAuthority/FQDNSetExists", in, out, opts...) @@ -263,6 +279,70 @@ func (c *storageAuthorityClient) KeyBlocked(ctx context.Context, in *KeyBlockedR return out, nil } +func (c *storageAuthorityClient) SerialsForIncident(ctx context.Context, in *SerialsForIncidentRequest, opts ...grpc.CallOption) (StorageAuthority_SerialsForIncidentClient, error) { + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[0], "/sa.StorageAuthority/SerialsForIncident", opts...) 
+ if err != nil { + return nil, err + } + x := &storageAuthoritySerialsForIncidentClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StorageAuthority_SerialsForIncidentClient interface { + Recv() (*IncidentSerial, error) + grpc.ClientStream +} + +type storageAuthoritySerialsForIncidentClient struct { + grpc.ClientStream +} + +func (x *storageAuthoritySerialsForIncidentClient) Recv() (*IncidentSerial, error) { + m := new(IncidentSerial) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageAuthorityClient) GetRevokedCerts(ctx context.Context, in *GetRevokedCertsRequest, opts ...grpc.CallOption) (StorageAuthority_GetRevokedCertsClient, error) { + stream, err := c.cc.NewStream(ctx, &StorageAuthority_ServiceDesc.Streams[1], "/sa.StorageAuthority/GetRevokedCerts", opts...) + if err != nil { + return nil, err + } + x := &storageAuthorityGetRevokedCertsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type StorageAuthority_GetRevokedCertsClient interface { + Recv() (*proto.CRLEntry, error) + grpc.ClientStream +} + +type storageAuthorityGetRevokedCertsClient struct { + grpc.ClientStream +} + +func (x *storageAuthorityGetRevokedCertsClient) Recv() (*proto.CRLEntry, error) { + m := new(proto.CRLEntry) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *storageAuthorityClient) NewRegistration(ctx context.Context, in *proto.Registration, opts ...grpc.CallOption) (*proto.Registration, error) { out := new(proto.Registration) err := c.cc.Invoke(ctx, "/sa.StorageAuthority/NewRegistration", in, out, opts...) @@ -452,6 +532,7 @@ type StorageAuthorityServer interface { // Return a count of authorizations with status "invalid" that belong to // a given registration ID and expire in the given time range. 
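The generated client above pairs each streaming RPC with a Recv-only interface; on the server side the matching interface is Send-only, so an implementation reduces to a loop. A minimal sketch, assuming the database query has already produced the rows (the rows argument is a stand-in for the real lookup):

```go
package sasketch

import (
	sapb "github.com/letsencrypt/boulder/sa/proto"
)

// streamIncidentSerials sketches the server half of SerialsForIncident:
// every row for the requested incident table is sent as one IncidentSerial.
func streamIncidentSerials(rows []*sapb.IncidentSerial, stream sapb.StorageAuthority_SerialsForIncidentServer) error {
	for _, row := range rows {
		if err := stream.Send(row); err != nil {
			// A Send error terminates the stream with that status.
			return err
		}
	}
	// Returning nil ends the stream cleanly; the client sees io.EOF.
	return nil
}
```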
CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) + FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) PreviousCertificateExists(context.Context, *PreviousCertificateExistsRequest) (*Exists, error) GetAuthorization2(context.Context, *AuthorizationID2) (*proto.Authorization, error) @@ -462,6 +543,8 @@ type StorageAuthorityServer interface { CountInvalidAuthorizations2(context.Context, *CountInvalidAuthorizationsRequest) (*Count, error) GetValidAuthorizations2(context.Context, *GetValidAuthorizationsRequest) (*Authorizations, error) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) + SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error + GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error // Adders NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) UpdateRegistration(context.Context, *proto.Registration) (*emptypb.Empty, error) @@ -522,6 +605,9 @@ func (UnimplementedStorageAuthorityServer) CountOrders(context.Context, *CountOr func (UnimplementedStorageAuthorityServer) CountFQDNSets(context.Context, *CountFQDNSetsRequest) (*Count, error) { return nil, status.Errorf(codes.Unimplemented, "method CountFQDNSets not implemented") } +func (UnimplementedStorageAuthorityServer) FQDNSetTimestampsForWindow(context.Context, *CountFQDNSetsRequest) (*Timestamps, error) { + return nil, status.Errorf(codes.Unimplemented, "method FQDNSetTimestampsForWindow not implemented") +} func (UnimplementedStorageAuthorityServer) FQDNSetExists(context.Context, *FQDNSetExistsRequest) (*Exists, error) { return nil, status.Errorf(codes.Unimplemented, "method FQDNSetExists not implemented") } @@ -552,6 +638,12 @@ func (UnimplementedStorageAuthorityServer) GetValidAuthorizations2(context.Conte func (UnimplementedStorageAuthorityServer) KeyBlocked(context.Context, *KeyBlockedRequest) (*Exists, error) { return nil, status.Errorf(codes.Unimplemented, "method KeyBlocked not implemented") } +func (UnimplementedStorageAuthorityServer) SerialsForIncident(*SerialsForIncidentRequest, StorageAuthority_SerialsForIncidentServer) error { + return status.Errorf(codes.Unimplemented, "method SerialsForIncident not implemented") +} +func (UnimplementedStorageAuthorityServer) GetRevokedCerts(*GetRevokedCertsRequest, StorageAuthority_GetRevokedCertsServer) error { + return status.Errorf(codes.Unimplemented, "method GetRevokedCerts not implemented") +} func (UnimplementedStorageAuthorityServer) NewRegistration(context.Context, *proto.Registration) (*proto.Registration, error) { return nil, status.Errorf(codes.Unimplemented, "method NewRegistration not implemented") } @@ -820,6 +912,24 @@ func _StorageAuthority_CountFQDNSets_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _StorageAuthority_FQDNSetTimestampsForWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CountFQDNSetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/sa.StorageAuthority/FQDNSetTimestampsForWindow", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(StorageAuthorityServer).FQDNSetTimestampsForWindow(ctx, req.(*CountFQDNSetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _StorageAuthority_FQDNSetExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FQDNSetExistsRequest) if err := dec(in); err != nil { @@ -1000,6 +1110,48 @@ func _StorageAuthority_KeyBlocked_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _StorageAuthority_SerialsForIncident_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SerialsForIncidentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).SerialsForIncident(m, &storageAuthoritySerialsForIncidentServer{stream}) +} + +type StorageAuthority_SerialsForIncidentServer interface { + Send(*IncidentSerial) error + grpc.ServerStream +} + +type storageAuthoritySerialsForIncidentServer struct { + grpc.ServerStream +} + +func (x *storageAuthoritySerialsForIncidentServer) Send(m *IncidentSerial) error { + return x.ServerStream.SendMsg(m) +} + +func _StorageAuthority_GetRevokedCerts_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetRevokedCertsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageAuthorityServer).GetRevokedCerts(m, &storageAuthorityGetRevokedCertsServer{stream}) +} + +type StorageAuthority_GetRevokedCertsServer interface { + Send(*proto.CRLEntry) error + grpc.ServerStream +} + +type storageAuthorityGetRevokedCertsServer struct { + grpc.ServerStream +} + +func (x *storageAuthorityGetRevokedCertsServer) Send(m *proto.CRLEntry) error { + return x.ServerStream.SendMsg(m) +} + func _StorageAuthority_NewRegistration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(proto.Registration) if err := dec(in); err != nil { @@ -1393,6 +1545,10 @@ var StorageAuthority_ServiceDesc = grpc.ServiceDesc{ MethodName: "CountFQDNSets", Handler: _StorageAuthority_CountFQDNSets_Handler, }, + { + MethodName: "FQDNSetTimestampsForWindow", + Handler: _StorageAuthority_FQDNSetTimestampsForWindow_Handler, + }, { MethodName: "FQDNSetExists", Handler: _StorageAuthority_FQDNSetExists_Handler, @@ -1510,6 +1666,17 @@ var StorageAuthority_ServiceDesc = grpc.ServiceDesc{ Handler: _StorageAuthority_AddBlockedKey_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SerialsForIncident", + Handler: _StorageAuthority_SerialsForIncident_Handler, + ServerStreams: true, + }, + { + StreamName: "GetRevokedCerts", + Handler: _StorageAuthority_GetRevokedCerts_Handler, + ServerStreams: true, + }, + }, Metadata: "sa.proto", } diff --git a/vendor/github.com/mgutz/ansi/README.md b/vendor/github.com/mgutz/ansi/README.md index 8f8e20b7e4..05905abe27 100644 --- a/vendor/github.com/mgutz/ansi/README.md +++ b/vendor/github.com/mgutz/ansi/README.md @@ -34,6 +34,7 @@ Other examples ```go Color(s, "red") // red +Color(s, "red+d") // red dim Color(s, "red+b") // red bold Color(s, "red+B") // red blinking Color(s, "red+u") // red underline @@ -73,6 +74,7 @@ Foreground Attributes * B = Blink * b = bold * h = high intensity (bright) +* d = dim * i = inverse * s = strikethrough * u = underline diff --git a/vendor/github.com/mgutz/ansi/ansi.go b/vendor/github.com/mgutz/ansi/ansi.go index dc0413649e..9ab6979dea 100644 --- 
a/vendor/github.com/mgutz/ansi/ansi.go +++ b/vendor/github.com/mgutz/ansi/ansi.go @@ -24,9 +24,11 @@ const ( highIntensityBG = 100 start = "\033[" + normal = "0;" bold = "1;" - blink = "5;" + dim = "2;" underline = "4;" + blink = "5;" inverse = "7;" strikethrough = "9;" @@ -164,10 +166,14 @@ func colorCode(style string) *bytes.Buffer { buf.WriteString(start) base := normalIntensityFG + buf.WriteString(normal) // reset any previous style if len(fgStyle) > 0 { if strings.Contains(fgStyle, "b") { buf.WriteString(bold) } + if strings.Contains(fgStyle, "d") { + buf.WriteString(dim) + } if strings.Contains(fgStyle, "B") { buf.WriteString(blink) } diff --git a/vendor/github.com/mgutz/ansi/doc.go b/vendor/github.com/mgutz/ansi/doc.go index 43c217e11d..c93039b85f 100644 --- a/vendor/github.com/mgutz/ansi/doc.go +++ b/vendor/github.com/mgutz/ansi/doc.go @@ -58,6 +58,7 @@ Attributes B = Blink foreground u = underline foreground h = high intensity (bright) foreground, background + d = dim foreground i = inverse Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore new file mode 100644 index 0000000000..96b11286e5 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.gitignore @@ -0,0 +1,2 @@ +coverage.out +.directory \ No newline at end of file diff --git a/vendor/github.com/montanaflynn/stats/.travis.yml b/vendor/github.com/montanaflynn/stats/.travis.yml new file mode 100644 index 0000000000..697dcb7591 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/.travis.yml @@ -0,0 +1,20 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip +before_install: + - sudo pip install codecov +script: + - go test +after_success: + - codecov +notifications: + email: + recipients: + - montana@montanaflynn.me + on_success: change + on_failure: always diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md new file mode 100644 index 0000000000..532f6ed3fd --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -0,0 +1,64 @@ +# Change Log + +## [0.2.0](https://github.com/montanaflynn/stats/tree/0.2.0) + +### Merged pull requests: + +- Fixed typographical error, changed accomdate to accommodate in README. [\#5](https://github.com/montanaflynn/stats/pull/5) ([orthographic-pedant](https://github.com/orthographic-pedant)) + +### Package changes: + +- Add `Correlation` function +- Add `Covariance` function +- Add `StandardDeviation` function to be the same as `StandardDeviationPopulation` +- Change `Variance` function to be the same as `PopulationVariance` +- Add helper methods to `Float64Data` +- Add `Float64Data` type to use instead of `[]float64` +- Add `Series` type which refers to `[]Coordinate` + +## [0.1.0](https://github.com/montanaflynn/stats/tree/0.1.0) + +Several functions were renamed in this release. They will still function but may be deprecated in the future.
+ +### Package changes: + +- Rename `VarP` to `PopulationVariance` +- Rename `VarS` to `SampleVariance` +- Rename `LinReg` to `LinearRegression` +- Rename `ExpReg` to `ExponentialRegression` +- Rename `LogReg` to `LogarithmicRegression` +- Rename `StdDevP` to `StandardDeviationPopulation` +- Rename `StdDevS` to `StandardDeviationSample` + +## [0.0.9](https://github.com/montanaflynn/stats/tree/0.0.9) + +### Closed issues: + +- Functions have unexpected side effects [\#3](https://github.com/montanaflynn/stats/issues/3) +- Percentile is not calculated correctly [\#2](https://github.com/montanaflynn/stats/issues/2) + +### Merged pull requests: + +- Sample [\#4](https://github.com/montanaflynn/stats/pull/4) ([saromanov](https://github.com/saromanov)) + +### Package changes: + +- Add HarmonicMean func +- Add GeometricMean func +- Add Outliers struct and QuartileOutliers func +- Add Interquartile Range, Midhinge and Trimean examples +- Add Trimean +- Add Midhinge +- Add Inter Quartile Range +- Add Quartiles struct and Quartile func +- Add Nearest Rank method of calculating percentiles +- Add errors for all functions +- Add sample +- Add Linear, Exponential and Logarithmic Regression +- Add sample and population variance and deviation +- Add Percentile and Float64ToInt +- Add Round +- Add Standard deviation +- Add Sum +- Add Min and Max +- Add Mean, Median and Mode diff --git a/vendor/github.com/montanaflynn/stats/LICENSE b/vendor/github.com/montanaflynn/stats/LICENSE new file mode 100644 index 0000000000..6648181765 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/montanaflynn/stats/Makefile b/vendor/github.com/montanaflynn/stats/Makefile new file mode 100644 index 0000000000..87844f485d --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/Makefile @@ -0,0 +1,29 @@ +.PHONY: all + +doc: + godoc `pwd` + +webdoc: + godoc -http=:44444 + +format: + go fmt + +test: + go test -race + +check: format test + +benchmark: + go test -bench=.
-benchmem + +coverage: + go test -coverprofile=coverage.out + go tool cover -html="coverage.out" + +lint: format + go get github.com/alecthomas/gometalinter + gometalinter --install + gometalinter + +default: lint test diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md new file mode 100644 index 0000000000..5f8a9291bf --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/README.md @@ -0,0 +1,103 @@ +# Stats [![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][godoc-svg]][godoc-url] [![][license-svg]][license-url] + +A statistics package with many functions missing from the Golang standard library. See the [CHANGELOG.md](https://github.com/montanaflynn/stats/blob/master/CHANGELOG.md) for API changes and tagged releases you can vendor into your projects. + +> Statistics are used much like a drunk uses a lamppost: for support, not illumination. **- Vin Scully** + +## Installation + +``` +go get github.com/montanaflynn/stats +``` + +**Protip:** `go get -u github.com/montanaflynn/stats` updates stats to the latest version. + +## Usage + +The [entire API documentation](http://godoc.org/github.com/montanaflynn/stats) is available on GoDoc.org + +You can view docs offline with the following commands: + +``` +godoc ./ +godoc ./ Median +godoc ./ Float64Data +``` + +**Protip:** Generate HTML docs with `godoc -http=:4444` + +## Example + +All the functions can be seen in [examples/main.go](https://github.com/montanaflynn/stats/blob/master/examples/main.go) but here's a little taste: + +```go +// start with some source data to use +var data = []float64{1, 2, 3, 4, 4, 5} + +median, _ := stats.Median(data) +fmt.Println(median) // 3.5 + +roundedMedian, _ := stats.Round(median, 0) +fmt.Println(roundedMedian) // 4 +``` + +**Protip:** You can [call methods](https://github.com/montanaflynn/stats/blob/master/examples/methods.go) on the data if using the Float64Data type: + +``` +var d stats.Float64Data = data + +max, _ := d.Max() +fmt.Println(max) // 5 +``` + +## Contributing + +If you have any suggestions, criticism or bug reports please [create an issue](https://github.com/montanaflynn/stats/issues) and I'll do my best to accommodate you. In addition simply starring the repo would show your support for the project and be very much appreciated! + +### Pull Requests + +Pull requests are always welcome no matter how big or small. Here's an easy way to do it: + +1. Fork it and clone your fork +2. Create new branch (`git checkout -b some-thing`) +3. Make the desired changes +4. Ensure tests pass (`go test -cover` or `make test`) +5. Commit changes (`git commit -am 'Did something'`) +6. Push branch (`git push origin some-thing`) +7. Submit pull request + +To make things as seamless as possible please also consider the following steps: + +- Update `README.md` to include new public types or functions in the documentation section. +- Update `examples/main.go` with a simple example of the new feature. +- Keep 100% code coverage (you can check with `make coverage`). +- Run [`gometalinter`](https://github.com/alecthomas/gometalinter) and make your code pass. +- Squash needless commits into single units of work with `git rebase -i new-feature`. + +#### Makefile + +I've included a [Makefile](https://github.com/montanaflynn/stats/blob/master/Makefile) that has a lot of helper targets for common actions such as linting, testing, code coverage reporting and more.
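Building on the README example above, here is a self-contained sketch that exercises a few Float64Data helpers together with the legacy shortcuts defined in legacy.go later in this diff; the values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	d := stats.Float64Data{1, 2, 3, 4, 4, 5}

	median, _ := d.Median()
	fmt.Println(median) // 3.5

	// The legacy shortcut delegates to the renamed function,
	// so VarP and PopulationVariance agree.
	oldVar, _ := stats.VarP(d)
	newVar, _ := stats.PopulationVariance(d)
	fmt.Println(oldVar == newVar) // true
}
```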
+ +**Protip:** `watch -n 1 make check` will continuously format and test your code. + +## MIT License + +Copyright (c) 2014-2015 Montana Flynn + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +[travis-url]: https://travis-ci.org/montanaflynn/stats +[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg + +[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master +[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg + +[godoc-url]: https://godoc.org/github.com/montanaflynn/stats +[godoc-svg]: https://godoc.org/github.com/montanaflynn/stats?status.svg + +[license-url]: https://github.com/montanaflynn/stats/blob/master/LICENSE +[license-svg]: https://img.shields.io/badge/license-MIT-blue.svg diff --git a/vendor/github.com/montanaflynn/stats/correlation.go b/vendor/github.com/montanaflynn/stats/correlation.go new file mode 100644 index 0000000000..d759bf8c42 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/correlation.go @@ -0,0 +1,33 @@ +package stats + +import "math" + +// Correlation describes the degree of relationship between two sets of data +func Correlation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInput + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + sdev1, _ := StandardDeviationPopulation(data1) + sdev2, _ := StandardDeviationPopulation(data2) + + if sdev1 == 0 || sdev2 == 0 { + return 0, nil + } + + covp, _ := CovariancePopulation(data1, data2) + return covp / (sdev1 * sdev2), nil +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables.
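Correlation above divides the population covariance by the product of the population standard deviations, so inputs related by y = 2x yield a coefficient of 1. A small hypothetical usage sketch:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	x := stats.Float64Data{1, 2, 3, 4, 5}
	y := stats.Float64Data{2, 4, 6, 8, 10} // y = 2x, perfectly correlated

	r, err := stats.Correlation(x, y)
	if err != nil {
		panic(err)
	}
	fmt.Println(r) // ≈ 1
}
```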
+func Pearson(data1, data2 Float64Data) (float64, error) { + return Correlation(data1, data2) +} diff --git a/vendor/github.com/montanaflynn/stats/data.go b/vendor/github.com/montanaflynn/stats/data.go new file mode 100644 index 0000000000..a087f457a0 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/data.go @@ -0,0 +1,140 @@ +package stats + +// Float64Data is a named type for []float64 with helper methods +type Float64Data []float64 + +// Get item in slice +func (f Float64Data) Get(i int) float64 { return f[i] } + +// Len returns length of slice +func (f Float64Data) Len() int { return len(f) } + +// Less returns if one number is less than another +func (f Float64Data) Less(i, j int) bool { return f[i] < f[j] } + +// Swap switches out two numbers in slice +func (f Float64Data) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// Min returns the minimum number in the data +func (f Float64Data) Min() (float64, error) { return Min(f) } + +// Max returns the maximum number in the data +func (f Float64Data) Max() (float64, error) { return Max(f) } + +// Sum returns the total of all the numbers in the data +func (f Float64Data) Sum() (float64, error) { return Sum(f) } + +// Mean returns the mean of the data +func (f Float64Data) Mean() (float64, error) { return Mean(f) } + +// Median returns the median of the data +func (f Float64Data) Median() (float64, error) { return Median(f) } + +// Mode returns the mode of the data +func (f Float64Data) Mode() ([]float64, error) { return Mode(f) } + +// GeometricMean returns the geometric mean of the data +func (f Float64Data) GeometricMean() (float64, error) { return GeometricMean(f) } + +// HarmonicMean returns the harmonic mean of the data +func (f Float64Data) HarmonicMean() (float64, error) { return HarmonicMean(f) } + +// MedianAbsoluteDeviation the median of the absolute deviations from the dataset median +func (f Float64Data) MedianAbsoluteDeviation() (float64, error) { + return MedianAbsoluteDeviation(f) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func (f Float64Data) MedianAbsoluteDeviationPopulation() (float64, error) { + return MedianAbsoluteDeviationPopulation(f) +} + +// StandardDeviation the amount of variation in the dataset +func (f Float64Data) StandardDeviation() (float64, error) { + return StandardDeviation(f) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func (f Float64Data) StandardDeviationPopulation() (float64, error) { + return StandardDeviationPopulation(f) +} + +// StandardDeviationSample finds the amount of variation from a sample +func (f Float64Data) StandardDeviationSample() (float64, error) { + return StandardDeviationSample(f) +} + +// QuartileOutliers finds the mild and extreme outliers +func (f Float64Data) QuartileOutliers() (Outliers, error) { + return QuartileOutliers(f) +} + +// Percentile finds the relative standing in a slice of floats +func (f Float64Data) Percentile(p float64) (float64, error) { + return Percentile(f, p) +} + +// PercentileNearestRank finds the relative standing using the Nearest Rank method +func (f Float64Data) PercentileNearestRank(p float64) (float64, error) { + return PercentileNearestRank(f, p) +} + +// Correlation describes the degree of relationship between two sets of data +func (f Float64Data) Correlation(d Float64Data) (float64, error) { + return Correlation(f, d) +} + +// Pearson calculates the Pearson product-moment correlation coefficient between two variables.
+func (f Float64Data) Pearson(d Float64Data) (float64, error) { + return Pearson(f, d) +} + +// Quartile returns the three quartile points from a slice of data +func (f Float64Data) Quartile(d Float64Data) (Quartiles, error) { + return Quartile(d) +} + +// InterQuartileRange finds the range between Q1 and Q3 +func (f Float64Data) InterQuartileRange() (float64, error) { + return InterQuartileRange(f) +} + +// Midhinge finds the average of the first and third quartiles +func (f Float64Data) Midhinge(d Float64Data) (float64, error) { + return Midhinge(d) +} + +// Trimean finds the average of the median and the midhinge +func (f Float64Data) Trimean(d Float64Data) (float64, error) { + return Trimean(d) +} + +// Sample returns sample from input with replacement or without +func (f Float64Data) Sample(n int, r bool) ([]float64, error) { + return Sample(f, n, r) +} + +// Variance the amount of variation in the dataset +func (f Float64Data) Variance() (float64, error) { + return Variance(f) +} + +// PopulationVariance finds the amount of variance within a population +func (f Float64Data) PopulationVariance() (float64, error) { + return PopulationVariance(f) +} + +// SampleVariance finds the amount of variance within a sample +func (f Float64Data) SampleVariance() (float64, error) { + return SampleVariance(f) +} + +// Covariance is a measure of how much two sets of data change +func (f Float64Data) Covariance(d Float64Data) (float64, error) { + return Covariance(f, d) +} + +// CovariancePopulation computes covariance for entire population between two variables. +func (f Float64Data) CovariancePopulation(d Float64Data) (float64, error) { + return CovariancePopulation(f, d) +} diff --git a/vendor/github.com/montanaflynn/stats/data_set_distances.go b/vendor/github.com/montanaflynn/stats/data_set_distances.go new file mode 100644 index 0000000000..2e549c8d49 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/data_set_distances.go @@ -0,0 +1,94 @@ +package stats + +import ( + "math" +) + +// Validate data for distance calculation +func validateData(dataPointX, dataPointY []float64) error { + if len(dataPointX) == 0 || len(dataPointY) == 0 { + return EmptyInput + } + + if len(dataPointX) != len(dataPointY) { + return SizeErr + } + return nil +} + +// Computes Chebyshev distance between two data sets +func ChebyshevDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + var tempDistance float64 + for i := 0; i < len(dataPointY); i++ { + tempDistance = math.Abs(dataPointX[i] - dataPointY[i]) + if distance < tempDistance { + distance = tempDistance + } + } + return distance, nil +} + +// +// Computes Euclidean distance between two data sets +// +func EuclideanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + ((dataPointX[i] - dataPointY[i]) * (dataPointX[i] - dataPointY[i])) + } + return math.Sqrt(distance), nil +} + +// +// Computes Manhattan distance between two data sets +// +func ManhattanDistance(dataPointX, dataPointY []float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + distance = 0 + for i := 0; i < len(dataPointX); i++ { + distance = distance + math.Abs(dataPointX[i]-dataPointY[i]) + } + return distance, 
nil +} + +// +// Computes Minkowski distance between two data sets. +// +// Input: +// dataPointX: First set of data points +// dataPointY: Second set of data points. Length of both data +// sets must be equal. +// lambda: aka p or city blocks; with lambda = 1 the +// returned distance is the Manhattan distance, with +// lambda = 2 it is the Euclidean distance, and as +// lambda approaches infinity it approaches the +// Chebyshev distance. +// Output: +// Distance or error +// +func MinkowskiDistance(dataPointX, dataPointY []float64, lambda float64) (distance float64, err error) { + err = validateData(dataPointX, dataPointY) + if err != nil { + return math.NaN(), err + } + for i := 0; i < len(dataPointY); i++ { + distance = distance + math.Pow(math.Abs(dataPointX[i]-dataPointY[i]), lambda) + } + distance = math.Pow(distance, float64(1/lambda)) + if math.IsInf(distance, 1) == true { + return math.NaN(), InfValue + } + return distance, nil +} diff --git a/vendor/github.com/montanaflynn/stats/deviation.go b/vendor/github.com/montanaflynn/stats/deviation.go new file mode 100644 index 0000000000..539c02bcfd --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/deviation.go @@ -0,0 +1,57 @@ +package stats + +import "math" + +// MedianAbsoluteDeviation finds the median of the absolute deviations from the dataset median +func MedianAbsoluteDeviation(input Float64Data) (mad float64, err error) { + return MedianAbsoluteDeviationPopulation(input) +} + +// MedianAbsoluteDeviationPopulation finds the median of the absolute deviations from the population median +func MedianAbsoluteDeviationPopulation(input Float64Data) (mad float64, err error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + i := copyslice(input) + m, _ := Median(i) + + for key, value := range i { + i[key] = math.Abs(value - m) + } + + return Median(i) +} + +// StandardDeviation the amount of variation in the dataset +func StandardDeviation(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StandardDeviationPopulation finds the amount of variation from the population +func StandardDeviationPopulation(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the population variance + vp, _ := PopulationVariance(input) + + // Return the population standard deviation + return math.Pow(vp, 0.5), nil +} + +// StandardDeviationSample finds the amount of variation from a sample +func StandardDeviationSample(input Float64Data) (sdev float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the sample variance + vs, _ := SampleVariance(input) + + // Return the sample standard deviation + return math.Pow(vs, 0.5), nil +} diff --git a/vendor/github.com/montanaflynn/stats/errors.go b/vendor/github.com/montanaflynn/stats/errors.go new file mode 100644 index 0000000000..0bb32f0dd6 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/errors.go @@ -0,0 +1,22 @@ +package stats + +type statsErr struct { + err string +} + +func (s statsErr) Error() string { + return s.err +} + +// These are the package-wide error values. +// All error identification should use these values.
+var ( + EmptyInput = statsErr{"Input must not be empty."} + SampleSize = statsErr{"Samples number must be less than input length."} + NaNErr = statsErr{"Not a number"} + NegativeErr = statsErr{"Slice must not contain negative values."} + ZeroErr = statsErr{"Slice must not contain zero values."} + BoundsErr = statsErr{"Input is outside of range."} + SizeErr = statsErr{"Slices must be the same length."} + InfValue = statsErr{"Value is infinite."} +) diff --git a/vendor/github.com/montanaflynn/stats/legacy.go b/vendor/github.com/montanaflynn/stats/legacy.go new file mode 100644 index 0000000000..17557abd99 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/legacy.go @@ -0,0 +1,36 @@ +package stats + +// VarP is a shortcut to PopulationVariance +func VarP(input Float64Data) (sdev float64, err error) { + return PopulationVariance(input) +} + +// VarS is a shortcut to SampleVariance +func VarS(input Float64Data) (sdev float64, err error) { + return SampleVariance(input) +} + +// StdDevP is a shortcut to StandardDeviationPopulation +func StdDevP(input Float64Data) (sdev float64, err error) { + return StandardDeviationPopulation(input) +} + +// StdDevS is a shortcut to StandardDeviationSample +func StdDevS(input Float64Data) (sdev float64, err error) { + return StandardDeviationSample(input) +} + +// LinReg is a shortcut to LinearRegression +func LinReg(s []Coordinate) (regressions []Coordinate, err error) { + return LinearRegression(s) +} + +// ExpReg is a shortcut to ExponentialRegression +func ExpReg(s []Coordinate) (regressions []Coordinate, err error) { + return ExponentialRegression(s) +} + +// LogReg is a shortcut to LogarithmicRegression +func LogReg(s []Coordinate) (regressions []Coordinate, err error) { + return LogarithmicRegression(s) +} diff --git a/vendor/github.com/montanaflynn/stats/load.go b/vendor/github.com/montanaflynn/stats/load.go new file mode 100644 index 0000000000..1012d0bb54 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/load.go @@ -0,0 +1,184 @@ +package stats + +import ( + "strconv" + "time" +) + +// LoadRawData parses and converts a slice of mixed data types to floats +func LoadRawData(raw interface{}) (f Float64Data) { + var r []interface{} + var s Float64Data + + switch t := raw.(type) { + case []interface{}: + r = t + case []uint: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []uint64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []bool: + for _, v := range t { + if v == true { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case []float64: + return Float64Data(t) + case []int: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int8: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int16: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int32: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []int64: + for _, v := range t { + s = append(s, float64(v)) + } + return s + case []string: + for _, v := range t { + r = append(r, v) + } + case []time.Duration: + for _, v := range t { + r = append(r, v) + } + case map[int]int: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int8: + 
for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]int64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]string: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + case map[int]uint: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint8: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint16: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint32: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]uint64: + for i := 0; i < len(t); i++ { + s = append(s, float64(t[i])) + } + return s + case map[int]bool: + for i := 0; i < len(t); i++ { + if t[i] == true { + s = append(s, 1.0) + } else { + s = append(s, 0.0) + } + } + return s + case map[int]float64: + for i := 0; i < len(t); i++ { + s = append(s, t[i]) + } + return s + case map[int]time.Duration: + for i := 0; i < len(t); i++ { + r = append(r, t[i]) + } + } + + for _, v := range r { + switch t := v.(type) { + case int: + a := float64(t) + f = append(f, a) + case uint: + f = append(f, float64(t)) + case float64: + f = append(f, t) + case string: + fl, err := strconv.ParseFloat(t, 64) + if err == nil { + f = append(f, fl) + } + case bool: + if t == true { + f = append(f, 1.0) + } else { + f = append(f, 0.0) + } + case time.Duration: + f = append(f, float64(t)) + } + } + return f +} diff --git a/vendor/github.com/montanaflynn/stats/max.go b/vendor/github.com/montanaflynn/stats/max.go new file mode 100644 index 0000000000..d0fdd42b48 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/max.go @@ -0,0 +1,24 @@ +package stats + +import "math" + +// Max finds the highest number in a slice +func Max(input Float64Data) (max float64, err error) { + + // Return an error if there are no numbers + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + // Get the first value as the starting point + max = input.Get(0) + + // Loop and replace higher values + for i := 1; i < input.Len(); i++ { + if input.Get(i) > max { + max = input.Get(i) + } + } + + return max, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mean.go b/vendor/github.com/montanaflynn/stats/mean.go new file mode 100644 index 0000000000..944bb65721 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mean.go @@ -0,0 +1,60 @@ +package stats + +import "math" + +// Mean gets the average of a slice of numbers +func Mean(input Float64Data) (float64, error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + sum, _ := input.Sum() + + return sum / float64(input.Len()), nil +} + +// GeometricMean gets the geometric mean for a slice of numbers +func GeometricMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the product of all the numbers + var p float64 + for _, n := range input { + if p == 0 { + p = n + } else { + p *= n + } + } + + // Calculate the geometric mean + return math.Pow(p, 1/float64(l)), nil +} + +// HarmonicMean gets the harmonic mean for a slice of numbers +func HarmonicMean(input Float64Data) (float64, error) { + + l := input.Len() + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the sum of all the 
numbers reciprocals and return an + // error for values that cannot be included in harmonic mean + var p float64 + for _, n := range input { + if n < 0 { + return math.NaN(), NegativeErr + } else if n == 0 { + return math.NaN(), ZeroErr + } + p += (1 / n) + } + + return float64(l) / p, nil +} diff --git a/vendor/github.com/montanaflynn/stats/median.go b/vendor/github.com/montanaflynn/stats/median.go new file mode 100644 index 0000000000..b13d8394bb --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/median.go @@ -0,0 +1,25 @@ +package stats + +import "math" + +// Median gets the median number in a slice of numbers +func Median(input Float64Data) (median float64, err error) { + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // No math is needed if there are no numbers + // For even numbers we add the two middle numbers + // and divide by two using the mean function above + // For odd numbers we just use the middle number + l := len(c) + if l == 0 { + return math.NaN(), EmptyInput + } else if l%2 == 0 { + median, _ = Mean(c[l/2-1 : l/2+1]) + } else { + median = float64(c[l/2]) + } + + return median, nil +} diff --git a/vendor/github.com/montanaflynn/stats/min.go b/vendor/github.com/montanaflynn/stats/min.go new file mode 100644 index 0000000000..4383852e15 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/min.go @@ -0,0 +1,26 @@ +package stats + +import "math" + +// Min finds the lowest number in a set of data +func Min(input Float64Data) (min float64, err error) { + + // Get the count of numbers in the slice + l := input.Len() + + // Return an error if there are no numbers + if l == 0 { + return math.NaN(), EmptyInput + } + + // Get the first value as the starting point + min = input.Get(0) + + // Iterate until done checking for a lower value + for i := 1; i < l; i++ { + if input.Get(i) < min { + min = input.Get(i) + } + } + return min, nil +} diff --git a/vendor/github.com/montanaflynn/stats/mode.go b/vendor/github.com/montanaflynn/stats/mode.go new file mode 100644 index 0000000000..1160faf285 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/mode.go @@ -0,0 +1,47 @@ +package stats + +// Mode gets the mode [most frequent value(s)] of a slice of float64s +func Mode(input Float64Data) (mode []float64, err error) { + // Return the input if there's only one number + l := input.Len() + if l == 1 { + return input, nil + } else if l == 0 { + return nil, EmptyInput + } + + c := sortedCopyDif(input) + // Traverse sorted array, + // tracking the longest repeating sequence + mode = make([]float64, 5) + cnt, maxCnt := 1, 1 + for i := 1; i < l; i++ { + switch { + case c[i] == c[i-1]: + cnt++ + case cnt == maxCnt && maxCnt != 1: + mode = append(mode, c[i-1]) + cnt = 1 + case cnt > maxCnt: + mode = append(mode[:0], c[i-1]) + maxCnt, cnt = cnt, 1 + default: + cnt = 1 + } + } + switch { + case cnt == maxCnt: + mode = append(mode, c[l-1]) + case cnt > maxCnt: + mode = append(mode[:0], c[l-1]) + maxCnt = cnt + } + + // Since length must be greater than 1, + // check for slices of distinct values + if maxCnt == 1 { + return Float64Data{}, nil + } + + return mode, nil +} diff --git a/vendor/github.com/montanaflynn/stats/outlier.go b/vendor/github.com/montanaflynn/stats/outlier.go new file mode 100644 index 0000000000..e969180ea7 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/outlier.go @@ -0,0 +1,44 @@ +package stats + +// Outliers holds mild and extreme outliers found in data +type Outliers struct { + Mild Float64Data + Extreme Float64Data +} + +// 
QuartileOutliers finds the mild and extreme outliers +func QuartileOutliers(input Float64Data) (Outliers, error) { + if input.Len() == 0 { + return Outliers{}, EmptyInput + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Calculate the quartiles and interquartile range + qs, _ := Quartile(copy) + iqr, _ := InterQuartileRange(copy) + + // Calculate the lower and upper inner and outer fences + lif := qs.Q1 - (1.5 * iqr) + uif := qs.Q3 + (1.5 * iqr) + lof := qs.Q1 - (3 * iqr) + uof := qs.Q3 + (3 * iqr) + + // Find the data points that are outside of the + // inner and upper fences and add them to mild + // and extreme outlier slices + var mild Float64Data + var extreme Float64Data + for _, v := range copy { + + if v < lof || v > uof { + extreme = append(extreme, v) + } else if v < lif || v > uif { + mild = append(mild, v) + } + } + + // Wrap them into our struct + return Outliers{mild, extreme}, nil +} diff --git a/vendor/github.com/montanaflynn/stats/percentile.go b/vendor/github.com/montanaflynn/stats/percentile.go new file mode 100644 index 0000000000..baf24d8e36 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/percentile.go @@ -0,0 +1,80 @@ +package stats + +import "math" + +// Percentile finds the relative standing in a slice of floats +func Percentile(input Float64Data, percent float64) (percentile float64, err error) { + + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + if percent <= 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Multiply percent by length of input + index := (percent / 100) * float64(len(c)) + + // Check if the index is a whole number + if index == float64(int64(index)) { + + // Convert float to int + i := int(index) + + // Find the value at the index + percentile = c[i-1] + + } else if index > 1 { + + // Convert float to int via truncation + i := int(index) + + // Find the average of the index and following values + percentile, _ = Mean(Float64Data{c[i-1], c[i]}) + + } else { + return math.NaN(), BoundsErr + } + + return percentile, nil + +} + +// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method +func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) { + + // Find the length of items in the slice + il := input.Len() + + // Return an error for empty slices + if il == 0 { + return math.NaN(), EmptyInput + } + + // Return error for less than 0 or greater than 100 percentages + if percent < 0 || percent > 100 { + return math.NaN(), BoundsErr + } + + // Start by sorting a copy of the slice + c := sortedCopy(input) + + // Return the last item + if percent == 100.0 { + return c[il-1], nil + } + + // Find ordinal ranking + or := int(math.Ceil(float64(il) * percent / 100)) + + // Return the item that is in the place of the ordinal rank + if or == 0 { + return c[0], nil + } + return c[or-1], nil + +} diff --git a/vendor/github.com/montanaflynn/stats/quartile.go b/vendor/github.com/montanaflynn/stats/quartile.go new file mode 100644 index 0000000000..29bb3a37a3 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/quartile.go @@ -0,0 +1,74 @@ +package stats + +import "math" + +// Quartiles holds the three quartile points +type Quartiles struct { + Q1 float64 + Q2 float64 + Q3 float64 +} + +// Quartile returns the three quartile points from a slice of data +func Quartile(input Float64Data) (Quartiles, error) { + + il := input.Len() + if il == 0 { + 
return Quartiles{}, EmptyInput + } + + // Start by sorting a copy of the slice + copy := sortedCopy(input) + + // Find the cutoff places depending on whether + // the input slice length is even or odd + var c1 int + var c2 int + if il%2 == 0 { + c1 = il / 2 + c2 = il / 2 + } else { + c1 = (il - 1) / 2 + c2 = c1 + 1 + } + + // Find the Medians with the cutoff points + Q1, _ := Median(copy[:c1]) + Q2, _ := Median(copy) + Q3, _ := Median(copy[c2:]) + + return Quartiles{Q1, Q2, Q3}, nil + +} + +// InterQuartileRange finds the range between Q1 and Q3 +func InterQuartileRange(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + qs, _ := Quartile(input) + iqr := qs.Q3 - qs.Q1 + return iqr, nil +} + +// Midhinge finds the average of the first and third quartiles +func Midhinge(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + qs, _ := Quartile(input) + mh := (qs.Q1 + qs.Q3) / 2 + return mh, nil +} + +// Trimean finds the average of the median and the midhinge +func Trimean(input Float64Data) (float64, error) { + if input.Len() == 0 { + return math.NaN(), EmptyInput + } + + c := sortedCopy(input) + q, _ := Quartile(c) + + return (q.Q1 + (q.Q2 * 2) + q.Q3) / 4, nil +} diff --git a/vendor/github.com/montanaflynn/stats/regression.go b/vendor/github.com/montanaflynn/stats/regression.go new file mode 100644 index 0000000000..a37a740609 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/regression.go @@ -0,0 +1,113 @@ +package stats + +import "math" + +// Series is a container for a series of data +type Series []Coordinate + +// Coordinate holds the data in a series +type Coordinate struct { + X, Y float64 +} + +// LinearRegression finds the least squares linear regression on data series +func LinearRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput + } + + // Placeholder for the math to be done + var sum [5]float64 + + // Loop over data keeping index in place + i := 0 + for ; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X + sum[3] += s[i].X * s[i].Y + sum[4] += s[i].Y * s[i].Y + } + + // Find gradient and intercept + f := float64(i) + gradient := (f*sum[3] - sum[0]*sum[1]) / (f*sum[2] - sum[0]*sum[0]) + intercept := (sum[1] / f) - (gradient * sum[0] / f) + + // Create the new regression series + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: s[j].X*gradient + intercept, + }) + } + + return regressions, nil + +} + +// ExponentialRegression returns an exponential regression on data series +func ExponentialRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput + } + + var sum [6]float64 + + for i := 0; i < len(s); i++ { + sum[0] += s[i].X + sum[1] += s[i].Y + sum[2] += s[i].X * s[i].X * s[i].Y + sum[3] += s[i].Y * math.Log(s[i].Y) + sum[4] += s[i].X * s[i].Y * math.Log(s[i].Y) + sum[5] += s[i].X * s[i].Y + } + + denominator := (sum[1]*sum[2] - sum[5]*sum[5]) + a := math.Pow(math.E, (sum[2]*sum[3]-sum[5]*sum[4])/denominator) + b := (sum[1]*sum[4] - sum[5]*sum[3]) / denominator + + for j := 0; j < len(s); j++ { + regressions = append(regressions, Coordinate{ + X: s[j].X, + Y: a * math.Exp(b*s[j].X), + }) + } + + return regressions, nil + +} + +// LogarithmicRegression returns a logarithmic regression on data series +func LogarithmicRegression(s Series) (regressions Series, err error) { + + if len(s) == 0 { + return nil, EmptyInput +
+	}
+
+	var sum [4]float64
+
+	i := 0
+	for ; i < len(s); i++ {
+		sum[0] += math.Log(s[i].X)
+		sum[1] += s[i].Y * math.Log(s[i].X)
+		sum[2] += s[i].Y
+		sum[3] += math.Pow(math.Log(s[i].X), 2)
+	}
+
+	f := float64(i)
+	a := (f*sum[1] - sum[2]*sum[0]) / (f*sum[3] - sum[0]*sum[0])
+	b := (sum[2] - a*sum[0]) / f
+
+	for j := 0; j < len(s); j++ {
+		regressions = append(regressions, Coordinate{
+			X: s[j].X,
+			Y: b + a*math.Log(s[j].X),
+		})
+	}
+
+	return regressions, nil
+
+}
diff --git a/vendor/github.com/montanaflynn/stats/round.go b/vendor/github.com/montanaflynn/stats/round.go
new file mode 100644
index 0000000000..b66779c9fc
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/round.go
@@ -0,0 +1,38 @@
+package stats
+
+import "math"
+
+// Round a float to a specific decimal place or precision
+func Round(input float64, places int) (rounded float64, err error) {
+
+	// If the float is not a number
+	if math.IsNaN(input) {
+		return math.NaN(), NaNErr
+	}
+
+	// Find out the actual sign and correct the input for later
+	sign := 1.0
+	if input < 0 {
+		sign = -1
+		input *= -1
+	}
+
+	// Use the places arg to get the amount of precision wanted
+	precision := math.Pow(10, float64(places))
+
+	// Find the decimal place we are looking to round
+	digit := input * precision
+
+	// Get the actual decimal number as a fraction to be compared
+	_, decimal := math.Modf(digit)
+
+	// If the decimal is less than .5 we round down otherwise up
+	if decimal >= 0.5 {
+		rounded = math.Ceil(digit)
+	} else {
+		rounded = math.Floor(digit)
+	}
+
+	// Finally we do the math to actually create a rounded number
+	return rounded / precision * sign, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/sample.go b/vendor/github.com/montanaflynn/stats/sample.go
new file mode 100644
index 0000000000..a52f6dcaaf
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/sample.go
@@ -0,0 +1,44 @@
+package stats
+
+import "math/rand"
+
+// Sample returns a sample of the input, with or without replacement
+func Sample(input Float64Data, takenum int, replacement bool) ([]float64, error) {
+
+	if input.Len() == 0 {
+		return nil, EmptyInput
+	}
+
+	length := input.Len()
+	if replacement {
+
+		result := Float64Data{}
+		rand.Seed(unixnano())
+
+		// On each draw, pick a random index and take that element
+		for i := 0; i < takenum; i++ {
+			idx := rand.Intn(length)
+			result = append(result, input[idx])
+		}
+
+		return result, nil
+
+	} else if !replacement && takenum <= length {
+
+		rand.Seed(unixnano())
+
+		// Get a permutation of the input's indices
+		perm := rand.Perm(length)
+		result := Float64Data{}
+
+		// Take the input elements at the first takenum permuted indices
+		for _, idx := range perm[0:takenum] {
+			result = append(result, input[idx])
+		}
+
+		return result, nil
+
+	}
+
+	return nil, BoundsErr
+}
diff --git a/vendor/github.com/montanaflynn/stats/sum.go b/vendor/github.com/montanaflynn/stats/sum.go
new file mode 100644
index 0000000000..53485f17c2
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/sum.go
@@ -0,0 +1,18 @@
+package stats
+
+import "math"
+
+// Sum adds all the numbers of a slice together
+func Sum(input Float64Data) (sum float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInput
+	}
+
+	// Add em up
+	for _, n := range input {
+		sum += n
+	}
+
+	return sum, nil
+}
diff --git a/vendor/github.com/montanaflynn/stats/util.go b/vendor/github.com/montanaflynn/stats/util.go
new file mode 100644
index 0000000000..881997604d
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/util.go
@@ -0,0 +1,43 @@
+package stats
+
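For orientation, a minimal sketch of how the montanaflynn/stats helpers vendored above are typically driven. The types and functions (Float64Data, Quartile, InterQuartileRange, Round, LinearRegression, Series) are the ones defined in this diff; the data values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := stats.Float64Data{2, 4, 4, 5, 7, 9, 11, 12}

	q, _ := stats.Quartile(data)             // q.Q1, q.Q2 (median), q.Q3
	iqr, _ := stats.InterQuartileRange(data) // q.Q3 - q.Q1
	rounded, _ := stats.Round(iqr, 2)        // round to two decimal places
	fmt.Println(q.Q1, q.Q2, q.Q3, iqr, rounded)

	// Fit y = gradient*x + intercept and get the fitted Y for each X.
	line, _ := stats.LinearRegression(stats.Series{
		{X: 1, Y: 2.2}, {X: 2, Y: 3.9}, {X: 3, Y: 6.1},
	})
	fmt.Println(line)
}
```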
+import (
+	"sort"
+	"time"
+)
+
+// float64ToInt rounds a float64 to an int
+func float64ToInt(input float64) (output int) {
+	r, _ := Round(input, 0)
+	return int(r)
+}
+
+// unixnano returns nanoseconds from UTC epoch
+func unixnano() int64 {
+	return time.Now().UTC().UnixNano()
+}
+
+// copyslice copies a slice of float64s
+func copyslice(input Float64Data) Float64Data {
+	s := make(Float64Data, input.Len())
+	copy(s, input)
+	return s
+}
+
+// sortedCopy returns a sorted copy of float64s
+func sortedCopy(input Float64Data) (copy Float64Data) {
+	copy = copyslice(input)
+	sort.Float64s(copy)
+	return
+}
+
+// sortedCopyDif returns a sorted copy of float64s
+// only if the original data isn't sorted.
+// Only use this if returned slice won't be manipulated!
+func sortedCopyDif(input Float64Data) (copy Float64Data) {
+	if sort.Float64sAreSorted(input) {
+		return input
+	}
+	copy = copyslice(input)
+	sort.Float64s(copy)
+	return
+}
diff --git a/vendor/github.com/montanaflynn/stats/variance.go b/vendor/github.com/montanaflynn/stats/variance.go
new file mode 100644
index 0000000000..66e60c941f
--- /dev/null
+++ b/vendor/github.com/montanaflynn/stats/variance.go
@@ -0,0 +1,105 @@
+package stats
+
+import "math"
+
+// _variance finds the variance for both population and sample data
+func _variance(input Float64Data, sample int) (variance float64, err error) {
+
+	if input.Len() == 0 {
+		return math.NaN(), EmptyInput
+	}
+
+	// Sum the square of the mean subtracted from each number
+	m, _ := Mean(input)
+
+	for _, n := range input {
+		variance += (float64(n) - m) * (float64(n) - m)
+	}
+
+	// When getting the mean of the squared differences
+	// "sample" will allow us to know if it's a sample
+	// or population and whether to subtract by one or not
+	return variance / float64((input.Len() - (1 * sample))), nil
+}
+
+// Variance finds the amount of variation in the dataset
+func Variance(input Float64Data) (sdev float64, err error) {
+	return PopulationVariance(input)
+}
+
+// PopulationVariance finds the amount of variance within a population
+func PopulationVariance(input Float64Data) (pvar float64, err error) {
+
+	v, err := _variance(input, 0)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// SampleVariance finds the amount of variance within a sample
+func SampleVariance(input Float64Data) (svar float64, err error) {
+
+	v, err := _variance(input, 1)
+	if err != nil {
+		return math.NaN(), err
+	}
+
+	return v, nil
+}
+
+// Covariance is a measure of how much two sets of data change together
+func Covariance(data1, data2 Float64Data) (float64, error) {
+
+	l1 := data1.Len()
+	l2 := data2.Len()
+
+	if l1 == 0 || l2 == 0 {
+		return math.NaN(), EmptyInput
+	}
+
+	if l1 != l2 {
+		return math.NaN(), SizeErr
+	}
+
+	m1, _ := Mean(data1)
+	m2, _ := Mean(data2)
+
+	// Calculate sum of squares
+	var ss float64
+	for i := 0; i < l1; i++ {
+		delta1 := (data1.Get(i) - m1)
+		delta2 := (data2.Get(i) - m2)
+		ss += (delta1*delta2 - ss) / float64(i+1)
+	}
+
+	return ss * float64(l1) / float64(l1-1), nil
+}
+
+// CovariancePopulation computes covariance for entire population between two variables.
+func CovariancePopulation(data1, data2 Float64Data) (float64, error) { + + l1 := data1.Len() + l2 := data2.Len() + + if l1 == 0 || l2 == 0 { + return math.NaN(), EmptyInput + } + + if l1 != l2 { + return math.NaN(), SizeErr + } + + m1, _ := Mean(data1) + m2, _ := Mean(data2) + + var s float64 + for i := 0; i < l1; i++ { + delta1 := (data1.Get(i) - m1) + delta2 := (data2.Get(i) - m2) + s += delta1 * delta2 + } + + return s / float64(l1), nil +} diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/mozillazg/docker-credential-acr-helper/LICENSE similarity index 95% rename from vendor/github.com/go-stack/stack/LICENSE.md rename to vendor/github.com/mozillazg/docker-credential-acr-helper/LICENSE index 2abf98ea83..3c9120633b 100644 --- a/vendor/github.com/go-stack/stack/LICENSE.md +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2014 Chris Hines +Copyright (c) 2022 mozillazg Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go new file mode 100644 index 0000000000..72341b7068 --- /dev/null +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/client.go @@ -0,0 +1,41 @@ +package acr + +import ( + "time" +) + +type Client struct{} + +type Credentials struct { + UserName string + Password string + ExpireTime time.Time +} + +func (c *Client) GetCredentials(serverURL string) (*Credentials, error) { + registry, err := parseServerURL(serverURL) + if err != nil { + return nil, err + } + + if registry.IsEE { + client, err := newEEClient(registry.Region) + if err != nil { + return nil, err + } + if registry.InstanceId == "" { + instanceId, err := client.getInstanceId(registry.InstanceName) + if err != nil { + return nil, err + } + registry.InstanceId = instanceId + } + return client.getCredentials(registry.InstanceId) + } + + client, err := newPersonClient(registry.Region) + if err != nil { + return nil, err + } + return client.getCredentials() +} diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go new file mode 100644 index 0000000000..d92095c82c --- /dev/null +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/ee.go @@ -0,0 +1,79 @@ +package acr + +import ( + "fmt" + "time" + + cr2018 "github.com/alibabacloud-go/cr-20181201/client" + openapi "github.com/alibabacloud-go/darabonba-openapi/client" + "github.com/alibabacloud-go/tea/tea" + "github.com/mozillazg/docker-credential-acr-helper/pkg/version" +) + +type eeClient struct { + client *cr2018.Client +} + +func newEEClient(region string) (*eeClient, error) { + cred, err := getOpenapiAuth() + if err != nil { + return nil, err + } + c := &openapi.Config{ + RegionId: tea.String(region), + Credential: cred, + UserAgent: tea.String(version.UserAgent()), + } + client, err := cr2018.NewClient(c) + if err != nil { + return nil, err + } + return &eeClient{client: client}, nil +} + +func (c *eeClient) getInstanceId(instanceName string) (string, error) { + req := &cr2018.ListInstanceRequest{ + InstanceName: tea.String(instanceName), + } + resp, err := c.client.ListInstance(req) + if err != nil { + return "", err + } + if resp.Body == nil { + return "", 
fmt.Errorf("get ACR EE instance id for name %q failed: %s", instanceName, resp.String()) + } + if !tea.BoolValue(resp.Body.IsSuccess) { + return "", fmt.Errorf("get ACR EE instance id for name %q failed: %s", instanceName, resp.Body.String()) + } + instances := resp.Body.Instances + if len(instances) == 0 { + return "", fmt.Errorf("get ACR EE instance id for name %q failed: instance name is not found", instanceName) + } + + return tea.StringValue(instances[0].InstanceId), nil +} + +func (c *eeClient) getCredentials(instanceId string) (*Credentials, error) { + req := &cr2018.GetAuthorizationTokenRequest{ + InstanceId: &instanceId, + } + resp, err := c.client.GetAuthorizationToken(req) + if err != nil { + return nil, err + } + if resp.Body == nil { + return nil, fmt.Errorf("get credentials failed: %s", resp.String()) + } + if !tea.BoolValue(resp.Body.IsSuccess) { + return nil, fmt.Errorf("get credentials failed: %s", resp.Body.String()) + } + + exp := tea.Int64Value(resp.Body.ExpireTime) / 1000 + expTime := time.Unix(exp, 0).UTC() + cred := &Credentials{ + UserName: tea.StringValue(resp.Body.TempUsername), + Password: tea.StringValue(resp.Body.AuthorizationToken), + ExpireTime: expTime, + } + return cred, nil +} diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go new file mode 100644 index 0000000000..696322342d --- /dev/null +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/openapiauth.go @@ -0,0 +1,44 @@ +package acr + +import ( + "os" + "path/filepath" + + "github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper" + "github.com/aliyun/credentials-go/credentials" + "github.com/mozillazg/docker-credential-acr-helper/pkg/version" +) + +var defaultProfilePath = filepath.Join("~", ".alibabacloud", "credentials") + +func getOpenapiAuth() (credentials.Credential, error) { + profilePath := defaultProfilePath + if os.Getenv(credentials.ENVCredentialFile) != "" { + profilePath = os.Getenv(credentials.ENVCredentialFile) + } + path, err := expandPath(profilePath) + if err == nil { + if _, err := os.Stat(path); err == nil { + _ = os.Setenv(credentials.ENVCredentialFile, path) + } + } + var conf *credentials.Config + + if helper.HaveOidcCredentialRequiredEnv() { + return helper.NewOidcCredential(version.ProjectName) + } + + cred, err := credentials.NewCredential(conf) + return cred, err +} + +func expandPath(path string) (string, error) { + if len(path) > 0 && path[0] == '~' { + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + path = filepath.Join(home, path[1:]) + } + return path, nil +} diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go new file mode 100644 index 0000000000..fdf05e1d72 --- /dev/null +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/person.go @@ -0,0 +1,126 @@ +package acr + +import ( + "fmt" + "time" + + cr2016 "github.com/alibabacloud-go/cr-20160607/client" + openapi "github.com/alibabacloud-go/darabonba-openapi/client" + util "github.com/alibabacloud-go/tea-utils/service" + "github.com/alibabacloud-go/tea/tea" + "github.com/mozillazg/docker-credential-acr-helper/pkg/version" +) + +type personClient struct { + client *cr2016.Client +} + +func newPersonClient(region string) (*personClient, error) { + cred, err := getOpenapiAuth() + if err != nil { + return 
nil, err + } + c := &openapi.Config{ + RegionId: tea.String(region), + Credential: cred, + UserAgent: tea.String(version.UserAgent()), + } + client, err := cr2016.NewClient(c) + if err != nil { + return nil, err + } + return &personClient{client: client}, nil +} + +func (c *personClient) getCredentials() (*Credentials, error) { + resp, err := c.GetAuthorizationToken() + if err != nil { + return nil, err + } + if resp.Body == nil || resp.Body.Data == nil { + return nil, fmt.Errorf("get credentials failed: %s", resp.String()) + } + + exp := tea.Int64Value(resp.Body.Data.ExpireTime) / 1000 + expTime := time.Unix(exp, 0).UTC() + cred := &Credentials{ + UserName: tea.StringValue(resp.Body.Data.TempUsername), + Password: tea.StringValue(resp.Body.Data.AuthorizationToken), + ExpireTime: expTime, + } + return cred, nil +} + +func (c *personClient) GetAuthorizationToken() (_result *getPersonAuthorizationTokenResponse, _err error) { + runtime := &util.RuntimeOptions{} + headers := make(map[string]*string) + _result = &getPersonAuthorizationTokenResponse{} + _body, _err := c.GetDefaultAuthorizationTokenWithOptions(headers, runtime) + if _err != nil { + return _result, _err + } + _result = _body + return _result, _err +} + +func (c *personClient) GetDefaultAuthorizationTokenWithOptions(headers map[string]*string, runtime *util.RuntimeOptions) (_result *getPersonAuthorizationTokenResponse, _err error) { + client := c.client + req := &openapi.OpenApiRequest{ + Headers: headers, + } + params := &openapi.Params{ + Action: tea.String("GetAuthorizationToken"), + Version: tea.String("2016-06-07"), + Protocol: tea.String("HTTPS"), + Pathname: tea.String("/tokens"), + Method: tea.String("GET"), + AuthType: tea.String("AK"), + Style: tea.String("ROA"), + ReqBodyType: tea.String("json"), + BodyType: tea.String("json"), + } + _result = &getPersonAuthorizationTokenResponse{} + _body, _err := client.CallApi(params, req, runtime) + if _err != nil { + return _result, _err + } + _err = tea.Convert(_body, &_result) + return _result, _err +} + +type getPersonAuthorizationTokenResponseBody struct { + Data *getPersonAuthorizationTokenData `json:"data,omitempty" xml:"data,omitempty"` +} + +func (s getPersonAuthorizationTokenResponseBody) String() string { + return tea.Prettify(s) +} +func (s getPersonAuthorizationTokenResponseBody) GoString() string { + return s.String() +} + +type getPersonAuthorizationTokenData struct { + AuthorizationToken *string `json:"authorizationToken,omitempty" xml:"authorizationToken,omitempty"` + ExpireTime *int64 `json:"expireDate,omitempty" xml:"expireDate,omitempty"` + TempUsername *string `json:"tempUserName,omitempty" xml:"tempUserName,omitempty"` + RequestId *string `json:"requestId,omitempty" xml:"requestId,omitempty"` + Code *string `json:"code,omitempty" xml:"code,omitempty"` +} + +type getPersonAuthorizationTokenResponse struct { + Headers map[string]*string `json:"headers,omitempty" xml:"headers,omitempty" require:"true"` + Body *getPersonAuthorizationTokenResponseBody `json:"body,omitempty" xml:"body,omitempty" require:"true"` +} + +func (s getPersonAuthorizationTokenResponse) String() string { + return tea.Prettify(s) +} + +func (s getPersonAuthorizationTokenResponse) GoString() string { + return s.String() +} + +func (s *getPersonAuthorizationTokenResponse) SetHeaders(v map[string]*string) *getPersonAuthorizationTokenResponse { + s.Headers = v + return s +} diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/registry.go 
b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/registry.go
new file mode 100644
index 0000000000..c27dcfc2f7
--- /dev/null
+++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/acr/registry.go
@@ -0,0 +1,74 @@
+package acr
+
+import (
+	"errors"
+	"net/url"
+	"os"
+	"regexp"
+	"strings"
+)
+
+var errUnknownDomain = errors.New("unknown domain")
+var domainPattern = regexp.MustCompile(
+	`^(?:(?P<instanceName>[^.\s]+)-)?registry(?:-intl)?(?:-vpc)?(?:-internal)?(?:\.distributed)?\.(?P<region>[^.]+)\.(?:cr\.)?aliyuncs\.com`)
+
+const (
+	urlPrefix      = "https://"
+	hostNameSuffix = ".aliyuncs.com"
+
+	envInstanceId = "DOCKER_CREDENTIAL_ACR_HELPER_INSTANCE_ID"
+	envRegion     = "DOCKER_CREDENTIAL_ACR_HELPER_REGION"
+)
+
+type Registry struct {
+	IsEE         bool
+	InstanceId   string
+	InstanceName string
+	Region       string
+	Domain       string
+}
+
+func parseServerURL(rawURL string) (*Registry, error) {
+	instanceId := os.Getenv(envInstanceId)
+	if instanceId == "" {
+		if !strings.Contains(rawURL, hostNameSuffix) {
+			return nil, errUnknownDomain
+		}
+	}
+
+	if !strings.HasPrefix(rawURL, urlPrefix) {
+		rawURL = urlPrefix + rawURL
+	}
+	serverURL, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, err
+	}
+	domain := serverURL.Hostname()
+
+	if instanceId == "" {
+		if !strings.HasSuffix(domain, hostNameSuffix) {
+			return nil, errUnknownDomain
+		}
+	}
+
+	registry := &Registry{
+		IsEE:         instanceId != "",
+		InstanceId:   instanceId,
+		InstanceName: "",
+		Region:       os.Getenv(envRegion),
+		Domain:       domain,
+	}
+
+	// parse domain to get ACR EE instance info
+	if registry.InstanceId == "" || registry.Region == "" {
+		subItems := domainPattern.FindStringSubmatch(domain)
+		if len(subItems) != 3 {
+			return nil, errUnknownDomain
+		}
+		registry.InstanceName = subItems[1]
+		registry.Region = subItems[2]
+		registry.IsEE = registry.InstanceName != ""
+	}
+
+	return registry, nil
+}
diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go
new file mode 100644
index 0000000000..5bf772ea30
--- /dev/null
+++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper/helper.go
@@ -0,0 +1,58 @@
+package credhelper
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/mozillazg/docker-credential-acr-helper/pkg/acr"
+	"github.com/mozillazg/docker-credential-acr-helper/pkg/version"
+	"github.com/sirupsen/logrus"
+)
+
+var errNotImplemented = errors.New("not implemented")
+
+type ACRHelper struct {
+	client *acr.Client
+	logger *logrus.Logger
+}
+
+func NewACRHelper() *ACRHelper {
+	return &ACRHelper{
+		client: &acr.Client{},
+		logger: logrus.StandardLogger(),
+	}
+}
+
+func (a *ACRHelper) WithLoggerOut(w io.Writer) *ACRHelper {
+	logger := logrus.New()
+	logger.Out = w
+	a.logger = logger
+	return a
+}
+
+func (a *ACRHelper) Get(serverURL string) (string, string, error) {
+	// TODO: add cache
+	cred, err := a.client.GetCredentials(serverURL)
+	if err != nil {
+		a.logger.WithField("name", version.ProjectName).
+			WithField("serverURL", serverURL).
+ WithError(err).Error("get credentials failed") + return "", "", fmt.Errorf("%s: get credentials for %q failed: %s", + version.ProjectName, serverURL, err) + } + return cred.UserName, cred.Password, nil +} + +func (a *ACRHelper) Add(creds *credentials.Credentials) error { + return errNotImplemented +} + +func (a *ACRHelper) Delete(serverURL string) error { + return errNotImplemented +} + +func (a *ACRHelper) List() (map[string]string, error) { + return nil, errNotImplemented +} diff --git a/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/version/version.go b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/version/version.go new file mode 100644 index 0000000000..37e6f088c1 --- /dev/null +++ b/vendor/github.com/mozillazg/docker-credential-acr-helper/pkg/version/version.go @@ -0,0 +1,18 @@ +package version + +import ( + "fmt" + "runtime" +) + +var ( + ProjectName = "docker-credential-acr-helper" + Version = "0.0.0" + GitCommit = "unknown" + Timestamp = "unknown" +) + +func UserAgent() string { + return fmt.Sprintf("%s/%s (%s/%s) %s/%s", + ProjectName, Version, runtime.GOOS, runtime.GOARCH, GitCommit, Timestamp) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go index 5e6635c3e4..2e7f0ffdf8 100644 --- a/vendor/github.com/pelletier/go-toml/v2/errors.go +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -103,6 +103,7 @@ func (e *DecodeError) Key() Key { // // The function copies all bytes used in DecodeError, so that document and // highlight can be freely deallocated. +// //nolint:funlen func wrapDecodeError(document []byte, de *decodeError) *DecodeError { offset := danger.SubsliceOffset(document, de.highlight) diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go index 33c7f91555..9dec2e0007 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go @@ -11,10 +11,10 @@ import ( // // For example: // -// it := n.Children() -// for it.Next() { -// it.Node() -// } +// it := n.Children() +// for it.Next() { +// it.Node() +// } type Iterator struct { started bool node *Node diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 4eb526bbfb..acb288315b 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -54,7 +54,7 @@ func NewEncoder(w io.Writer) *Encoder { // This behavior can be controlled on an individual struct field basis with the // inline tag: // -// MyField `inline:"true"` +// MyField `toml:",inline"` func (enc *Encoder) SetTablesInline(inline bool) *Encoder { enc.tablesInline = inline return enc @@ -65,7 +65,7 @@ func (enc *Encoder) SetTablesInline(inline bool) *Encoder { // // This behavior can be controlled on an individual struct field basis with the multiline tag: // -// MyField `multiline:"true"` +// MyField `multiline:"true"` func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { enc.arraysMultiline = multiline return enc @@ -89,7 +89,7 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { // // If v cannot be represented to TOML it returns an error. // -// Encoding rules +// # Encoding rules // // A top level slice containing only maps or structs is encoded as [[table // array]]. 
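The marshaler doc hunks above describe go-toml v2's per-field struct tags. A small sketch of what they look like in practice, assuming the tag spellings documented above (the struct and field names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

type Config struct {
	Name string            `toml:"name"`
	Tags []string          `toml:"tags,omitempty"` // dropped entirely when empty
	Meta map[string]string `toml:",inline"`        // rendered as an inline table
}

func main() {
	out, err := toml.Marshal(Config{
		Name: "demo",
		Meta: map[string]string{"env": "dev"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```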
@@ -117,7 +117,20 @@ func (enc *Encoder) SetIndentTables(indent bool) *Encoder { // When encoding structs, fields are encoded in order of definition, with their // exact name. // -// Struct tags +// Tables and array tables are separated by empty lines. However, consecutive +// subtables definitions are not. For example: +// +// [top1] +// +// [top2] +// [top2.child1] +// +// [[array]] +// +// [[array]] +// [array.child2] +// +// # Struct tags // // The encoding of each public struct field can be customized by the format // string in the "toml" key of the struct field's tag. This follows @@ -333,13 +346,13 @@ func isNil(v reflect.Value) bool { } } +func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { + return options.omitempty && isEmptyValue(v) +} + func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { var err error - if (ctx.options.omitempty || options.omitempty) && isEmptyValue(v) { - return b, nil - } - if !ctx.inline { b = enc.encodeComment(ctx.indent, options.comment, b) } @@ -365,6 +378,8 @@ func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v r func isEmptyValue(v reflect.Value) bool { switch v.Kind() { + case reflect.Struct: + return isEmptyStruct(v) case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: @@ -381,6 +396,34 @@ func isEmptyValue(v reflect.Value) bool { return false } +func isEmptyStruct(v reflect.Value) bool { + // TODO: merge with walkStruct and cache. + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + f := v.Field(i) + + if !isEmptyValue(f) { + return false + } + } + + return true +} + const literalQuote = '\'' func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { @@ -410,7 +453,6 @@ func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { return b } -//nolint:cyclop func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { stringQuote := `"` @@ -757,7 +799,13 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro } ctx.skipTableHeader = false + hasNonEmptyKV := false for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + hasNonEmptyKV = true + ctx.setKey(kv.Key) b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) @@ -768,7 +816,20 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro b = append(b, '\n') } + first := true for _, table := range t.tables { + if shouldOmitEmpty(table.Options, table.Value) { + continue + } + if first { + first = false + if hasNonEmptyKV { + b = append(b, '\n') + } + } else { + b = append(b, "\n"...) 
+ } + ctx.setKey(table.Key) ctx.options = table.Options @@ -777,8 +838,6 @@ func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, erro if err != nil { return nil, err } - - b = append(b, '\n') } return b, nil @@ -791,6 +850,10 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte first := true for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + if first { first = false } else { @@ -806,7 +869,7 @@ func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte } if len(t.tables) > 0 { - panic("inline table cannot contain nested tables, online key-values") + panic("inline table cannot contain nested tables, only key-values") } b = append(b, "}"...) @@ -905,6 +968,10 @@ func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect. b = enc.encodeComment(ctx.indent, ctx.options.comment, b) for i := 0; i < v.Len(); i++ { + if i != 0 { + b = append(b, "\n"...) + } + b = append(b, scratch...) var err error diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index b3596f6d04..d0d7a72d08 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -79,22 +79,22 @@ func (d *Decoder) DisallowUnknownFields() *Decoder { // strict mode and a field is missing, a `toml.StrictMissingError` is // returned. In any other case, this function returns a standard Go error. // -// Type mapping +// # Type mapping // // List of supported TOML types and their associated accepted Go types: // -// String -> string -// Integer -> uint*, int*, depending on size -// Float -> float*, depending on size -// Boolean -> bool -// Offset Date-Time -> time.Time -// Local Date-time -> LocalDateTime, time.Time -// Local Date -> LocalDate, time.Time -// Local Time -> LocalTime, time.Time -// Array -> slice and array, depending on elements types -// Table -> map and struct -// Inline Table -> same as Table -// Array of Tables -> same as Array and Table +// String -> string +// Integer -> uint*, int*, depending on size +// Float -> float*, depending on size +// Boolean -> bool +// Offset Date-Time -> time.Time +// Local Date-time -> LocalDateTime, time.Time +// Local Date -> LocalDate, time.Time +// Local Time -> LocalTime, time.Time +// Array -> slice and array, depending on elements types +// Table -> map and struct +// Inline Table -> same as Table +// Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { b, err := ioutil.ReadAll(d.r) if err != nil { @@ -123,7 +123,7 @@ type decoder struct { stashedExpr bool // Skip expressions until a table is found. This is set to true when a - // table could not be create (missing field in map), so all KV expressions + // table could not be created (missing field in map), so all KV expressions // need to be skipped. 
skipUntilTable bool @@ -344,9 +344,9 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) return v, err + default: + return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type()) } - - return d.handleArrayTable(key, v) } // When parsing an array table expression, each part of the key needs to be @@ -483,7 +483,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle d.errorContext.Struct = t d.errorContext.Field = path - f := v.FieldByIndex(path) + f := fieldByIndex(v, path) x, err := nextFn(key, f) if err != nil || d.skipUntilTable { return reflect.Value{}, err @@ -1071,7 +1071,7 @@ func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflec d.errorContext.Struct = t d.errorContext.Field = path - f := v.FieldByIndex(path) + f := fieldByIndex(v, path) x, err := d.handleKeyValueInner(key, value, f) if err != nil { return reflect.Value{}, err @@ -1135,6 +1135,21 @@ func initAndDereferencePointer(v reflect.Value) reflect.Value { return elem } +// Same as reflect.Value.FieldByIndex, but creates pointers if needed. +func fieldByIndex(v reflect.Value, path []int) reflect.Value { + for i, x := range path { + v = v.Field(x) + + if i < len(path)-1 && v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + return v +} + type fieldPathsMap = map[string][]int var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap @@ -1192,7 +1207,14 @@ func forEachField(t reflect.Type, path []int, do func(name string, path []int)) } if f.Anonymous && name == "" { - forEachField(f.Type, fieldPath, do) + t2 := f.Type + if t2.Kind() == reflect.Ptr { + t2 = t2.Elem() + } + + if t2.Kind() == reflect.Struct { + forEachField(t2, fieldPath, do) + } continue } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go index ac1ca3cf5f..cf05079fb8 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -69,9 +69,9 @@ type Collector interface { // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 00d70f09b6..de30de6daa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -51,7 +51,7 @@ type Counter interface { // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. +// than 128 runes in total. 
type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4bb816ab75..8bc5e44e2f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -20,6 +20,9 @@ import ( "strings" "github.com/cespare/xxhash/v2" + + "github.com/prometheus/client_golang/prometheus/internal" + //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" @@ -154,7 +157,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) return d } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go new file mode 100644 index 0000000000..614fd61be9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "os" + +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 0000000000..eaf8059ee1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +func getPIDFn() func() (int, error) { + return func() (int, error) { + return 1, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 08195b4102..ad9a71a5e0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -19,6 +19,10 @@ import ( "time" ) +// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. +// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so +// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is +// populated using runtime/metrics. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { @@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics { ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, } } @@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector { "A summary of the pause duration of garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( - memstatNamespace("last_gc_time_seconds"), + "go_memstats_last_gc_time_seconds", "Number of seconds since 1970 of last garbage collection.", nil, nil), goInfoDesc: NewDesc( @@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. func (c *baseGoCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + n := getRuntimeNumThreads() + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go index 24526131e7..897a6e906b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -40,13 +40,28 @@ type goCollector struct { // // Deprecated: Use collectors.NewGoCollector instead. 
func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go similarity index 53% rename from vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go rename to vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index d43bdcddab..3a2d55e84b 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -25,10 +25,72 @@ import ( //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" - "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // constants for strings referenced more than once. + goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapFreesBytes = "/gc/heap/frees:bytes" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" ) +// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. 
+var rmNamesForMemStatsMetrics = []string{
+	goGCHeapTinyAllocsObjects,
+	goGCHeapAllocsObjects,
+	goGCHeapFreesObjects,
+	goGCHeapAllocsBytes,
+	goGCHeapObjects,
+	goGCHeapGoalBytes,
+	goMemoryClassesTotalBytes,
+	goMemoryClassesHeapObjectsBytes,
+	goMemoryClassesHeapUnusedBytes,
+	goMemoryClassesHeapReleasedBytes,
+	goMemoryClassesHeapFreeBytes,
+	goMemoryClassesHeapStacksBytes,
+	goMemoryClassesOSStacksBytes,
+	goMemoryClassesMetadataMSpanInuseBytes,
+	goMemoryClassesMetadataMSPanFreeBytes,
+	goMemoryClassesMetadataMCacheInuseBytes,
+	goMemoryClassesMetadataMCacheFreeBytes,
+	goMemoryClassesProfilingBucketsBytes,
+	goMemoryClassesMetadataOtherBytes,
+	goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+	ret := make([]metrics.Description, 0, len(lookup))
+	for _, rm := range metrics.All() {
+		for _, m := range lookup {
+			if m == rm.Name {
+				ret = append(ret, rm)
+			}
+		}
+	}
+	return ret
+}
+
 type goCollector struct {
 	base baseGoCollector
@@ -36,70 +98,124 @@ type goCollector struct {
 	// snapshot is always produced by Collect.
 	mu sync.Mutex
 
-	// rm... fields all pertain to the runtime/metrics package.
-	rmSampleBuf []metrics.Sample
-	rmSampleMap map[string]*metrics.Sample
-	rmMetrics   []collectorMetric
+	// Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+	sampleBuf []metrics.Sample
+	// sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+	sampleMap map[string]*metrics.Sample
+
+	// rmExposedMetrics represents all runtime/metrics package metrics
+	// that were configured to be exposed.
+	rmExposedMetrics     []collectorMetric
+	rmExactSumMapForHist map[string]string
 
 	// With Go 1.17, the runtime/metrics package was introduced.
 	// From that point on, metric names produced by the runtime/metrics
 	// package could be generated from runtime/metrics names. However,
 	// these differ from the old names for the same values.
 	//
-	// This field exist to export the same values under the old names
+	// This field exists to export the same values under the old names
 	// as well.
-	msMetrics memStatsMetrics
+	msMetrics        memStatsMetrics
+	msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+	metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+	var descs []rmMetricDesc
+	for _, d := range metrics.All() {
+		var (
+			deny = true
+			desc rmMetricDesc
+		)
+
+		for _, r := range rules {
+			if !r.Matcher.MatchString(d.Name) {
+				continue
+			}
+			deny = r.Deny
+		}
+		if deny {
+			continue
+		}
+
+		desc.Description = d
+		descs = append(descs, desc)
+	}
+	return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+	return internal.GoCollectorOptions{
+		RuntimeMetricSumForHist: map[string]string{
+			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
+		},
+		RuntimeMetricRules: []internal.GoCollectorRule{
+			//{Matcher: regexp.MustCompile("")},
+		},
+	}
+}
 
 // NewGoCollector is the obsolete version of collectors.NewGoCollector.
 // See there for documentation.
 //
 // Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector { - descriptions := metrics.All() +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { + opt := defaultGoCollectorOptions() + for _, o := range opts { + o(&opt) + } + + exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) // Collect all histogram samples so that we can get their buckets. // The API guarantees that the buckets are always fixed for the lifetime // of the process. var histograms []metrics.Sample - for _, d := range descriptions { + for _, d := range exposedDescriptions { if d.Kind == metrics.KindFloat64Histogram { histograms = append(histograms, metrics.Sample{Name: d.Name}) } } - metrics.Read(histograms) + + if len(histograms) > 0 { + metrics.Read(histograms) + } + bucketsMap := make(map[string][]float64) for i := range histograms { bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets } - // Generate a Desc and ValueType for each runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(descriptions)) - sampleBuf := make([]metrics.Sample, 0, len(descriptions)) - sampleMap := make(map[string]*metrics.Sample, len(descriptions)) - for i := range descriptions { - d := &descriptions[i] - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d) + // Generate a collector for each exposed runtime/metrics metric. + metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) + // SampleBuf is used for reading from runtime/metrics. + // We are assuming the largest case to have stable pointers for sampleMap purposes. + sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) + sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) + for _, d := range exposedDescriptions { + namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) if !ok { // Just ignore this metric; we can't do anything with it here. // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } - // Set up sample buffer for reading, and a map - // for quick lookup of sample values. sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] var m collectorMetric if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := rmExactSumMap[d.Name] + _, hasSum := opt.RuntimeMetricSumForHist[d.Name] unit := d.Name[strings.IndexRune(d.Name, ':')+1:] m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description, + d.Description.Description, nil, nil, ), @@ -111,24 +227,61 @@ func NewGoCollector() Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, - }) + Help: d.Description.Description, + }, + ) } else { m = NewGauge(GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, + Help: d.Description.Description, }) } metricSet = append(metricSet, m) } + + // Add exact sum metrics to sampleBuf if not added before. 
+ for _, h := range histograms { + sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] + if !ok { + continue + } + + if _, ok := sampleMap[sumMetric]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) + sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] + } + + var ( + msMetrics memStatsMetrics + msDescriptions []metrics.Description + ) + + if !opt.DisableMemStatsLikeMetrics { + msMetrics = goRuntimeMemStats() + msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) + + // Check if metric was not exposed before and if not, add to sampleBuf. + for _, mdDesc := range msDescriptions { + if _, ok := sampleMap[mdDesc.Name]; ok { + continue + } + sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) + sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] + } + } + return &goCollector{ - base: newBaseGoCollector(), - rmSampleBuf: sampleBuf, - rmSampleMap: sampleMap, - rmMetrics: metricSet, - msMetrics: goRuntimeMemStats(), + base: newBaseGoCollector(), + sampleBuf: sampleBuf, + sampleMap: sampleMap, + rmExposedMetrics: metricSet, + rmExactSumMapForHist: opt.RuntimeMetricSumForHist, + msMetrics: msMetrics, + msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, } } @@ -138,7 +291,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) { for _, i := range c.msMetrics { ch <- i.desc } - for _, m := range c.rmMetrics { + for _, m := range c.rmExposedMetrics { ch <- m.Desc() } } @@ -148,8 +301,12 @@ func (c *goCollector) Collect(ch chan<- Metric) { // Collect base non-memory metrics. c.base.Collect(ch) + if len(c.sampleBuf) == 0 { + return + } + // Collect must be thread-safe, so prevent concurrent use of - // rmSampleBuf. Just read into rmSampleBuf but write all the data + // sampleBuf elements. Just read into sampleBuf but write all the data // we get into our Metrics or MemStats. // // This lock also ensures that the Metrics we send out are all from @@ -164,14 +321,17 @@ func (c *goCollector) Collect(ch chan<- Metric) { defer c.mu.Unlock() // Populate runtime/metrics sample buffer. - metrics.Read(c.rmSampleBuf) + metrics.Read(c.sampleBuf) + + // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). + for i, metric := range c.rmExposedMetrics { + // We created samples for exposed metrics first in order, so indexes match. + sample := c.sampleBuf[i] - // Update all our metrics from rmSampleBuf. - for i, sample := range c.rmSampleBuf { // N.B. switch on concrete type because it's significantly more efficient // than checking for the Counter and Gauge interface implementations. In // this case, we control all the types here. - switch m := c.rmMetrics[i].(type) { + switch m := metric.(type) { case *counter: // Guard against decreases. This should never happen, but a failure // to do so will result in a panic, which is a harsh consequence for @@ -191,12 +351,15 @@ func (c *goCollector) Collect(ch chan<- Metric) { panic("unexpected metric type") } } - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it. - var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + + if c.msMetricsEnabled { + // ms is a dummy MemStats that we populate ourselves so that we can + // populate the old metrics from it if goMemStatsCollection is enabled. 
+ var ms runtime.MemStats + memStatsFromRM(&ms, c.sampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } } } @@ -224,11 +387,6 @@ func unwrapScalarRMValue(v metrics.Value) float64 { } } -var rmExactSumMap = map[string]string{ - "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes", - "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes", -} - // exactSumFor takes a runtime/metrics metric name (that is assumed to // be of kind KindFloat64Histogram) and returns its exact sum and whether // its exact sum exists. @@ -236,11 +394,11 @@ var rmExactSumMap = map[string]string{ // The runtime/metrics API for histograms doesn't currently expose exact // sums, but some of the other metrics are in fact exact sums of histograms. func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := rmExactSumMap[rmName] + sumName, ok := c.rmExactSumMapForHist[rmName] if !ok { return 0 } - s, ok := c.rmSampleMap[sumName] + s, ok := c.sampleMap[sumName] if !ok { return 0 } @@ -261,35 +419,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { // while having Mallocs - Frees still represent a live object count. // Unfortunately, MemStats doesn't actually export a large allocation count, // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs + tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) ms.Lookups = 0 // Already always zero. - ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. 
- // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, // and often misleading due to the fact that it's an average over the lifetime @@ -324,6 +477,11 @@ type batchHistogram struct { // buckets must always be from the runtime/metrics package, following // the same conventions. func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } h := &batchHistogram{ desc: desc, buckets: buckets, @@ -382,8 +540,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error { for i, count := range h.counts { totalCount += count if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } } // Skip the +Inf bucket, but only for the bucket list. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 893802fd6b..0d47fecdc2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -581,11 +581,11 @@ func (h *constHistogram) Desc() *Desc { func (h *constHistogram) Write(out *dto.Metric) error { his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go new file mode 100644 index 0000000000..fd45cadc0c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -0,0 +1,651 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because the original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool, +) *SequenceMatcher { + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
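For orientation before the implementation: upstream this file lives in an internal package and cannot be imported, so the following standalone sketch assumes the SequenceMatcher code above has been copied into the local package; the inputs are illustrative only.

package main

import "fmt"

func main() {
	// Toy inputs: only the middle line differs.
	a := []string{"one\n", "two\n", "three\n"}
	b := []string{"one\n", "2\n", "three\n"}
	m := NewMatcher(a, b)
	for _, blk := range m.GetMatchingBlocks() {
		// Prints a[0:1] == b[0:1] and a[2:3] == b[2:3] (each of size 1),
		// followed by the (len(a), len(b), 0) sentinel described above.
		fmt.Printf("a[%d:%d] == b[%d:%d], size %d\n",
			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size, blk.Size)
	}
}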
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
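Tying the pieces together: a typical caller builds a UnifiedDiff from SplitLines output and renders it with GetUnifiedDiffString, both defined just below. A minimal sketch, under the same caveat that the vendored internal package is not importable directly and the file labels are illustrative:

package main

import "fmt"

func main() {
	diff := UnifiedDiff{
		A:        SplitLines("one\ntwo\nthree\n"),
		B:        SplitLines("one\n2\nthree\n"),
		FromFile: "want", // header labels are free-form
		ToFile:   "got",
		Context:  1, // one line of context around each change
	}
	out, err := GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	// Prints "--- want", "+++ got", "@@ -1,3 +1,3 @@",
	// then " one", "-two", "+2", " three".
	fmt.Print(out)
}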
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := ws(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := ws("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := ws("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff a string. +func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return w.String(), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go new file mode 100644 index 0000000000..723b45d644 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -0,0 +1,32 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import "regexp" + +type GoCollectorRule struct { + Matcher *regexp.Regexp + Deny bool +} + +// GoCollectorOptions should not be used be directly by anything, except `collectors` package. +// Use it via collectors package instead. See issue +// https://github.com/prometheus/client_golang/issues/1030. 
+// +// This is internal, so external users only can use it via `collector.WithGoCollector*` methods +type GoCollectorOptions struct { + DisableMemStatsLikeMetrics bool + RuntimeMetricSumForHist map[string]string + RuntimeMetricRules []GoCollectorRule +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index fe0a52180e..97d17d6cb6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) // name has - replaced with _ and is concatenated with the unit and // other data. name = strings.ReplaceAll(name, "-", "_") - name = name + "_" + unit - if d.Cumulative { - name = name + "_total" + name += "_" + unit + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { + name += "_total" } valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) @@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { switch unit { case "bytes": - // Rebucket as powers of 2. - return rebucketExp(buckets, 2) + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) case "seconds": - // Rebucket as powers of 10 and then merge all buckets greater + // Re-bucket as powers of 10 and then merge all buckets greater // than 1 second into the +Inf bucket. - b := rebucketExp(buckets, 10) + b := reBucketExp(buckets, 10) for i := range b { if b[i] <= 1 { continue @@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { return buckets } -// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and // downsamples the buckets to those a multiple of base apart. The end result // is a roughly exponential (in many cases, perfectly exponential) bucketing // scheme. -func rebucketExp(buckets []float64, base float64) []float64 { +func reBucketExp(buckets []float64, base float64) []float64 { bucket := buckets[0] var newBuckets []float64 // We may see a -Inf here, in which case, add it and skip it diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go index 351c26e1ae..6515c11480 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -19,18 +19,34 @@ import ( dto "github.com/prometheus/client_model/go" ) -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. +type LabelPairSorter []*dto.LabelPair -func (s metricSorter) Len() int { +func (s LabelPairSorter) Len() int { return len(s) } -func (s metricSorter) Swap(i, j int) { +func (s LabelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s metricSorter) Less(i, j int) bool { +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +// MetricSorter is a sortable slice of *dto.Metric. 
+type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as @@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool { // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) + sort.Sort(MetricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2744443ac2..6eee198fef 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -39,7 +39,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", + "%w: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, @@ -49,7 +49,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) @@ -67,7 +67,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index dc121910a5..f0941f6f00 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -14,6 +14,9 @@ package prometheus import ( + "errors" + "math" + "sort" "strings" "time" @@ -115,22 +118,6 @@ func BuildFQName(namespace, subsystem, name string) string { return name } -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. 
-type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - type invalidMetric struct { desc *Desc err error @@ -174,3 +161,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error { func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? + return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. +type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. +// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts == (time.Time{}) { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) 
+ if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 0000000000..7c12b21087 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 0000000000..7348df01df --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 44128016fd..03773b21f7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -58,7 +58,7 @@ type ObserverVec interface { // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. +// invalid or if the provided labels contain more than 128 runes in total. 
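From the caller's side, the exemplar support added above looks like the sketch below. This is a minimal illustration rather than the library's documented example: the metric name, label, and package name are made up, and the type assertion relies on the ExemplarObserver interface declared next.

package app

import "github.com/prometheus/client_golang/prometheus"

var latency = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "request_duration_seconds", // illustrative name
	Help: "Request latency.",
})

func observe(seconds float64, traceID string) {
	// Histograms implement ExemplarObserver; attaching the trace ID as an
	// exemplar lets a scraped sample be linked back to a trace. The label
	// set must stay within the 128-rune limit noted in the hunk above.
	latency.(prometheus.ExemplarObserver).ObserveWithExemplar(
		seconds, prometheus.Labels{"trace_id": traceID},
	)
}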
type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 5bfe0ff5bb..8548dd18ed 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,7 +16,6 @@ package prometheus import ( "errors" "fmt" - "io/ioutil" "os" "strconv" "strings" @@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { } if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } + c.pidFn = getPIDFn() } else { c.pidFn = opts.PidFn } @@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) // It is meant to be used for the PidFn field in ProcessCollectorOpts. func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) + content, err := os.ReadFile(pidFilePath) if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) } return pid, nil diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go new file mode 100644 index 0000000000..b1e363d6cf --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js +// +build js + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index 2dc3660da0..c0152cdb61 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !windows -// +build !windows +//go:build !windows && !js +// +build !windows,!js package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go deleted file mode 100644 index f8d50d1f91..0000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promauto provides alternative constructors for the fundamental -// Prometheus metric types and their …Vec and …Func variants. The difference to -// their counterparts in the prometheus package is that the promauto -// constructors return Collectors that are already registered with a -// registry. There are two sets of constructors. The constructors in the first -// set are top-level functions, while the constructors in the other set are -// methods of the Factory type. The top-level function return Collectors -// registered with the global registry (prometheus.DefaultRegisterer), while the -// methods return Collectors registered with the registry the Factory was -// constructed with. All constructors panic if the registration fails. -// -// The following example is a complete program to create a histogram of normally -// distributed random numbers from the math/rand package: -// -// package main -// -// import ( -// "math/rand" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } -// -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// Prometheus's version of a minimal hello-world program: -// -// package main -// -// import ( -// "fmt" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// func main() { -// http.Handle("/", promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// A Factory is created with the With(prometheus.Registerer) function, which -// enables two usage pattern. 
With(prometheus.Registerer) can be called once per -// line: -// -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// Or it can be used to create a Factory once to be used multiple times: -// -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// This appears very handy. So why are these constructors locked away in a -// separate package? -// -// The main problem is that registration may fail, e.g. if a metric inconsistent -// with or equal to the newly to be registered one is already registered. -// Therefore, the Register method in the prometheus.Registerer interface returns -// an error, and the same is the case for the top-level prometheus.Register -// function that registers with the global registry. The prometheus package also -// provides MustRegister versions for both. They panic if the registration -// fails, and they clearly call this out by using the Must… idiom. Panicking is -// problematic in this case because it doesn't just happen on input provided by -// the caller that is invalid on its own. Things are a bit more subtle here: -// Metric creation and registration tend to be spread widely over the -// codebase. It can easily happen that an incompatible metric is added to an -// unrelated part of the code, and suddenly code that used to work perfectly -// fine starts to panic (provided that the registration of the newly added -// metric happens before the registration of the previously existing -// metric). This may come as an even bigger surprise with the global registry, -// where simply importing another package can trigger a panic (if the newly -// imported package registers metrics in its init function). At least, in the -// prometheus package, creation of metrics and other collectors is separate from -// registration. You first create the metric, and then you decide explicitly if -// you want to register it with a local or the global registry, and if you want -// to handle the error or risk a panic. With the constructors in the promauto -// package, registration is automatic, and if it fails, it will always -// panic. Furthermore, the constructors will often be called in the var section -// of a file, which means that panicking will happen as a side effect of merely -// importing a package. -// -// A separate package allows conservative users to entirely ignore it. And -// whoever wants to use it, will do so explicitly, with an opportunity to read -// this warning. -// -// Enjoy promauto responsibly! 
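The vendored promauto package is deleted wholesale below, presumably because nothing in the module imports it any longer. For reference, the explicit create-then-register flow that this (now removed) doc comment argues for looks roughly like the following; the metric reuses a name from the examples above, and the package name is illustrative:

package app

import "github.com/prometheus/client_golang/prometheus"

var requestCount = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total number of HTTP requests by status code and method.",
	},
	[]string{"code", "method"},
)

func init() {
	// Registration is a separate, explicit step, so a failure surfaces
	// where you can see it instead of as an import side effect.
	prometheus.MustRegister(requestCount)
}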
-package promauto - -import "github.com/prometheus/client_golang/prometheus" - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. -func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - return With(prometheus.DefaultRegisterer).NewCounter(opts) -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec -// panics. -func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc -// panics. -func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the -// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. -func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - return With(prometheus.DefaultRegisterer).NewGauge(opts) -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. -func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. -func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. -func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - return With(prometheus.DefaultRegisterer).NewSummary(opts) -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec -// panics. -func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. 
-func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - return With(prometheus.DefaultRegisterer).NewHistogram(opts) -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec -// panics. -func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc -// panics. -func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) -} - -// Factory provides factory methods to create Collectors that are automatically -// registered with a Registerer. Create a Factory with the With function, -// providing a Registerer to auto-register created Collectors with. The zero -// value of a Factory creates Collectors that are not registered with any -// Registerer. All methods of the Factory panic if the registration fails. -type Factory struct { - r prometheus.Registerer -} - -// With creates a Factory using the provided Registerer for registration of the -// created Collectors. If the provided Registerer is nil, the returned Factory -// creates Collectors that are not registered with any Registerer. -func With(r prometheus.Registerer) Factory { return Factory{r} } - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the Factory's Registerer. -func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - c := prometheus.NewCounter(opts) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the Factory's -// Registerer. -func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - c := prometheus.NewCounterVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the Factory's -// Registerer. -func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - c := prometheus.NewCounterFunc(opts, function) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the Factory's Registerer. -func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - g := prometheus.NewGauge(opts) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the Factory's -// Registerer. 
-func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - g := prometheus.NewGaugeVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the Factory's -// Registerer. -func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - g := prometheus.NewGaugeFunc(opts, function) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the Factory's Registerer. -func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - s := prometheus.NewSummary(opts) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the Factory's -// Registerer. -func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - s := prometheus.NewSummaryVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the Factory's -// Registerer. -func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - h := prometheus.NewHistogram(opts) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the Factory's -// Registerer. -func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - h := prometheus.NewHistogramVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the Factory's -// Registerer. 
-func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - u := prometheus.NewUntypedFunc(opts, function) - if f.r != nil { - f.r.MustRegister(u) - } - return u -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go index e7c0d05464..9819917b83 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { return n, err } -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } +type ( + closeNotifierDelegator struct{ *responseWriterDelegator } + flusherDelegator struct{ *responseWriterDelegator } + hijackerDelegator struct{ *responseWriterDelegator } + readerFromDelegator struct{ *responseWriterDelegator } + pusherDelegator struct{ *responseWriterDelegator } +) func (d closeNotifierDelegator) CloseNotify() <-chan bool { //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } + func (d flusherDelegator) Flush() { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() { } d.ResponseWriter.(http.Flusher).Flush() } + func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } + func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. @@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { d.written += n return n, err } + func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } @@ -261,7 +267,7 @@ func init() { http.Flusher }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 return struct { *responseWriterDelegator http.Pusher diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index d86d0cf4b0..a4cc9810b0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -33,6 +33,7 @@ package promhttp import ( "compress/gzip" + "errors" "fmt" "io" "net/http" @@ -84,6 +85,13 @@ func Handler() http.Handler { // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. 
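For context on the hunk that follows: HandlerFor now delegates to a new HandlerForTransactional, and typical callers are unaffected. A minimal sketch of serving a custom registry (the address is illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// After this change, HandlerFor routes through HandlerForTransactional
	// internally; the caller-visible behavior stays the same.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}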
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) +} + +// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which +// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after +// call to `done` of that `Gather`. +func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { var ( inFlightSem chan struct{} errCnt = prometheus.NewCounterVec( @@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { errCnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { return } } - mfs, err := reg.Gather() + mfs, done, err := reg.Gather() + defer done() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) @@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht cnt.WithLabelValues("500") cnt.WithLabelValues("503") if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { cnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) @@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht Help: "Current number of scrapes being served.", }) if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + are := &prometheus.AlreadyRegisteredError{} + if errors.As(err, are) { gge = are.ExistingCollector.(prometheus.Gauge) } else { panic(err) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 861b4d21ca..097aff2df6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { gauge.Inc() defer gauge.Dec() return next.RoundTrip(r) - }) + } } // InstrumentRoundTripperCounter is a middleware that wraps the provided @@ -59,22 +59,29 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // If the wrapped RoundTripper panics or returns a non-nil error, the Counter // is not incremented. // +// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. +// // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
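The client-side middlewares below compose by nesting. A hedged sketch of wiring both the counter and the duration observer into an http.Client; the metric and package names are illustrative, and the label names must be drawn from "code" and "method", which checkLabels enforces. The WithExemplarFromContext option referenced in the comments could be passed as a trailing argument to either constructor.

package app

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newInstrumentedClient(reg prometheus.Registerer) *http.Client {
	count := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "client_requests_total", Help: "Outgoing HTTP requests."},
		[]string{"code", "method"},
	)
	dur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "client_request_duration_seconds", Help: "Outgoing request latency."},
		[]string{"method"},
	)
	reg.MustRegister(count, dur)
	// The counter wraps the duration observer, which wraps the real transport.
	return &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(count,
			promhttp.InstrumentRoundTripperDuration(dur, http.DefaultTransport)),
	}
}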
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(counter) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { + exemplarAdd( + counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + 1, + rtOpts.getExemplarFn(r.Context()), + ) - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() } return resp, err - }) + } } // InstrumentRoundTripperDuration is a middleware that wraps the provided @@ -94,24 +101,30 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // If the wrapped RoundTripper panics or returns a non-nil error, no values are // reported. // +// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. +// // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} + rtOpts := defaultOptions() for _, o := range opts { - o(rtOpts) + o.apply(rtOpts) } code, method := checkLabels(obs) - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) + exemplarObserve( + obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), + time.Since(start).Seconds(), + rtOpts.getExemplarFn(r.Context()), + ) } return resp, err - }) + } } // InstrumentTrace is used to offer flexibility in instrumenting the available @@ -149,7 +162,7 @@ type InstrumentTrace struct { // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + return func(r *http.Request) (*http.Response, error) { start := time.Now() trace := &httptrace.ClientTrace{ @@ -231,5 +244,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) return next.RoundTrip(r) - }) + } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index a23f0edc6f..bfe5009877 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -28,6 +28,22 @@ import ( // magicString is used for the hacky label test in checkLabels. Remove once fixed.
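The round-tripper middlewares above changed shape internally (options now flow through defaultOptions()/apply, and updates go through the exemplar-aware helpers), but from the outside they chain exactly as before. A hedged sketch of typical client-side wiring; the metric names and the choice of http.DefaultTransport are assumptions for illustration:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newInstrumentedClient(reg prometheus.Registerer) *http.Client {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outbound requests currently in flight.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total",
		Help: "Outbound requests by code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds",
		Help:    "Outbound request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	reg.MustRegister(inFlight, counter, duration)

	// Each middleware wraps the next RoundTripper; the outermost result is
	// itself an http.RoundTripper, so it slots straight into a client.
	rt := promhttp.InstrumentRoundTripperInFlight(inFlight,
		promhttp.InstrumentRoundTripperCounter(counter,
			promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
		),
	)
	return &http.Client{Transport: rt, Timeout: 10 * time.Second}
}

func main() {
	reg := prometheus.NewRegistry()
	client := newInstrumentedClient(reg)
	resp, err := client.Get("https://example.org")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
```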
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" +func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) { + if labels == nil { + obs.Observe(val) + return + } + obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) +} + +func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) { + if labels == nil { + obs.Add(val) + return + } + obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) +} + // InstrumentHandlerInFlight is a middleware that wraps the provided // http.Handler. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.Handler. @@ -48,7 +64,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // names are "code" and "method". The function panics otherwise. For the "method" // label a predefined default label value set is used to filter given values. // Values besides predefined values will count as `unknown` method. -//`WithExtraMethods` can be used to add more methods to the set. The Observe +// `WithExtraMethods` can be used to add more methods to the set. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -62,28 +78,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + exemplarObserve( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) - }) + + exemplarObserve( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler @@ -104,25 +129,34 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // // See the example for InstrumentHandlerDuration for example usage. 
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(counter) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() - }) + + exemplarAdd( + counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() - }) + exemplarAdd( + counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + 1, + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided @@ -148,20 +182,24 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + exemplarObserve( + obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), + time.Since(now).Seconds(), + hOpts.getExemplarFn(r.Context()), + ) }) next.ServeHTTP(d, r) - }) + } } // InstrumentHandlerRequestSize is a middleware that wraps the provided @@ -184,27 +222,34 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // // See the example for InstrumentHandlerDuration for example usage. 
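Server-side, the handler middlewares follow the same chaining pattern. A small sketch, under the assumption of a trivial http.HandlerFunc app handler and illustrative metric names, of composing InstrumentHandlerDuration and InstrumentHandlerCounter:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency by code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Requests by code and method.",
	}, []string{"code", "method"})
	reg.MustRegister(duration, counter)

	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Innermost first: duration observes the app handler, counter wraps both.
	chain := promhttp.InstrumentHandlerCounter(counter,
		promhttp.InstrumentHandlerDuration(duration, app),
	)

	http.Handle("/", chain)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```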
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) - }) + exemplarObserve( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) - }) + exemplarObserve( + obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), + float64(size), + hOpts.getExemplarFn(r.Context()), + ) + } } // InstrumentHandlerResponseSize is a middleware that wraps the provided @@ -227,9 +272,9 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - mwOpts := &option{} + hOpts := defaultOptions() for _, o := range opts { - o(mwOpts) + o.apply(hOpts) } code, method := checkLabels(obs) @@ -237,7 +282,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) + exemplarObserve( + obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), + float64(d.Written()), + hOpts.getExemplarFn(r.Context()), + ) }) } @@ -246,7 +295,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler // Collector does not have a Desc or has more than one Desc or its Desc is // invalid. It also panics if the Collector has any non-const, non-curried // labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code bool, method bool) { +func checkLabels(c prometheus.Collector) (code, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. var ( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go index 35e41bd1e6..c590d912c9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go @@ -13,19 +13,46 @@ package promhttp -// Option are used to configure a middleware or round tripper.. -type Option func(*option) +import ( + "context" -type option struct { - extraMethods []string + "github.com/prometheus/client_golang/prometheus" +) + +// Option are used to configure both handler (middleware) or round tripper. +type Option interface { + apply(*options) +} + +// options store options for both a handler or round tripper.
+type options struct { + extraMethods []string + getExemplarFn func(requestCtx context.Context) prometheus.Labels +} + +func defaultOptions() *options { + return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} } +type optionApplyFunc func(*options) + +func (o optionApplyFunc) apply(opt *options) { o(opt) } + // WithExtraMethods adds additional HTTP methods to the list of allowed methods. // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. // // See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. func WithExtraMethods(methods ...string) Option { - return func(o *option) { + return optionApplyFunc(func(o *options) { o.extraMethods = methods - } + }) +} + +// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. +// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric +// will get instrumented without exemplar. +func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { + return optionApplyFunc(func(o *options) { + o.getExemplarFn = getExemplarFn + }) } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 383a7f5941..325f665ff6 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,8 +15,8 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" @@ -289,7 +289,7 @@ func (r *Registry) Register(c Collector) error { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) } // Is the descID unique? @@ -407,6 +407,14 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. + r.mtx.RUnlock() + return nil, nil + } + var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) @@ -416,7 +424,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) - r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) @@ -556,7 +563,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // This is intended for use with the textfile collector of the node exporter. // Note that the node exporter expects the filename to be suffixed with ".prom". 
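One concrete sketch of how the new WithExemplarFromContext option plugs in may help. Everything here is illustrative: traceKey and traceIDFromContext are hypothetical stand-ins for whatever tracing integration populates the request context:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type traceKey struct{}

// traceIDFromContext is a hypothetical stand-in for a tracing-library lookup.
func traceIDFromContext(ctx context.Context) (string, bool) {
	id, ok := ctx.Value(traceKey{}).(string)
	return id, ok
}

func main() {
	reg := prometheus.NewRegistry()
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latency.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	reg.MustRegister(duration)

	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// The hook runs per observation; returning nil labels simply skips the
	// exemplar while the observation itself still happens.
	http.Handle("/", promhttp.InstrumentHandlerDuration(duration, app,
		promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
			if id, ok := traceIDFromContext(ctx); ok {
				return prometheus.Labels{"trace_id": id}
			}
			return nil
		}),
	))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{EnableOpenMetrics: true}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

Exemplars are only rendered when the scrape negotiates the OpenMetrics format (hence EnableOpenMetrics above), and the ExemplarMaxRunes bump to 128 later in this diff leaves comfortable room for a trace_id label.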
func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } @@ -575,7 +582,7 @@ func WriteToTextfile(filename string, g Gatherer) error { return err } - if err := os.Chmod(tmp.Name(), 0644); err != nil { + if err := os.Chmod(tmp.Name(), 0o644); err != nil { return err } return os.Rename(tmp.Name(), filename) @@ -596,7 +603,7 @@ func processMetric( } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) + return fmt.Errorf("error collecting metric %v: %w", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. @@ -718,12 +725,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { for i, g := range gs { mfs, err := g.Gather() if err != nil { - if multiErr, ok := err.(MultiError); ok { + multiErr := MultiError{} + if errors.As(err, &multiErr) { for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } for _, mf := range mfs { @@ -884,11 +892,11 @@ func checkMetricConsistency( h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) + sort.Sort(internal.LabelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { @@ -935,7 +943,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || @@ -948,3 +956,89 @@ func checkDescConsistency( } return nil } + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. 
+ sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. + Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. +func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go index b4e0ae11cb..2d3abc1cbd 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -23,6 +23,8 @@ import ( "github.com/golang/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/client_golang/prometheus/internal" + dto "github.com/prometheus/client_model/go" ) @@ -38,6 +40,23 @@ const ( UntypedValue ) +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. 
Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -91,11 +110,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { + return nil, err + } + return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), + desc: desc, + metric: metric, }, nil } @@ -110,10 +133,8 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal } type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair + desc *Desc + metric *dto.Metric } func (m *constMetric) Desc() *Desc { @@ -121,7 +142,11 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil } func populateMetric( @@ -170,12 +195,12 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { }) } labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + sort.Sort(internal.LabelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 +const ExemplarMaxRunes = 128 // newExemplar creates a new dto.Exemplar from the provided values. An error is // returned if any of the label names or values are invalid or if the total diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 4ababe6c98..7ae322590c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -99,6 +99,16 @@ func (m *MetricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. +// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + return m.metricMap.deleteByLabels(labels, m.curry) +} + // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. @@ -381,6 +391,82 @@ func (m *metricMap) deleteByHashWithLabels( return true } +// deleteByLabels deletes a metric if the given labels are present in the metric. +func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { + m.mtx.Lock() + defer m.mtx.Unlock() + + var numDeleted int + + for h, metrics := range m.metrics { + i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + // Didn't find matching labels in this metric slice. + continue + } + delete(m.metrics, h) + numDeleted++ + } + + return numDeleted +} + +// findMetricWithPartialLabel returns the index of the matching metric or +// len(metrics) if not found. 
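The DeletePartialMatch API added to MetricVec above is inherited by all concrete vector types (CounterVec, GaugeVec, and so on) through embedding. A quick illustrative sketch with assumed metric and label names:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	jobs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Processed jobs by queue and worker.",
	}, []string{"queue", "worker"})

	jobs.WithLabelValues("default", "w1").Inc()
	jobs.WithLabelValues("default", "w2").Inc()
	jobs.WithLabelValues("batch", "w1").Inc()

	// Deletes every child whose variable labels contain queue="default",
	// whatever the other label values are; here that removes two children.
	deleted := jobs.DeletePartialMatch(prometheus.Labels{"queue": "default"})
	fmt.Println("deleted:", deleted) // deleted: 2
}
```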
+func findMetricWithPartialLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchPartialLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +// indexOf searches the given slice of strings for the target string and returns +// the index or len(items) as well as a boolean whether the search succeeded. +func indexOf(target string, items []string) (int, bool) { + for i, l := range items { + if l == target { + return i, true + } + } + return len(items), false +} + +// valueMatchesVariableOrCurriedValue determines if a value was previously curried, +// and returns whether it matches either the "base" value or the curried value accordingly. +// It also indicates whether the match is against a curried or uncurried value. +func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. + return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. + varLabelIndex, validLabel := indexOf(l, desc.variableLabels) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // @@ -485,7 +571,7 @@ func findMetricWithLabels( return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go index 74ee93280f..1498ee144c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -21,6 +21,8 @@ import ( "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" ) // WrapRegistererWith returns a Registerer wrapping the provided @@ -182,7 +184,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error { Value: proto.String(lv), }) } - sort.Sort(labelPairSorter(out.Label)) + sort.Sort(internal.LabelPairSorter(out.Label)) return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go index dc2eedeefc..f819e4f8b5 100644 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -12,6 +12,7 @@ // limitations under the License. 
// Build only when actually fuzzing +//go:build gofuzz // +build gofuzz package expfmt diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8a9313a3be..9d94ae9eff 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" @@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - ts, err := ptypes.Timestamp((*e).Timestamp) + err = (*e).Timestamp.CheckValid() if err != nil { return written, err } + ts := (*e).Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7f67b16e42..c909b8aa8c 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -193,7 +193,7 @@ func ParseDuration(durationStr string) (Duration, error) { // Allow 0 without a unit. return 0, nil case "": - return 0, fmt.Errorf("empty duration string") + return 0, errors.New("empty duration string") } matches := durationRE.FindStringSubmatch(durationStr) if matches == nil { diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore index 25e3659ab2..7cc33ae4a7 100644 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -1 +1,2 @@ -/fixtures/ +/testdata/fixtures/ +/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edacb..a197699a1e 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,12 @@ --- linters: enable: - - golint + - godot + - revive + +linter-settings: + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md index 9a1aff4127..d325872bdf 100644 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -## Prometheus Community Code of Conduct +# Prometheus Community Code of Conduct -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 943de7615e..853eb9d49b 100644 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls to the `stat` function will often return the wrong size. 
Therefore, for most files it's recommended to read the full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of the file. Note that parsing the file's contents can still be performed one line at a time. This is done by first reading @@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t ``` The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does not bother to check the size of the file before reading. ``` data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index fa2bd5b528..7edfe4d093 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -14,18 +14,18 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" + @echo ">> extracting fixtures $*" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -fixtures: fixtures/.unpacked +fixtures: testdata/fixtures/.unpacked update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index a1b1ca40f4..6c8e3e2197 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... @@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),) endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.13.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.39.0 +GOLANGCI_LINT_VERSION ?= v1.45.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. 
# windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. + ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -144,32 +127,25 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ @@ -177,25 +153,21 @@ $(GOTEST_DIR): .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint @@ -212,28 +184,15 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @@ -289,12 +248,6 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - .PHONY: precheck precheck:: diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md index 67741f015a..fed02d85c7 100644 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ b/vendor/github.com/prometheus/procfs/SECURITY.md @@ -3,4 +3,4 @@ The Prometheus security policy, including how to report vulnerabilities, can be found here: -https://prometheus.io/docs/operating/security/ +<https://prometheus.io/docs/operating/security/> diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 4e47e61720..68f36e888f 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -15,11 +15,28 @@ package procfs import ( "fmt" - "io/ioutil" "net" + "os" + "strconv" "strings" ) +// Learned from include/uapi/linux/if_arp.h. +const ( + // completed entry (ha valid). + ATFComplete = 0x02 + // permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + // ARPEntry contains a single row of the columnar data represented in // /proc/net/arp. type ARPEntry struct { @@ -29,12 +46,14 @@ type ARPEntry struct { HWAddr net.HardwareAddr // Name of the device Device string + // Flags + Flags byte } // GatherARPEntries retrieves all the ARP entries, parse the relevant columns, // and then return a slice of ARPEntry's. func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) } @@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err } return entry, nil } + +// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index 5623b24a16..ff6b927da1 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs @@ -27,7 +28,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. type CPUInfo struct { Processor uint VendorID string @@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode } // firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line +// and returns the contents of that line. func firstNonEmptyLine(scanner *bufio.Scanner) string { for scanner.Scan() { line := scanner.Text() diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 44b590ed38..64cfd534c1 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 91e272573a..c11207f3ab 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index 95b5b4ec44..ea41bf2ca1 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 6068bd571c..003bc2ad4a 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
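Looping back to the procfs arp.go changes above: with the new Flags field and IsComplete helper, filtering for resolved neighbors becomes straightforward. A sketch, assuming /proc is mounted at the default location:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	entries, err := fs.GatherARPEntries()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		// IsComplete checks the ATFComplete bit on the new Flags field,
		// skipping incomplete (unresolved) neighbor entries.
		if e.IsComplete() {
			fmt.Printf("%s -> %s on %s\n", e.IPAddr, e.HWAddr, e.Device)
		}
	}
}
```

Note that parseARPEntry now also rejects rows whose MAC or flags columns fail to parse, where the old code silently mis-typed the MAC column.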
+//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index e83c2e207c..1c9b7313b6 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (riscv || riscv64) // +build linux // +build riscv riscv64 diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index 26814eebaa..fa3686bc00 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index d5bedf97f3..a0ef55562e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 5e7eeef4a5..0000000000 --- a/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,7673 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cwd -SymlinkTo: /usr/bin -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/environ -Lines: 1 -PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/exe -SymlinkTo: /usr/bin/vim -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/10 -SymlinkTo: 
../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 18446744073708503040 18446744073708503040 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: 
flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/smaps -Lines: 252 -00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager -Size: 8900 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2952 kB -Pss: 2952 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 2952 kB -Private_Dirty: 0 kB -Referenced: 2864 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me dw sd -00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager -Size: 10236 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 6152 kB -Pss: 6152 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 6152 kB -Private_Dirty: 0 kB -Referenced: 5308 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr mw me dw sd -016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager -Size: 424 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 176 kB -Pss: 176 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 84 kB -Private_Dirty: 92 kB -Referenced: 176 kB -Anonymous: 92 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 12 kB -SwapPss: 12 kB -Locked: 0 kB -VmFlags: rd wr mr mw me dw ac sd -0171a000-0173f000 rw-p 00000000 00:00 0 -Size: 148 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 76 kB -Pss: 76 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 76 kB -Referenced: 76 kB -Anonymous: 76 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac 
sd -c000000000-c000400000 rw-p 00000000 00:00 0 -Size: 4096 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 2564 kB -Pss: 2564 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 20 kB -Private_Dirty: 2544 kB -Referenced: 2544 kB -Anonymous: 2564 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1100 kB -SwapPss: 1100 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -c000400000-c001600000 rw-p 00000000 00:00 0 -Size: 18432 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 16024 kB -Pss: 16024 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 5864 kB -Private_Dirty: 10160 kB -Referenced: 11944 kB -Anonymous: 16024 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 440 kB -SwapPss: 440 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd nh -c001600000-c004000000 rw-p 00000000 00:00 0 -Size: 43008 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0 -Size: 38596 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 1992 kB -Pss: 1992 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 476 kB -Private_Dirty: 1516 kB -Referenced: 1828 kB -Anonymous: 1992 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 384 kB -SwapPss: 384 kB -Locked: 0 kB -VmFlags: rd wr mr mw me ac sd -7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack] -Size: 132 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 8 kB -Pss: 8 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 8 kB -Referenced: 8 kB -Anonymous: 8 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 4 kB -SwapPss: 4 kB -Locked: 0 kB -VmFlags: rd wr mr mw me gd ac -7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar] -Size: 12 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd mr pf io de dd sd -7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso] -Size: 8 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 4 kB -Pss: 0 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 4 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex mr mw me de sd -ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall] -Size: 4 kB -KernelPageSize: 4 kB -MMUPageSize: 4 kB -Rss: 0 kB -Pss: 0 kB -Shared_Clean: 0 kB -Shared_Dirty: 0 kB -Private_Clean: 0 kB -Private_Dirty: 0 kB -Referenced: 0 kB -Anonymous: 0 kB -LazyFree: 0 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 0 kB -SwapPss: 0 kB -Locked: 0 kB -VmFlags: rd ex -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/smaps_rollup -Lines: 17 -00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup] -Rss: 29948 kB -Pss: 29944 kB -Shared_Clean: 4 kB -Shared_Dirty: 0 kB -Private_Clean: 15548 kB -Private_Dirty: 14396 kB -Referenced: 24752 kB -Anonymous: 20756 kB -LazyFree: 5848 kB -AnonHugePages: 0 kB -ShmemPmdMapped: 0 kB -Shared_Hugetlb: 0 kB -Private_Hugetlb: 0 kB -Swap: 1940 kB -SwapPss: 1940 kB -Locked: 0 kB -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 1000 1000 1000 0 -Gid: 1001 1001 1001 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/wchan -Lines: 1 -poll_schedule_timeoutEOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: 
../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/maps -Lines: 9 -55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat -55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat -55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap] -7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive -7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so -7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack] -7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar] -7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso] -ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/wchan -Lines: 1 -0EOF -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - ____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26234 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/proc/26234/maps -Lines: 4 -08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh -08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh -0808c000-08146000 rwxp 00000000 00:00 0 -40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cmdline -Lines: 1 -BOOT_IMAGE=/vmlinuz-5.11.0-22-generic root=UUID=456a0345-450d-4f7b-b7c9-43e3241d99ad ro quiet splash vt.handoff=7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 
sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 
-initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 
spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 972 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp -async : yes - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif -driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c 
-driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : 
cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel -priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 
0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 
-digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 
sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 -Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 
185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/stat -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/arp_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c -00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/ndisc_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb -00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 
[Elided: remainder of a ttar-packed /proc and /sys test-fixture archive removed by this hunk. The deleted entries, in order: fixtures/proc/schedstat (per-CPU scheduler statistics), fixtures/proc/self (symlink to 26231), fixtures/proc/slabinfo (302 lines of slab-allocator statistics), fixtures/proc/stat (16 lines of CPU, interrupt, and context-switch counters), fixtures/proc/swaps, fixtures/proc/symlinktargets/* (empty symlink-target files), fixtures/proc/sys/kernel/random/* and fixtures/proc/sys/vm/* (one-line kernel tunables), fixtures/proc/zoneinfo (262 lines of per-zone memory statistics), fixtures/sys/block/dm-0/stat and fixtures/sys/block/sda/{queue/*,stat} (block-device queue attributes and I/O statistics), fixtures/sys/class/drm/card0/device/* (amdgpu PCI, power, and clock attributes), fixtures/sys/class/fc_host/host0/* (Fibre Channel host attributes and statistics), and fixtures/sys/class/infiniband/hfi1_0/* (board info, firmware version, and port 1 counters, ending mid-entry at port_rcv_data).]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_packets -Lines: 1 -638036947 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_data -Lines: 1 -273558326543 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_packets -Lines: 1 -568318856 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/port_xmit_wait -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/rate -Lines: 1 -100 Gb/sec (4X EDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/hfi1_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/board_id -Lines: 1 -SM_1141000001000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver -Lines: 1 -2.31.5050 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/hca_type -Lines: 1 -MT4099 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data -Lines: 1 -2221223609 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets -Lines: 1 -87169372 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data -Lines: 1 -26509113295 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets -Lines: 1 -85734114 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait -Lines: 1 -3599 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/class/infiniband/mlx4_0/ports/1/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data -Lines: 1 -2460436784 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets -Lines: 1 -89332064 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data -Lines: 1 -26540356890 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets -Lines: 1 -88622850 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait -Lines: 1 -3846 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error 
-Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state -Lines: 1 -5: LinkUp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate -Lines: 1 -40 Gb/sec (4X QDR) -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state -Lines: 1 -4: ACTIVE -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/net/eth0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_assign_type -Lines: 1 -3 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/addr_len -Lines: 1 -6 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/address -Lines: 1 -01:01:01:01:01:01 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/broadcast -Lines: 1 -ff:ff:ff:ff:ff:ff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_changes -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_down_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/carrier_up_count -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dev_id -Lines: 1 -0x20 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/device -SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/ -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/dormant -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/duplex -Lines: 1 -full -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/flags -Lines: 1 -0x1303 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifalias -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/ifindex -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/iflink -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/link_mode -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/mtu -Lines: 1 -1500 -Mode: 644 
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/name_assign_type -Lines: 1 -2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/netdev_group -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/operstate -Lines: 1 -up -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_port_name -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/phys_switch_id -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/speed -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/tx_queue_len -Lines: 1 -1000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/net/eth0/type -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/nvme/nvme0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/firmware_rev -Lines: 1 -1B2QEXP7 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/model -Lines: 1 -Samsung SSD 970 PRO 512GB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/serial -Lines: 1 -S680HF8N190894I -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/nvme/nvme0/state -Lines: 1 -live -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/AC -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/power_supply/BAT0 -SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/class/powercap/intel-rapl:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/name -Lines: 1 -package-0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw -Lines: 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us -Lines: 1 -976 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj -Lines: 1 -118821284256 
-Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/name -Lines: 1 -core -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/powercap/intel-rapl:a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw -Lines: 1 -95000000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name -Lines: 1 -long_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us -Lines: 1 -999424 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name -Lines: 1 -short_term -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw -Lines: 1 -4090000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us -Lines: 1 -2440 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/enabled -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj -Lines: 1 -240422366267 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj -Lines: 1 -262143328850 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/name -Lines: 1 -package-10 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/powercap/intel-rapl:a/uevent -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0a -SymlinkTo: 
../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/nst0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0 -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0a -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0l -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/scsi_tape/st0m -SymlinkTo: ../../devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/cur_state -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/max_state -Lines: 1 -50 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device0/type -Lines: 1 -Processor -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/cooling_device1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/cur_state -Lines: 1 --1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/max_state -Lines: 1 -27 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/cooling_device1/type -Lines: 1 -intel_powerclamp -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/temp -Lines: 1 -49925 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone0/type 
-Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 --44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device -SymlinkTo: ../../../ACPI0003:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup -Lines: 1 -enabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms -Lines: 1 -10598 -Mode: 444 -# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm -Lines: 1 -2369000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device -SymlinkTo: ../../../PNP0C0A:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11750000 -POWER_SUPPLY_POWER_NOW=5064000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=47390000 -POWER_SUPPLY_ENERGY_NOW=40730000 -POWER_SUPPLY_CAPACITY=85 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - 
[fixtures.ttar, removed in this diff: the remaining scsi_tape stats fixtures under fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape. The nst0 node's write_ns counter is followed by identical stat sets for nst0a, nst0l, nst0m, st0, st0a, st0l, and st0m, each a directory of ten read-only (mode 444) single-value counter files: in_flight 1, io_ns 9247011087720, other_cnt 1409, read_byte_cnt 979383912, read_cnt 3741, read_ns 33788355744, resid_cnt 19, write_byte_cnt 1496246784000, write_cnt 53772916, write_ns 5233597394395. The span closes with directory entries for fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0.]
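Every file in the scsi_tape stat set above holds a single decimal counter, which is the pattern most of these fixtures exercise. As an illustration only, and not the procfs package's actual API, a minimal Go sketch for reading one such counter might look like the following; readUint and the hard-coded fixture path are assumptions for the example:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readUint reads a sysfs-style file holding one decimal value, such as
// .../scsi_tape/st0/stats/write_ns in the fixtures above.
func readUint(path string) (uint64, error) {
	b, err := os.ReadFile(path)
	if err != nil {
		return 0, err
	}
	// TrimSpace guards against an optional trailing newline.
	return strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
}

func main() {
	// "fixtures" stands in for the extracted ttar tree.
	p := "fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/" +
		"end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns"
	n, err := readUint(p)
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Println("write_ns:", n) // 5233597394395 in the fixture
}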
[fixtures.ttar, removed in this diff: bcache fixtures for .../0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache. dirty_data is 0; stats_day, stats_five_minute, stats_hour, and stats_total each carry the eight counters bypassed, cache_bypass_hits, cache_bypass_misses, cache_hit_ratio, cache_hits, cache_miss_collisions, cache_misses, and cache_readaheads (mode 644), all 0 except stats_day (cache_hit_ratio 100, cache_hits 289) and stats_total (cache_hit_ratio 100, cache_hits 546). The span closes with directory entries down to .../0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache.]
[fixtures.ttar, removed in this diff: sdc/bcache fixtures (io_errors 0, metadata_written 512, written 0, and a five-line priority_stats file: Unused 99%, Metadata 0%, Average 10473, Sectors per Q 64, plus a 31-value Quantiles list), followed by PCI attribute fixtures for fixtures/sys/devices/pci0000:00/0000:00:1f.6: ari_enabled 0, class 0x020000, device 0x15d7, vendor 0x8086, subsystem 0x17aa/0x225a, revision 0x21, irq 140, numa_node -1, dma_mask_bits 64, local_cpulist 0-7, a 13-line resource table, and a six-line uevent naming DRIVER=e1000e at PCI_SLOT_NAME=0000:00:1f.6. Also removed: rbd fixtures (rbd/0 name demo, pool iscsi-images; rbd/1 name wrong, pool wrong-images) and directory entries for fixtures/sys/devices/system/clocksource/clocksource0.]
[fixtures.ttar, removed in this diff: clocksource0 fixtures (available_clocksource "tsc hpet acpi_pm", current_clocksource tsc); cpu0 fixtures (a cpufreq symlink to ../cpufreq/policy0, thermal_throttle counts 10084 core / 34818 package, and topology files core_id 0, core_siblings ff, core_siblings_list 0-7, physical_package_id 0, thread_siblings 11, thread_siblings_list 0,4); cpu1 fixtures (a populated cpufreq directory: cpuinfo_cur_freq 1200195, cpuinfo_min/max_freq 1200000/3300000, cpuinfo_transition_latency 4294967295, scaling_available_governors "performance powersave", scaling_driver intel_pstate, scaling_governor powersave, scaling_min/max_freq 1200000/3300000, an empty scaling_setspeed; thermal_throttle 523/34818; topology for core 1 with thread_siblings 22, thread_siblings_list 1,5); cpufreq policy0 (affected/related_cpus 0, cpuinfo_min/max_freq 800000/2400000, scaling_cur_freq 1219917, scaling_driver intel_pstate, scaling_governor powersave) plus an empty policy1 directory; and NUMA vmstat fixtures where node1 counts 1 through 6 and node2 counts 7 through 12 for nr_free_pages, nr_zone_inactive_anon, nr_zone_active_anon, nr_zone_inactive_file, nr_zone_active_file, and nr_zone_unevictable. The span closes with directory entries for fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74.]
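The vmstat fixtures above are two-column name/value files rather than single counters. A hedged sketch of parsing that shape into a map follows; parseVMStat is a made-up helper for the example, not the procfs package's own parser:

package main

import (
	"bufio"
	"fmt"
	"os"
)

// parseVMStat parses "name value" lines such as the node1/vmstat fixture
// above (nr_free_pages 1, nr_zone_inactive_anon 2, ...).
func parseVMStat(path string) (map[string]uint64, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	stats := make(map[string]uint64)
	s := bufio.NewScanner(f)
	for s.Scan() {
		var name string
		var value uint64
		if _, err := fmt.Sscanf(s.Text(), "%s %d", &name, &value); err != nil {
			return nil, err
		}
		stats[name] = value
	}
	return stats, s.Err()
}

func main() {
	m, err := parseVMStat("fixtures/sys/devices/system/node/node1/vmstat")
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	fmt.Println(m["nr_free_pages"]) // 1 in the fixture
}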
[fixtures.ttar, removed in this diff: the bcache cache-set fixtures under fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74: top-level files average_key_size 0, btree_cache_size 0, tree_depth 0, cache_available_percent 100, congested 0, root_usage_percent 0, and internal/ entries (active_journal_entries 1, btree_nodes 0, btree_read_average_duration_us 1305, cache_read_races 0); a bdev0 backing device (dirty_data 0, the same stats_day/stats_five_minute/stats_hour/stats_total counters as sdb above, and a seven-line writeback_rate_debug file: rate 1.1M/sec, dirty 20.4G, target 20.4G, proportional 427.5k, integral 790.0k, change 321.5k/sec, next io 17ms); a cache0 device mirroring sdc/bcache (io_errors 0, metadata_written 512, priority_stats, written 0); and top-level stats_day/stats_five_minute/stats_hour/stats_total directories with the same counter values. The listing then begins the btrfs fixtures for fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation: data (bytes_used 808189952, disk_total 2147483648, disk_used 808189952, flags 1, raid0 total/used 2147483648/808189952, total_bytes 2147483648, total_bytes_pinned 0), global_rsv_reserved and global_rsv_size both 16777216, and metadata (bytes_may_use 16777216, bytes_readonly 131072, bytes_used 933888, disk_total 2147483648, disk_used 1867776, flags 4, raid1 total/used 1073741824/933888, total_bytes 1073741824), with the listing continuing beyond this excerpt.]
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used -Lines: 1 -32768 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label -Lines: 1 -fixture -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid -Lines: 1 -0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 
1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 -SymlinkTo: ../../../../devices/virtual/block/loop22 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 -SymlinkTo: ../../../../devices/virtual/block/loop23 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 -SymlinkTo: ../../../../devices/virtual/block/loop24 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 -SymlinkTo: ../../../../devices/virtual/block/loop25 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid -Lines: 1 -7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path -Lines: 1 -/home/iscsi/file_back_1G -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -1234 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -1504 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -4733 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 0040753b1c..3c18c7610e 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -26,7 +26,7 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" - // DefaultConfigfsMountPoint is the common mount point of the configfs + // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" ) diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 22cb07a6bb..b030951faf 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,7 +14,7 @@ package util import ( - "io/ioutil" + "os" "strconv" "strings" ) @@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) { // ReadUintFromFile reads a file and attempts to parse a uint64 from it. func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) { // ReadIntFromFile reads a file and attempts to parse a int64 from it. func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 8051161b2a..71b7a70ebd 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -15,17 +15,16 @@ package util import ( "io" - "io/ioutil" "os" ) -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. -// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// ReadFileNoStat uses io.ReadAll to read contents of entire file. 
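The internal/util/parse.go hunk above swaps the deprecated ioutil.ReadFile for os.ReadFile, its replacement since Go 1.16. A minimal standalone sketch of the same read-then-parse shape; the path used in main is only an illustrative single-value /proc file, not something this diff reads:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readUint reads a whole file and parses its contents as a uint64, the same
// shape as util.ReadUintFromFile after the ioutil removal.
func readUint(path string) (uint64, error) {
	data, err := os.ReadFile(path) // direct replacement for ioutil.ReadFile
	if err != nil {
		return 0, err
	}
	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
}

func main() {
	// Illustrative: /proc/sys/kernel/pid_max holds a single numeric value.
	v, err := readUint("/proc/sys/kernel/pid_max")
	fmt.Println(v, err)
}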
+// This is similar to os.ReadFile but without the call to os.Stat, because // many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner +// Reads a max file size of 1024kB. For files larger than this, a scanner // should be used. func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 + const maxBufferSize = 1024 * 1024 f, err := os.Open(filename) if err != nil { @@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) { defer f.Close() reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index c07de0b6c9..1ab875ceec 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,!appengine +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine package util @@ -21,7 +23,7 @@ import ( "syscall" ) -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. // https://github.com/prometheus/node_exporter/pull/728/files // // Note that this function will not read files larger than 128 bytes. @@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) { defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. + // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using syscall directly. diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index bd55b45377..1d86f5e63f 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,appengine !linux +//go:build (linux && appengine) || (!linux && !darwin) +// +build linux,appengine !linux,!darwin package util diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 89e447746c..391c07957e 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(r) + statContent, err := io.ReadAll(r) if err != nil { return IPVSStats{}, err } diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index da3a941d60..db88566bdf 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
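ReadFileNoStat above deliberately skips os.Stat because many /proc and /sys files report a size of 0 or 4096, and this hunk raises its read cap from 512 kB to 1024 kB. A sketch of the same bounded-read pattern under that assumption; the file read in main is illustrative:

package main

import (
	"fmt"
	"io"
	"os"
)

// readCapped reads at most limit bytes from filename without calling
// os.Stat first, mirroring the shape of util.ReadFileNoStat.
func readCapped(filename string, limit int64) ([]byte, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// LimitReader stops silently at limit; files larger than the cap
	// should be consumed with a scanner instead.
	return io.ReadAll(io.LimitReader(f, limit))
}

func main() {
	b, err := readCapped("/proc/loadavg", 1024*1024) // 1024 kB cap, as in the diff
	fmt.Printf("%s %v\n", b, err)
}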
+//go:build !windows // +build !windows package procfs diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 0cce190ec2..0096cafbdf 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// LoadAvg represents an entry in /proc/loadavg +// LoadAvg represents an entry in /proc/loadavg. type LoadAvg struct { Load1 float64 Load5 float64 diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index f0b9e5f75a..a95c889cb9 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -15,7 +15,7 @@ package procfs import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -64,7 +64,7 @@ type MDStat struct { // structs containing the relevant info. More information available here: // https://raid.wiki.kernel.org/index.php/Mdstat func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + data, err := os.ReadFile(fs.proc.Path("mdstat")) if err != nil { return nil, err } @@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + statusFields := strings.Fields(statusLine) + if len(statusFields) < 1 { + return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) + } - sizeStr := strings.Fields(statusLine)[0] + sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 9964a3600b..8300daca05 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -25,7 +25,7 @@ import ( ) // A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core +// and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 Found uint64 @@ -38,12 +38,12 @@ type ConntrackStatEntry struct { SearchRestart uint64 } -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) } -// Parses a slice of ConntrackStatEntries from the given filepath +// Parses a slice of ConntrackStatEntries from the given filepath. func readConntrackStat(path string) ([]ConntrackStatEntry, error) { // This file is small and can be read with one syscall. b, err := util.ReadFileNoStat(path) @@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { return stat, nil } -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. 
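The mdstat.go change above guards evalStatusLine against an empty status line: indexing strings.Fields(statusLine)[0] directly would panic on malformed input. The same defensive shape in isolation, with an illustrative sample line:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// firstFieldAsInt64 shows the guarded parse from evalStatusLine: check the
// field count before indexing so a malformed line returns an error, not a panic.
func firstFieldAsInt64(statusLine string) (int64, error) {
	fields := strings.Fields(statusLine)
	if len(fields) < 1 {
		return 0, fmt.Errorf("unexpected statusLine %q", statusLine)
	}
	return strconv.ParseInt(fields[0], 10, 64)
}

func main() {
	fmt.Println(firstFieldAsInt64("244198912 blocks super 1.2 [2/2] [UU]"))
	fmt.Println(firstFieldAsInt64("")) // now an error instead of a panic
}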
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { var entries []ConntrackStatEntry @@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { return entries, nil } -// Parses a ConntrackStatEntry from given array of fields +// Parses a ConntrackStatEntry from given array of fields. func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { if len(fields) != 17 { return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") @@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { return entry, nil } -// Parses a uint64 from given hex in string +// Parses a uint64 from given hex in string. func parseConntrackStatField(field string) (uint64, error) { val, err := strconv.ParseUint(field, 16, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 47a710befb..e66208aa05 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) { // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { return nil, errors.New("invalid net/dev line, missing colon") } - fields := strings.Fields(strings.TrimSpace(parts[1])) + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) var err error line := &NetDevLine{} // Interface Name - line.Name = strings.TrimSpace(parts[0]) + line.Name = strings.TrimSpace(rawLine[:idx]) if line.Name == "" { return nil, errors.New("invalid net/dev line, empty interface name") } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 8c9ee3de87..7fd57d7f46 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -34,7 +34,7 @@ const ( readLimit = 4294967296 // Byte -> 4 GiB ) -// this contains generic data structures for both udp and tcp sockets +// This contains generic data structures for both udp and tcp sockets. type ( // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. NetIPSocket []*netIPSocketLine diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8c6de3791b..374b6f73f8 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// NetProtocolStats stores the contents from /proc/net/protocols +// NetProtocolStats stores the contents from /proc/net/protocols. type NetProtocolStats map[string]NetProtocolStatLine // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We @@ -41,7 +41,7 @@ type NetProtocolStatLine struct { Capabilities NetProtocolCapabilities } -// NetProtocolCapabilities contains a list of capabilities for each protocol +// NetProtocolCapabilities contains a list of capabilities for each protocol. 
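The net_dev.go hunk above splits on the last colon instead of the first, so an interface name that itself contains ':' (legacy alias names like eth0:1, for example) is kept whole, while the counters after the final colon are still isolated. A sketch of that split; the sample line is illustrative:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// splitNetDevLine separates the interface name from its counters using the
// last colon, so a name containing ':' survives intact.
func splitNetDevLine(rawLine string) (string, []string, error) {
	idx := strings.LastIndex(rawLine, ":")
	if idx == -1 {
		return "", nil, errors.New("invalid net/dev line, missing colon")
	}
	name := strings.TrimSpace(rawLine[:idx])
	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
	return name, fields, nil
}

func main() {
	// A first-colon split would truncate "eth0:1" to "eth0" here.
	name, fields, err := splitNetDevLine("  eth0:1: 4200 12 0 0 0 0 0 0")
	fmt.Println(name, len(fields), err)
}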
type NetProtocolCapabilities struct { Close bool // 8 Connect bool // 9 diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 46f12c61d3..a94f86dc4a 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -30,13 +30,13 @@ import ( // * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. -// SoftnetStat contains a single row of data from /proc/net/softnet_stat +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { - // Number of processed packets + // Number of processed packets. Processed uint32 - // Number of dropped packets + // Number of dropped packets. Dropped uint32 - // Number of times processing packets ran out of quota + // Number of times processing packets ran out of quota. TimeSqueezed uint32 } diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go similarity index 96% rename from vendor/github.com/prometheus/procfs/xfrm.go rename to vendor/github.com/prometheus/procfs/net_xfrm.go index eed07c7d77..f9d9d243db 100644 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -79,10 +79,13 @@ type XfrmStat struct { // Policy is dead XfrmOutPolDead int // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired XfrmOutStateInvalid int - XfrmAcquireError int + // State hasn’t been fully acquired before use + XfrmAcquireError int } // NewXfrmStat reads the xfrm_stat statistics. diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 94d892f113..dcea9c5a67 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -21,13 +21,13 @@ import ( "strings" ) -// NetStat contains statistics for all the counters from one file +// NetStat contains statistics for all the counters from one file. type NetStat struct { - Filename string Stats map[string][]uint64 + Filename string } -// NetStat retrieves stats from /proc/net/stat/ +// NetStat retrieves stats from `/proc/net/stat/`. func (fs FS) NetStat() ([]NetStat, error) { statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) if err != nil { @@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) { // Other strings represent per-CPU counters for scanner.Scan() { for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 32) + value, err := strconv.ParseUint(counter, 16, 64) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 28f696803f..c30223af72 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -16,7 +16,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "io" "os" "strconv" "strings" @@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) { // NewProc returns a process for the given pid. // -// Deprecated: use fs.Proc() instead +// Deprecated: Use fs.Proc() instead. 
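The netstat.go fix above widens the hex parse from 32 to 64 bits: the per-CPU counters under /proc/net/stat can exceed 32 bits on long-running hosts, and the values are stored in uint64 slices anyway. A quick demonstration with an illustrative counter value:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Illustrative counter: 0x1a2b3c4d5e does not fit in 32 bits.
	c := "1a2b3c4d5e"
	_, err32 := strconv.ParseUint(c, 16, 32)   // old parse: range error
	v64, err64 := strconv.ParseUint(c, 16, 64) // new parse: succeeds
	fmt.Println(err32)
	fmt.Println(v64, err64)
}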
func (fs FS) NewProc(pid int) (Proc, error) { return fs.Proc(pid) } @@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) { } defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return "", err } @@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) { return wd, err } -// RootDir returns the absolute path to the process's root directory (as set by chroot) +// RootDir returns the absolute path to the process's root directory (as set by chroot). func (p Proc) RootDir() (string, error) { rdir, err := os.Readlink(p.path("root")) if os.IsNotExist(err) { @@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { // Schedstat returns task scheduling information for the process. func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) + contents, err := os.ReadFile(p.path("schedstat")) if err != nil { return ProcSchedstat{}, err } diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index be45b79873..cca03327c3 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -45,7 +45,7 @@ type Cgroup struct { } // parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path +// Line format is hierarchyID:[controller1,controller2]:path. func parseCgroupString(cgroupStr string) (*Cgroup, error) { var err error @@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { return cgroup, nil } -// parseCgroups reads each line of the /proc/[pid]/cgroup file +// parseCgroups reads each line of the /proc/[pid]/cgroup file. func parseCgroups(data []byte) ([]Cgroup, error) { var cgroups []Cgroup scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) { // Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system +// so the len of the returned struct is equal to the number of active hierarchies on this system. func (p Proc) Cgroups() ([]Cgroup, error) { data, err := util.ReadFileNoStat(p.path("cgroup")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 0000000000..24d4dce9cf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. 
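proc_cgroup.go above parses lines of the form hierarchyID:[controller1,controller2]:path. A self-contained sketch of that three-way split, not the library's actual implementation; the sample line is illustrative:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCgroupLine splits one /proc/[pid]/cgroup line of the form
// hierarchyID:controller1,controller2:path into its three parts.
func parseCgroupLine(line string) (int, []string, string, error) {
	parts := strings.SplitN(line, ":", 3)
	if len(parts) != 3 {
		return 0, nil, "", fmt.Errorf("at least 3 fields required, found %d in %q", len(parts), line)
	}
	id, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, nil, "", err
	}
	var controllers []string
	if parts[1] != "" { // empty for the cgroup v2 unified hierarchy
		controllers = strings.Split(parts[1], ",")
	}
	return id, controllers, parts[2], nil
}

func main() {
	fmt.Println(parseCgroupLine("5:cpuacct,cpu,cpuset:/daemons"))
}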
+// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. +func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("failed to parse hierarchy ID") + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("failed to parse Cgroup Num") + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("failed to parse Enabled") + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. +func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 6134b3580c..57a89895d6 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Environ reads process environments from /proc//environ +// Environ reads process environments from `/proc//environ`. 
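As a usage sketch for the CgroupSummarys helper added above: it reads the system-wide /proc/cgroups table (one row per compiled-in controller), so no PID is involved. The mount point and error handling below are illustrative assumptions:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc") // assumed mount point
    if err != nil {
        panic(err)
    }
    summaries, err := fs.CgroupSummarys()
    if err != nil {
        panic(err)
    }
    for _, s := range summaries {
        // Mirrors a /proc/cgroups row: subsys_name hierarchy num_cgroups enabled.
        fmt.Printf("%s hierarchy=%d cgroups=%d enabled=%d\n",
            s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
    }
}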
func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index cf63227f06..1bbdd4a8e9 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -22,7 +22,6 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Regexp variables var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) @@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) } func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } -// InotifyWatchLen returns the total number of inotify watches +// InotifyWatchLen returns the total number of inotify watches. func (p ProcFDInfos) InotifyWatchLen() (int, error) { length := 0 for _, f := range p { diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index dd20f198a3..7a1388185a 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -79,7 +79,7 @@ var ( // NewLimits returns the current soft limits of the process. // -// Deprecated: use p.Limits() instead +// Deprecated: Use p.Limits() instead. func (p Proc) NewLimits() (ProcLimits, error) { return p.Limits() } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 1d7772d516..f1bcbf32bb 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js package procfs @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. type ProcMapPermissions struct { // mapping has the [R]ead flag set Read bool @@ -39,8 +41,8 @@ type ProcMapPermissions struct { Private bool } -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. type ProcMap struct { // The start address of current mapping. StartAddr uintptr @@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) { return unix.Mkdev(uint32(major), uint32(minor)), nil } -// parseAddress just converts a hex-string to a uintptr +// parseAddress converts a hex-string to a uintptr. func parseAddress(s string) (uintptr, error) { a, err := strconv.ParseUint(s, 16, 0) if err != nil { @@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) { return uintptr(a), nil } -// parseAddresses parses the start-end address +// parseAddresses parses the start-end address. 
func parseAddresses(s string) (uintptr, uintptr, error) { toks := strings.Split(s, "-") if len(toks) < 2 { diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 0000000000..48b5238194 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,440 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. + PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent float64 + SyncookiesRecv float64 + SyncookiesFailed float64 + EmbryonicRsts float64 + PruneCalled float64 + RcvPruned float64 + OfoPruned float64 + OutOfWindowIcmps float64 + LockDroppedIcmps float64 + ArpFilter float64 + TW float64 + TWRecycled float64 + TWKilled float64 + PAWSActive float64 + PAWSEstab float64 + DelayedACKs float64 + DelayedACKLocked float64 + DelayedACKLost float64 + ListenOverflows float64 + ListenDrops float64 + TCPHPHits float64 + TCPPureAcks float64 + TCPHPAcks float64 + TCPRenoRecovery float64 + TCPSackRecovery float64 + TCPSACKReneging float64 + TCPSACKReorder float64 + TCPRenoReorder float64 + TCPTSReorder float64 + TCPFullUndo float64 + TCPPartialUndo float64 + TCPDSACKUndo float64 + TCPLossUndo float64 + TCPLostRetransmit float64 + TCPRenoFailures float64 + TCPSackFailures float64 + TCPLossFailures float64 + TCPFastRetrans float64 + TCPSlowStartRetrans float64 + TCPTimeouts float64 + TCPLossProbes float64 + TCPLossProbeRecovery float64 + TCPRenoRecoveryFail float64 + TCPSackRecoveryFail float64 + TCPRcvCollapsed float64 + TCPDSACKOldSent float64 + TCPDSACKOfoSent float64 + TCPDSACKRecv float64 + TCPDSACKOfoRecv float64 + TCPAbortOnData float64 + TCPAbortOnClose float64 + TCPAbortOnMemory float64 + TCPAbortOnTimeout float64 + TCPAbortOnLinger float64 + TCPAbortFailed float64 + TCPMemoryPressures float64 + TCPMemoryPressuresChrono float64 + TCPSACKDiscard float64 + TCPDSACKIgnoredOld float64 + TCPDSACKIgnoredNoUndo float64 + TCPSpuriousRTOs float64 + TCPMD5NotFound float64 + TCPMD5Unexpected float64 + TCPMD5Failure float64 + TCPSackShifted float64 + TCPSackMerged float64 + TCPSackShiftFallback float64 + TCPBacklogDrop float64 + PFMemallocDrop float64 + TCPMinTTLDrop float64 + TCPDeferAcceptDrop float64 + IPReversePathFilter float64 + TCPTimeWaitOverflow float64 + TCPReqQFullDoCookies float64 + TCPReqQFullDrop float64 + TCPRetransFail float64 + TCPRcvCoalesce float64 + TCPOFOQueue float64 + TCPOFODrop float64 + TCPOFOMerge float64 + TCPChallengeACK float64 + TCPSYNChallenge float64 + TCPFastOpenActive float64 + TCPFastOpenActiveFail float64 + TCPFastOpenPassive float64 + TCPFastOpenPassiveFail float64 + TCPFastOpenListenOverflow float64 + TCPFastOpenCookieReqd float64 + 
TCPFastOpenBlackhole float64 + TCPSpuriousRtxHostQueues float64 + BusyPollRxPackets float64 + TCPAutoCorking float64 + TCPFromZeroWindowAdv float64 + TCPToZeroWindowAdv float64 + TCPWantZeroWindowAdv float64 + TCPSynRetrans float64 + TCPOrigDataSent float64 + TCPHystartTrainDetect float64 + TCPHystartTrainCwnd float64 + TCPHystartDelayDetect float64 + TCPHystartDelayCwnd float64 + TCPACKSkippedSynRecv float64 + TCPACKSkippedPAWS float64 + TCPACKSkippedSeq float64 + TCPACKSkippedFinWait2 float64 + TCPACKSkippedTimeWait float64 + TCPACKSkippedChallenge float64 + TCPWinProbe float64 + TCPKeepAlive float64 + TCPMTUPFail float64 + TCPMTUPSuccess float64 + TCPWqueueTooBig float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes float64 + InTruncatedPkts float64 + InMcastPkts float64 + OutMcastPkts float64 + InBcastPkts float64 + OutBcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InCsumErrors float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 + ReasmOverlaps float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseNetstat parses the metrics from proc//net/netstat file +// and returns a ProcNetstat structure. +func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procNetstat, fmt.Errorf("field count mismatch in %s: %s",
+ fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procNetstat, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "TcpExt":
+ switch key {
+ case "SyncookiesSent":
+ procNetstat.TcpExt.SyncookiesSent = value
+ case "SyncookiesRecv":
+ procNetstat.TcpExt.SyncookiesRecv = value
+ case "SyncookiesFailed":
+ procNetstat.TcpExt.SyncookiesFailed = value
+ case "EmbryonicRsts":
+ procNetstat.TcpExt.EmbryonicRsts = value
+ case "PruneCalled":
+ procNetstat.TcpExt.PruneCalled = value
+ case "RcvPruned":
+ procNetstat.TcpExt.RcvPruned = value
+ case "OfoPruned":
+ procNetstat.TcpExt.OfoPruned = value
+ case "OutOfWindowIcmps":
+ procNetstat.TcpExt.OutOfWindowIcmps = value
+ case "LockDroppedIcmps":
+ procNetstat.TcpExt.LockDroppedIcmps = value
+ case "ArpFilter":
+ procNetstat.TcpExt.ArpFilter = value
+ case "TW":
+ procNetstat.TcpExt.TW = value
+ case "TWRecycled":
+ procNetstat.TcpExt.TWRecycled = value
+ case "TWKilled":
+ procNetstat.TcpExt.TWKilled = value
+ case "PAWSActive":
+ procNetstat.TcpExt.PAWSActive = value
+ case "PAWSEstab":
+ procNetstat.TcpExt.PAWSEstab = value
+ case "DelayedACKs":
+ procNetstat.TcpExt.DelayedACKs = value
+ case "DelayedACKLocked":
+ procNetstat.TcpExt.DelayedACKLocked = value
+ case "DelayedACKLost":
+ procNetstat.TcpExt.DelayedACKLost = value
+ case "ListenOverflows":
+ procNetstat.TcpExt.ListenOverflows = value
+ case "ListenDrops":
+ procNetstat.TcpExt.ListenDrops = value
+ case "TCPHPHits":
+ procNetstat.TcpExt.TCPHPHits = value
+ case "TCPPureAcks":
+ procNetstat.TcpExt.TCPPureAcks = value
+ case "TCPHPAcks":
+ procNetstat.TcpExt.TCPHPAcks = value
+ case "TCPRenoRecovery":
+ procNetstat.TcpExt.TCPRenoRecovery = value
+ case "TCPSackRecovery":
+ procNetstat.TcpExt.TCPSackRecovery = value
+ case "TCPSACKReneging":
+ procNetstat.TcpExt.TCPSACKReneging = value
+ case "TCPSACKReorder":
+ procNetstat.TcpExt.TCPSACKReorder = value
+ case "TCPRenoReorder":
+ procNetstat.TcpExt.TCPRenoReorder = value
+ case "TCPTSReorder":
+ procNetstat.TcpExt.TCPTSReorder = value
+ case "TCPFullUndo":
+ procNetstat.TcpExt.TCPFullUndo = value
+ case "TCPPartialUndo":
+ procNetstat.TcpExt.TCPPartialUndo = value
+ case "TCPDSACKUndo":
+ procNetstat.TcpExt.TCPDSACKUndo = value
+ case "TCPLossUndo":
+ procNetstat.TcpExt.TCPLossUndo = value
+ case "TCPLostRetransmit":
+ procNetstat.TcpExt.TCPLostRetransmit = value
+ case "TCPRenoFailures":
+ procNetstat.TcpExt.TCPRenoFailures = value
+ case "TCPSackFailures":
+ procNetstat.TcpExt.TCPSackFailures = value
+ case "TCPLossFailures":
+ procNetstat.TcpExt.TCPLossFailures = value
+ case "TCPFastRetrans":
+ procNetstat.TcpExt.TCPFastRetrans = value
+ case "TCPSlowStartRetrans":
+ procNetstat.TcpExt.TCPSlowStartRetrans = value
+ case "TCPTimeouts":
+ procNetstat.TcpExt.TCPTimeouts = value
+ case "TCPLossProbes":
+ procNetstat.TcpExt.TCPLossProbes = value
+ case "TCPLossProbeRecovery":
+ procNetstat.TcpExt.TCPLossProbeRecovery = value
+ case "TCPRenoRecoveryFail":
+ procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ case "TCPSackRecoveryFail":
+ procNetstat.TcpExt.TCPSackRecoveryFail = value
+ case "TCPRcvCollapsed":
+ procNetstat.TcpExt.TCPRcvCollapsed = value
+ case "TCPDSACKOldSent":
+ procNetstat.TcpExt.TCPDSACKOldSent = value
+ case "TCPDSACKOfoSent":
+
procNetstat.TcpExt.TCPDSACKOfoSent = value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = value + case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = value + } + case "IpExt": + switch key { + case "InNoRoutes": + 
procNetstat.IpExt.InNoRoutes = value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = value + case "InOctets": + procNetstat.IpExt.InOctets = value + case "OutOctets": + procNetstat.IpExt.OutOctets = value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index dc6c14f0a4..a68fe15290 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -35,9 +35,10 @@ import ( const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. type PSILine struct { Avg10 float64 Avg60 float64 @@ -46,8 +47,9 @@ type PSILine struct { } // PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. type PSIStats struct { Some *PSILine Full *PSILine @@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { return parsePSIStats(resource, bytes.NewReader(data)) } -// parsePSIStats parses the specified file for pressure stall information +// parsePSIStats parses the specified file for pressure stall information. func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { psiStats := PSIStats{} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index a576a720a4..0e97d99575 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -28,30 +29,30 @@ import ( ) var ( - // match the header line before each mapped zone in /proc/pid/smaps + // match the header line before each mapped zone in `/proc/pid/smaps`. 
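To make the new per-process netstat parser concrete, a minimal sketch that reads a process's TcpExt and IpExt counters; the chosen PID, mount point, and printed fields are assumptions for illustration:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    p, err := fs.Proc(1) // PID 1 picked arbitrarily
    if err != nil {
        panic(err)
    }
    ns, err := p.Netstat() // parses /proc/1/net/netstat
    if err != nil {
        panic(err)
    }
    fmt.Println("listen drops:", ns.TcpExt.ListenDrops)
    fmt.Println("octets in:", ns.IpExt.InOctets)
}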
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM + // Amount of the mapping that is currently resident in RAM. Rss uint64 - // Process's proportional share of this mapping + // Process's proportional share of this mapping. Pss uint64 - // Size in bytes of clean shared pages + // Size in bytes of clean shared pages. SharedClean uint64 - // Size in bytes of dirty shared pages + // Size in bytes of dirty shared pages. SharedDirty uint64 - // Size in bytes of clean private pages + // Size in bytes of clean private pages. PrivateClean uint64 - // Size in bytes of dirty private pages + // Size in bytes of dirty private pages. PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed + // Amount of memory currently marked as referenced or accessed. Referenced uint64 - // Amount of memory that does not belong to any file + // Amount of memory that does not belong to any file. Anonymous uint64 - // Amount would-be-anonymous memory currently on swap + // Amount would-be-anonymous memory currently on swap. Swap uint64 - // Process's proportional memory on swap + // Process's proportional memory on swap. SwapPss uint64 } diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 0000000000..ae191896cb --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding float64 + DefaultTTL float64 + InReceives float64 + InHdrErrors float64 + InAddrErrors float64 + ForwDatagrams float64 + InUnknownProtos float64 + InDiscards float64 + InDelivers float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 +} + +type Icmp struct { + InMsgs float64 + InErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InTimeExcds float64 + InParmProbs float64 + InSrcQuenchs float64 + InRedirects float64 + InEchos float64 + InEchoReps float64 + InTimestamps float64 + InTimestampReps float64 + InAddrMasks float64 + InAddrMaskReps float64 + OutMsgs float64 + OutErrors float64 + OutDestUnreachs float64 + OutTimeExcds float64 + OutParmProbs float64 + OutSrcQuenchs float64 + OutRedirects float64 + OutEchos float64 + OutEchoReps float64 + OutTimestamps float64 + OutTimestampReps float64 + OutAddrMasks float64 + OutAddrMaskReps float64 +} + +type IcmpMsg struct { + InType3 float64 + OutType3 float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm float64 + RtoMin float64 + RtoMax float64 + MaxConn float64 + ActiveOpens float64 + PassiveOpens float64 + AttemptFails float64 + EstabResets float64 + CurrEstab float64 + InSegs float64 + OutSegs float64 + RetransSegs float64 + InErrs float64 + OutRsts float64 + InCsumErrors float64 +} + +type Udp struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procSnmp, fmt.Errorf("field count mismatch in %s: %s",
+ fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procSnmp, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "Ip":
+ switch key {
+ case "Forwarding":
+ procSnmp.Ip.Forwarding = value
+ case "DefaultTTL":
+ procSnmp.Ip.DefaultTTL = value
+ case "InReceives":
+ procSnmp.Ip.InReceives = value
+ case "InHdrErrors":
+ procSnmp.Ip.InHdrErrors = value
+ case "InAddrErrors":
+ procSnmp.Ip.InAddrErrors = value
+ case "ForwDatagrams":
+ procSnmp.Ip.ForwDatagrams = value
+ case "InUnknownProtos":
+ procSnmp.Ip.InUnknownProtos = value
+ case "InDiscards":
+ procSnmp.Ip.InDiscards = value
+ case "InDelivers":
+ procSnmp.Ip.InDelivers = value
+ case "OutRequests":
+ procSnmp.Ip.OutRequests = value
+ case "OutDiscards":
+ procSnmp.Ip.OutDiscards = value
+ case "OutNoRoutes":
+ procSnmp.Ip.OutNoRoutes = value
+ case "ReasmTimeout":
+ procSnmp.Ip.ReasmTimeout = value
+ case "ReasmReqds":
+ procSnmp.Ip.ReasmReqds = value
+ case "ReasmOKs":
+ procSnmp.Ip.ReasmOKs = value
+ case "ReasmFails":
+ procSnmp.Ip.ReasmFails = value
+ case "FragOKs":
+ procSnmp.Ip.FragOKs = value
+ case "FragFails":
+ procSnmp.Ip.FragFails = value
+ case "FragCreates":
+ procSnmp.Ip.FragCreates = value
+ }
+ case "Icmp":
+ switch key {
+ case "InMsgs":
+ procSnmp.Icmp.InMsgs = value
+ case "InErrors":
+ procSnmp.Icmp.InErrors = value
+ case "InCsumErrors":
+ procSnmp.Icmp.InCsumErrors = value
+ case "InDestUnreachs":
+ procSnmp.Icmp.InDestUnreachs = value
+ case "InTimeExcds":
+ procSnmp.Icmp.InTimeExcds = value
+ case "InParmProbs":
+ procSnmp.Icmp.InParmProbs = value
+ case "InSrcQuenchs":
+ procSnmp.Icmp.InSrcQuenchs = value
+ case "InRedirects":
+ procSnmp.Icmp.InRedirects = value
+ case "InEchos":
+ procSnmp.Icmp.InEchos = value
+ case "InEchoReps":
+ procSnmp.Icmp.InEchoReps = value
+ case "InTimestamps":
+ procSnmp.Icmp.InTimestamps = value
+ case "InTimestampReps":
+ procSnmp.Icmp.InTimestampReps = value
+ case "InAddrMasks":
+ procSnmp.Icmp.InAddrMasks = value
+ case "InAddrMaskReps":
+ procSnmp.Icmp.InAddrMaskReps = value
+ case "OutMsgs":
+ procSnmp.Icmp.OutMsgs = value
+ case "OutErrors":
+ procSnmp.Icmp.OutErrors = value
+ case "OutDestUnreachs":
+ procSnmp.Icmp.OutDestUnreachs = value
+ case "OutTimeExcds":
+ procSnmp.Icmp.OutTimeExcds = value
+ case "OutParmProbs":
+ procSnmp.Icmp.OutParmProbs = value
+ case "OutSrcQuenchs":
+ procSnmp.Icmp.OutSrcQuenchs = value
+ case "OutRedirects":
+ procSnmp.Icmp.OutRedirects = value
+ case "OutEchos":
+ procSnmp.Icmp.OutEchos = value
+ case "OutEchoReps":
+ procSnmp.Icmp.OutEchoReps = value
+ case "OutTimestamps":
+ procSnmp.Icmp.OutTimestamps = value
+ case "OutTimestampReps":
+ procSnmp.Icmp.OutTimestampReps = value
+ case "OutAddrMasks":
+ procSnmp.Icmp.OutAddrMasks = value
+ case "OutAddrMaskReps":
+ procSnmp.Icmp.OutAddrMaskReps = value
+ }
+ case "IcmpMsg":
+ switch key {
+ case "InType3":
+ procSnmp.IcmpMsg.InType3 = value
+ case "OutType3":
+ procSnmp.IcmpMsg.OutType3 = value
+ }
+ case "Tcp":
+ switch key {
+ case "RtoAlgorithm":
+ procSnmp.Tcp.RtoAlgorithm = value
+ case "RtoMin":
+ procSnmp.Tcp.RtoMin = value
+ case "RtoMax":
+ procSnmp.Tcp.RtoMax = value
+ case "MaxConn":
+ procSnmp.Tcp.MaxConn = value
+ case "ActiveOpens":
+ procSnmp.Tcp.ActiveOpens = value
+ case "PassiveOpens":
+
procSnmp.Tcp.PassiveOpens = value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = value + case "EstabResets": + procSnmp.Tcp.EstabResets = value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = value + case "InSegs": + procSnmp.Tcp.InSegs = value + case "OutSegs": + procSnmp.Tcp.OutSegs = value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = value + case "InErrs": + procSnmp.Tcp.InErrs = value + case "OutRsts": + procSnmp.Tcp.OutRsts = value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = value + case "NoPorts": + procSnmp.Udp.NoPorts = value + case "InErrors": + procSnmp.Udp.InErrors = value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = value + case "NoPorts": + procSnmp.UdpLite.NoPorts = value + case "InErrors": + procSnmp.UdpLite.InErrors = value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 0000000000..f611992d52 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. 
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives float64 + InHdrErrors float64 + InTooBigErrors float64 + InNoRoutes float64 + InAddrErrors float64 + InUnknownProtos float64 + InTruncatedPkts float64 + InDiscards float64 + InDelivers float64 + OutForwDatagrams float64 + OutRequests float64 + OutDiscards float64 + OutNoRoutes float64 + ReasmTimeout float64 + ReasmReqds float64 + ReasmOKs float64 + ReasmFails float64 + FragOKs float64 + FragFails float64 + FragCreates float64 + InMcastPkts float64 + OutMcastPkts float64 + InOctets float64 + OutOctets float64 + InMcastOctets float64 + OutMcastOctets float64 + InBcastOctets float64 + OutBcastOctets float64 + InNoECTPkts float64 + InECT1Pkts float64 + InECT0Pkts float64 + InCEPkts float64 +} + +type Icmp6 struct { + InMsgs float64 + InErrors float64 + OutMsgs float64 + OutErrors float64 + InCsumErrors float64 + InDestUnreachs float64 + InPktTooBigs float64 + InTimeExcds float64 + InParmProblems float64 + InEchos float64 + InEchoReplies float64 + InGroupMembQueries float64 + InGroupMembResponses float64 + InGroupMembReductions float64 + InRouterSolicits float64 + InRouterAdvertisements float64 + InNeighborSolicits float64 + InNeighborAdvertisements float64 + InRedirects float64 + InMLDv2Reports float64 + OutDestUnreachs float64 + OutPktTooBigs float64 + OutTimeExcds float64 + OutParmProblems float64 + OutEchos float64 + OutEchoReplies float64 + OutGroupMembQueries float64 + OutGroupMembResponses float64 + OutGroupMembReductions float64 + OutRouterSolicits float64 + OutRouterAdvertisements float64 + OutNeighborSolicits float64 + OutNeighborAdvertisements float64 + OutRedirects float64 + OutMLDv2Reports float64 + InType1 float64 + InType134 float64 + InType135 float64 + InType136 float64 + InType143 float64 + OutType133 float64 + OutType135 float64 + OutType136 float64 + OutType143 float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 + IgnoredMulti float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams float64 + NoPorts float64 + InErrors float64 + OutDatagrams float64 + RcvbufErrors float64 + SndbufErrors float64 + InCsumErrors float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. 
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = value + case "InDiscards": + procSnmp6.Ip6.InDiscards = value + case "InDelivers": + procSnmp6.Ip6.InDelivers = value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = value + case "OutRequests": + procSnmp6.Ip6.OutRequests = value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = value + case "FragOKs": + procSnmp6.Ip6.FragOKs = value + case "FragFails": + procSnmp6.Ip6.FragFails = value + case "FragCreates": + procSnmp6.Ip6.FragCreates = value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = value + case "InOctets": + procSnmp6.Ip6.InOctets = value + case "OutOctets": + procSnmp6.Ip6.OutOctets = value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = value + case "InErrors": + procSnmp6.Icmp6.InErrors = value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = value + case "InEchos": + procSnmp6.Icmp6.InEchos = value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = value + case "InNeighborSolicits": + 
procSnmp6.Icmp6.InNeighborSolicits = value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = value + case "InType1": + procSnmp6.Icmp6.InType1 = value + case "InType134": + procSnmp6.Icmp6.InType134 = value + case "InType135": + procSnmp6.Icmp6.InType135 = value + case "InType136": + procSnmp6.Icmp6.InType136 = value + case "InType143": + procSnmp6.Icmp6.InType143 = value + case "OutType133": + procSnmp6.Icmp6.OutType133 = value + case "OutType135": + procSnmp6.Icmp6.OutType135 = value + case "OutType136": + procSnmp6.Icmp6.OutType136 = value + case "OutType143": + procSnmp6.Icmp6.OutType143 = value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = value + case "NoPorts": + procSnmp6.Udp6.NoPorts = value + case "InErrors": + procSnmp6.Udp6.InErrors = value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = value + case "InErrors": + procSnmp6.UdpLite6.InErrors = value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 8c7b6e80a3..06c556ef96 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -81,10 +81,10 @@ type ProcStat struct { STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. - CUTime uint + CUTime int // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. 
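The Snmp and Snmp6 accessors introduced above follow the same two-line header/value parsing pattern; note that Snmp6 intentionally returns a zero-valued struct with a nil error when the snmp6 file is absent (IPv6 disabled). A hedged sketch, with the PID and printed fields chosen for illustration:

package main

import (
    "fmt"
    "os"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    p, err := fs.Proc(os.Getpid()) // this process, as an example
    if err != nil {
        panic(err)
    }
    snmp, err := p.Snmp() // /proc/<pid>/net/snmp
    if err != nil {
        panic(err)
    }
    snmp6, err := p.Snmp6() // nil error even when IPv6 is disabled
    if err != nil {
        panic(err)
    }
    fmt.Println("TCP retransmits:", snmp.Tcp.RetransSegs)
    fmt.Println("IPv6 in:", snmp6.Ip6.InReceives)
}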
- CSTime uint + CSTime int // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int @@ -115,7 +115,7 @@ type ProcStat struct { // NewStat returns the current status information of the process. // -// Deprecated: use p.Stat() instead +// Deprecated: Use p.Stat() instead. func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } @@ -141,6 +141,11 @@ func (p Proc) Stat() (ProcStat, error) { } s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 6edd8333b3..594022ded4 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -33,37 +33,37 @@ type ProcStatus struct { TGID int // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // nolint:revive // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // nolint:revive // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // nolint:revive // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // nolint:revive // Peak resident set size. - VmHWM uint64 // nolint:golint + VmHWM uint64 // nolint:revive // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // nolint:revive // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // nolint:revive // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // nolint:revive // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // nolint:revive // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // nolint:revive // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // nolint:revive // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // nolint:revive // Shared library code size. - VmLib uint64 // nolint:golint + VmLib uint64 // nolint:revive // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // nolint:revive // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // nolint:revive // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint + VmSwap uint64 // nolint:revive // Size of hugetlb memory portions HugetlbPages uint64 diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go new file mode 100644 index 0000000000..d46533ebf4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +func sysctlToPath(sysctl string) string { + return strings.Replace(sysctl, ".", "/", -1) +} + +func (fs FS) SysctlStrings(sysctl string) ([]string, error) { + value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) + if err != nil { + return nil, err + } + return strings.Fields(value), nil + +} + +func (fs FS) SysctlInts(sysctl string) ([]int, error) { + fields, err := fs.SysctlStrings(sysctl) + if err != nil { + return nil, err + } + + values := make([]int, len(fields)) + for i, f := range fields { + vp := util.NewValueParser(f) + values[i] = vp.Int() + if err := vp.Err(); err != nil { + return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) + } + } + return values, nil +} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 28228164ef..5f7f32dc83 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -40,7 +40,7 @@ type Schedstat struct { CPUs []*SchedstatCPU } -// SchedstatCPU contains the values from one "cpu" line +// SchedstatCPU contains the values from one "cpu" line. type SchedstatCPU struct { CPUNum string @@ -49,14 +49,14 @@ type SchedstatCPU struct { RunTimeslices uint64 } -// ProcSchedstat contains the values from /proc//schedstat +// ProcSchedstat contains the values from `/proc//schedstat`. type ProcSchedstat struct { RunningNanoseconds uint64 WaitingNanoseconds uint64 RunTimeslices uint64 } -// Schedstat reads data from /proc/schedstat +// Schedstat reads data from `/proc/schedstat`. func (fs FS) Schedstat() (*Schedstat, error) { file, err := os.Open(fs.proc.Path("schedstat")) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 7896fd7242..bc9aaf5c28 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { return s, nil } -// SlabInfo reads data from /proc/slabinfo +// SlabInfo reads data from `/proc/slabinfo`. func (fs FS) SlabInfo() (SlabInfo, error) { // TODO: Consider passing options to allow for parsing different // slabinfo versions. However, slabinfo 2.1 has been stable since diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 0000000000..559129cbca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
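A minimal sketch of the sysctl helpers added in proc_sys.go above: the dotted sysctl name maps to path components under /proc/sys (e.g. "vm.swappiness" becomes /proc/sys/vm/swappiness). The keys below are illustrative assumptions:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    // Single-value integer sysctl; SysctlInts returns one int per field.
    swappiness, err := fs.SysctlInts("vm.swappiness")
    if err != nil {
        panic(err)
    }
    // String-valued sysctl, split into whitespace-separated fields.
    hostname, err := fs.SysctlStrings("kernel.hostname")
    if err != nil {
        panic(err)
    }
    fmt.Println(swappiness[0], hostname)
}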
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. +type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("softirqs empty") + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if 
softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) + } + + return softirqs, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 6d8727541e..33f97caa08 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -41,7 +41,7 @@ type CPUStat struct { // SoftIRQStat represent the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs +// It is possible to get per-cpu stats by reading `/proc/softirqs`. type SoftIRQStat struct { Hi uint64 Timer uint64 @@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { // NewStat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func NewStat() (Stat, error) { fs, err := NewFS(fs.DefaultProcMountPoint) if err != nil { @@ -155,15 +155,15 @@ func NewStat() (Stat, error) { } // NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func (fs FS) NewStat() (Stat, error) { return fs.Stat() } // Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { fileName := fs.proc.Path("stat") data, err := util.ReadFileNoStat(fileName) diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index cb13891414..20ceb77e2d 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -29,7 +29,7 @@ import ( // https://www.kernel.org/doc/Documentation/sysctl/vm.txt // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string +// and numa_zonelist_order (deprecated) which is a string. 
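One more sketch, for the Softirqs accessor added above: every field is a per-CPU slice (one element per column of /proc/softirqs), so totals require summing across CPUs. The mount point and the chosen counter are illustrative:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    sirq, err := fs.Softirqs()
    if err != nil {
        panic(err)
    }
    var timer uint64
    for _, n := range sirq.Timer { // one entry per CPU
        timer += n
    }
    fmt.Println("TIMER softirqs across all CPUs:", timer)
}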
type VM struct { AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes BlockDump *int64 // /proc/sys/vm/block_dump @@ -87,7 +87,7 @@ func (fs FS) VM() (*VM, error) { return nil, fmt.Errorf("%s is not a directory", path) } - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index 209e2ac987..c745a4c04f 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) // structs containing the relevant info. More information available here: // https://www.kernel.org/doc/Documentation/sysctl/vm.txt func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) } diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/http.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/http.go deleted file mode 100644 index c4b2a79373..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/http.go +++ /dev/null @@ -1,112 +0,0 @@ -// -// Copyright (c) SAS Institute Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package pkcs9 - -import ( - "bytes" - "crypto" - "crypto/hmac" - "encoding/asn1" - "errors" - "fmt" - "net/http" - - "github.com/sassoftware/relic/lib/pkcs7" - "github.com/sassoftware/relic/lib/x509tools" -) - -// RFC 3161 timestamping - -// Create a HTTP request to request a token from the given URL -func NewRequest(url string, hash crypto.Hash, hashValue []byte) (msg *TimeStampReq, req *http.Request, err error) { - alg, ok := x509tools.PkixDigestAlgorithm(hash) - if !ok { - return nil, nil, errors.New("unknown digest algorithm") - } - msg = &TimeStampReq{ - Version: 1, - MessageImprint: MessageImprint{ - HashAlgorithm: alg, - HashedMessage: hashValue, - }, - Nonce: x509tools.MakeSerial(), - CertReq: true, - } - reqbytes, err := asn1.Marshal(*msg) - if err != nil { - return - } - req, err = http.NewRequest("POST", url, bytes.NewReader(reqbytes)) - if err != nil { - return - } - req.Header.Set("Content-Type", "application/timestamp-query") - return -} - -// Parse a timestamp token from a HTTP response, sanity checking it against the original request nonce -func (req *TimeStampReq) ParseResponse(body []byte) (*pkcs7.ContentInfoSignedData, error) { - respmsg := new(TimeStampResp) - if rest, err := asn1.Unmarshal(body, respmsg); err != nil { - return nil, fmt.Errorf("pkcs9: unmarshalling response: %w", err) - } else if len(rest) != 0 { - return nil, errors.New("pkcs9: trailing bytes in response") - } else if respmsg.Status.Status > StatusGrantedWithMods { - return nil, fmt.Errorf("pkcs9: request denied: status=%d failureInfo=%x", respmsg.Status.Status, respmsg.Status.FailInfo.Bytes) - } - if err := req.SanityCheckToken(&respmsg.TimeStampToken); err != nil { - return nil, fmt.Errorf("pkcs9: token sanity check failed: %w", err) - } - return &respmsg.TimeStampToken, nil -} - -// Sanity check a timestamp token against the nonce in the original request -func (req *TimeStampReq) SanityCheckToken(psd *pkcs7.ContentInfoSignedData) error { - if _, err := psd.Content.Verify(nil, false); err != nil { - return err - } - info, err := unpackTokenInfo(psd) - if err != nil { - return err - } - if req.Nonce.Cmp(info.Nonce) != 0 { - return errors.New("request nonce mismatch") - } - if !hmac.Equal(info.MessageImprint.HashedMessage, req.MessageImprint.HashedMessage) { - return errors.New("message imprint mismatch") - } - return nil -} - -// Unpack TSTInfo from a timestamp token -func unpackTokenInfo(psd *pkcs7.ContentInfoSignedData) (*TSTInfo, error) { - infobytes, err := psd.Content.ContentInfo.Bytes() - if err != nil { - return nil, fmt.Errorf("unpack TSTInfo: %w", err) - } else if infobytes[0] == 0x04 { - // unwrap dummy OCTET STRING - _, err = asn1.Unmarshal(infobytes, &infobytes) - if err != nil { - return nil, fmt.Errorf("unpack TSTInfo: %w", err) - } - } - info := new(TSTInfo) - if _, err := asn1.Unmarshal(infobytes, info); err != nil { - return nil, fmt.Errorf("unpack TSTInfo: %w", err) - } - return info, nil -} diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/iface.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/iface.go deleted file mode 100644 index 69f059d175..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/iface.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright © SAS Institute Inc. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
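The deleted http.go implemented the client half of RFC 3161: DER-encode a TimeStampReq whose MessageImprint carries a digest of the data, POST it with Content-Type application/timestamp-query, then check the returned token against the request nonce. Below is a self-contained sketch of that request flow using only the standard library; the struct shapes mirror the deleted pkcs9 types (see structs.go further down in this diff), and tsa.example.com is a placeholder endpoint:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
	"io"
	"log"
	"math/big"
	"net/http"
)

// Minimal mirrors of the deleted pkcs9 wire types.
type messageImprint struct {
	HashAlgorithm pkix.AlgorithmIdentifier
	HashedMessage []byte
}

type timeStampReq struct {
	Version        int
	MessageImprint messageImprint
	Nonce          *big.Int `asn1:"optional"`
	CertReq        bool     `asn1:"default:false"`
}

// OID for SHA-256 (2.16.840.1.101.3.4.2.1).
var oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}

func main() {
	digest := sha256.Sum256([]byte("artifact to be timestamped"))
	nonce, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 64))
	if err != nil {
		log.Fatal(err)
	}
	req := timeStampReq{
		Version: 1,
		MessageImprint: messageImprint{
			HashAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidSHA256},
			HashedMessage: digest[:],
		},
		Nonce:   nonce, // echoed back by the TSA; guards against replayed tokens
		CertReq: true,  // ask the TSA to include its certificate chain
	}
	der, err := asn1.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder TSA endpoint; substitute a real RFC 3161 service.
	resp, err := http.Post("https://tsa.example.com/tsr", "application/timestamp-query", bytes.NewReader(der))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// body now holds a DER TimeStampResp; the deleted ParseResponse checked
	// its status and nonce before accepting the token.
	fmt.Printf("got %d response bytes\n", len(body))
}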
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pkcs9 - -import ( - "context" - "crypto" - - "github.com/sassoftware/relic/lib/pkcs7" -) - -// Timestamper is the common interface for the timestamp client and middleware -type Timestamper interface { - Timestamp(ctx context.Context, req *Request) (*pkcs7.ContentInfoSignedData, error) -} - -// Request holds parameters for a timestamp operation -type Request struct { - // EncryptedDigest is the raw encrypted signature value - EncryptedDigest []byte - // Hash is the desired hash function for the timestamp. Ignored for legacy requests. - Hash crypto.Hash - // Legacy indicates a nonstandard microsoft timestamp request, otherwise RFC 3161 is used - Legacy bool -} diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/microsoft.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/microsoft.go deleted file mode 100644 index 69efb76dba..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/microsoft.go +++ /dev/null @@ -1,67 +0,0 @@ -// -// Copyright (c) SAS Institute Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
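iface.go (removed above) was the seam between signers and timestamp backends. Here is a function-level sketch of a client that would have satisfied the Timestamper interface, wired from the NewRequest/ParseResponse helpers in the deleted http.go. Hashing the raw signature value to form the imprint is an assumption about how callers used these helpers, not code taken from relic:

package tsclient

import (
	"context"
	"io"
	"net/http"

	"github.com/sassoftware/relic/lib/pkcs7"
	"github.com/sassoftware/relic/lib/pkcs9"
)

// httpTimestamper satisfies the (now removed) pkcs9.Timestamper interface.
type httpTimestamper struct {
	url string
}

var _ pkcs9.Timestamper = httpTimestamper{}

func (t httpTimestamper) Timestamp(ctx context.Context, r *pkcs9.Request) (*pkcs7.ContentInfoSignedData, error) {
	// RFC 3161 tokens cover a digest of the signature value, so hash the
	// encrypted digest with the hash the caller requested. (r.Legacy, the
	// Microsoft variant, is ignored in this sketch.)
	h := r.Hash.New()
	h.Write(r.EncryptedDigest)

	msg, httpReq, err := pkcs9.NewRequest(t.url, r.Hash, h.Sum(nil))
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(httpReq.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// ParseResponse re-checks the nonce and response status before
	// handing back the token.
	return msg.ParseResponse(body)
}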
-// - -package pkcs9 - -import ( - "bytes" - "encoding/asn1" - "encoding/base64" - "net/http" - - "github.com/sassoftware/relic/lib/pkcs7" -) - -// Microsoft non-RFC-3161 timestamping -// https://msdn.microsoft.com/en-us/library/windows/desktop/bb931395(v=vs.85).aspx - -type MicrosoftTimeStampRequest struct { - CounterSignatureType asn1.ObjectIdentifier - Attributes struct{} `asn1:"optional"` - Content struct { - ContentType asn1.ObjectIdentifier - Content []byte `asn1:"explicit,tag:0"` - } -} - -func NewLegacyRequest(url string, encryptedDigest []byte) (*http.Request, error) { - var msg MicrosoftTimeStampRequest - msg.CounterSignatureType = OidSpcTimeStampRequest - msg.Content.ContentType = pkcs7.OidData - msg.Content.Content = encryptedDigest - blob, err := asn1.Marshal(msg) - if err != nil { - return nil, err - } - req, err := http.NewRequest("POST", url, bytes.NewReader(blob)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/octet-stream") - return req, nil -} - -func ParseLegacyResponse(body []byte) (*pkcs7.ContentInfoSignedData, error) { - rblob, err := base64.StdEncoding.DecodeString(string(bytes.TrimRight(body, "\x00"))) - if err != nil { - return nil, err - } - psd := new(pkcs7.ContentInfoSignedData) - if _, err := asn1.Unmarshal(rblob, psd); err != nil { - return nil, err - } - return psd, nil -} diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/pkcs7.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/pkcs7.go deleted file mode 100644 index 372eaf746d..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/pkcs7.go +++ /dev/null @@ -1,183 +0,0 @@ -// -// Copyright (c) SAS Institute Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
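The Microsoft flow deleted above is simpler than RFC 3161: the raw encrypted digest goes out wrapped in a small ASN.1 request as an octet stream, and the response is base64-wrapped DER, possibly NUL-padded. From a caller's perspective it looked roughly like this; a reconstruction from the deleted helpers, not relic's actual client code, with tsaURL as a placeholder:

package tsclient

import (
	"io"
	"net/http"

	"github.com/sassoftware/relic/lib/pkcs7"
	"github.com/sassoftware/relic/lib/pkcs9"
)

func legacyTimestamp(tsaURL string, encryptedDigest []byte) (*pkcs7.ContentInfoSignedData, error) {
	httpReq, err := pkcs9.NewLegacyRequest(tsaURL, encryptedDigest)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	// ParseLegacyResponse strips the NUL padding, base64-decodes the body,
	// and unmarshals the signed token.
	return pkcs9.ParseLegacyResponse(body)
}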
-// - -package pkcs9 - -import ( - "bytes" - "context" - "crypto" - "crypto/x509" - "errors" - "fmt" - "time" - - "github.com/sassoftware/relic/lib/pkcs7" - "github.com/sassoftware/relic/lib/x509tools" -) - -func TimestampAndMarshal(ctx context.Context, psd *pkcs7.ContentInfoSignedData, timestamper Timestamper, authenticode bool) (*TimestampedSignature, error) { - if timestamper != nil { - signerInfo := &psd.Content.SignerInfos[0] - hash, err := x509tools.PkixDigestToHashE(signerInfo.DigestAlgorithm) - if err != nil { - return nil, err - } - token, err := timestamper.Timestamp(ctx, &Request{EncryptedDigest: signerInfo.EncryptedDigest, Hash: hash}) - if err != nil { - return nil, err - } - if authenticode { - err = AddStampToSignedAuthenticode(signerInfo, *token) - } else { - err = AddStampToSignedData(signerInfo, *token) - } - if err != nil { - return nil, err - } - } - verified, err := psd.Content.Verify(nil, false) - if err != nil { - return nil, fmt.Errorf("pkcs7: failed signature self-check: %w", err) - } - ts, err := VerifyOptionalTimestamp(verified) - if err != nil { - return nil, fmt.Errorf("pkcs7: failed signature self-check: %w", err) - } - blob, err := psd.Marshal() - if err != nil { - return nil, err - } - ts.Raw = blob - return &ts, err -} - -// Attach a RFC 3161 timestamp to a PKCS#7 SignerInfo -func AddStampToSignedData(signerInfo *pkcs7.SignerInfo, token pkcs7.ContentInfoSignedData) error { - return signerInfo.UnauthenticatedAttributes.Add(OidAttributeTimeStampToken, token) -} - -// Attach a RFC 3161 timestamp to a PKCS#7 SignerInfo using the OID for authenticode signatures -func AddStampToSignedAuthenticode(signerInfo *pkcs7.SignerInfo, token pkcs7.ContentInfoSignedData) error { - return signerInfo.UnauthenticatedAttributes.Add(OidSpcTimeStampToken, token) -} - -// Validated timestamp token -type CounterSignature struct { - pkcs7.Signature - Hash crypto.Hash - SigningTime time.Time -} - -// Validated signature containing a optional timestamp token -type TimestampedSignature struct { - pkcs7.Signature - CounterSignature *CounterSignature - Raw []byte -} - -// Look for a timestamp (counter-signature or timestamp token) in the -// UnauthenticatedAttributes of the given already-validated signature and check -// its integrity. The certificate chain is not checked; call VerifyChain() on -// the result to validate it fully. Returns nil if no timestamp is present. -func VerifyPkcs7(sig pkcs7.Signature) (*CounterSignature, error) { - var tst pkcs7.ContentInfoSignedData - // check several OIDs for timestamp tokens - err := sig.SignerInfo.UnauthenticatedAttributes.GetOne(OidAttributeTimeStampToken, &tst) - if _, ok := err.(pkcs7.ErrNoAttribute); ok { - err = sig.SignerInfo.UnauthenticatedAttributes.GetOne(OidSpcTimeStampToken, &tst) - } - var imprintHash crypto.Hash - if err == nil { - // timestamptoken is a fully nested signedData containing a TSTInfo - // that digests the parent signature blob - return Verify(&tst, sig.SignerInfo.EncryptedDigest, sig.Intermediates) - } else if _, ok := err.(pkcs7.ErrNoAttribute); ok { - tsi := new(pkcs7.SignerInfo) - if err := sig.SignerInfo.UnauthenticatedAttributes.GetOne(OidAttributeCounterSign, tsi); err != nil { - if _, ok := err.(pkcs7.ErrNoAttribute); ok { - return nil, nil - } - return nil, err - } - // counterSignature is simply a signerinfo. 
The certificate chain is - // included in the parent structure, and the timestamp signs the - // signature blob from the parent signerinfo - imprintHash, _ = x509tools.PkixDigestToHash(sig.SignerInfo.DigestAlgorithm) - return finishVerify(tsi, sig.SignerInfo.EncryptedDigest, sig.Intermediates, imprintHash, tsi, nil) - } - return nil, err -} - -// Look for a timestamp token or counter-signature in the given signature and -// return a structure that can be used to validate the signature's certificate -// chain. If no timestamp is present, then the current time will be used when -// validating the chain. -func VerifyOptionalTimestamp(sig pkcs7.Signature) (TimestampedSignature, error) { - tsig := TimestampedSignature{Signature: sig} - ts, err := VerifyPkcs7(sig) - if err != nil { - return tsig, err - } - tsig.CounterSignature = ts - return tsig, nil -} - -// Verify that the timestamp token has a valid certificate chain -func (cs CounterSignature) VerifyChain(roots *x509.CertPool, extraCerts []*x509.Certificate) error { - return cs.Signature.VerifyChain(roots, extraCerts, x509.ExtKeyUsageTimeStamping, cs.SigningTime) -} - -// Verify the certificate chain of a PKCS#7 signature. If the signature has a -// valid timestamp token attached, then the timestamp is used for validating -// the primary signature's chain, making the signature valid after the -// certificates have expired. -func (sig TimestampedSignature) VerifyChain(roots *x509.CertPool, extraCerts []*x509.Certificate, usage x509.ExtKeyUsage) error { - var signingTime time.Time - if sig.CounterSignature != nil { - if err := sig.CounterSignature.VerifyChain(roots, extraCerts); err != nil { - return fmt.Errorf("validating timestamp: %w", err) - } - signingTime = sig.CounterSignature.SigningTime - } - return sig.Signature.VerifyChain(roots, extraCerts, usage, signingTime) -} - -// Verify a non-RFC-3161 timestamp token against the given encrypted digest -// from the primary signature. -func VerifyMicrosoftToken(token *pkcs7.ContentInfoSignedData, encryptedDigest []byte) (*CounterSignature, error) { - sig, err := token.Content.Verify(nil, false) - if err != nil { - return nil, err - } - content, err := token.Content.ContentInfo.Bytes() - if err != nil { - return nil, err - } - if !bytes.Equal(content, encryptedDigest) { - return nil, errors.New("timestamp does not match the enclosing signature") - } - hash, _ := x509tools.PkixDigestToHash(sig.SignerInfo.DigestAlgorithm) - signingTime, err := sig.SignerInfo.SigningTime() - if err != nil { - return nil, err - } - return &CounterSignature{ - Signature: sig, - Hash: hash, - SigningTime: signingTime, - }, nil -} diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/structs.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/structs.go deleted file mode 100644 index 358c53b810..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/structs.go +++ /dev/null @@ -1,117 +0,0 @@ -// -// Copyright (c) SAS Institute Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
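Taken together, the deleted verification pieces compose like this: self-check the primary PKCS#7 signature, pull out any timestamp with VerifyOptionalTimestamp, then validate the TSA chain first and the signing chain at the counter-signed time. A hedged sketch against the deleted API, where psd and roots are placeholders supplied by the caller:

package tsclient

import (
	"crypto/x509"

	"github.com/sassoftware/relic/lib/pkcs7"
	"github.com/sassoftware/relic/lib/pkcs9"
)

func verifyTimestamped(psd *pkcs7.ContentInfoSignedData, roots *x509.CertPool) error {
	// Integrity-check the primary signature (certificate chain not yet validated).
	sig, err := psd.Content.Verify(nil, false)
	if err != nil {
		return err
	}
	// Extract and integrity-check an attached timestamp, if any.
	ts, err := pkcs9.VerifyOptionalTimestamp(sig)
	if err != nil {
		return err
	}
	// VerifyChain validates the TSA chain, then the signing chain at the
	// counter-signed time, so signatures stay valid after the signing
	// certificate expires.
	return ts.VerifyChain(roots, nil, x509.ExtKeyUsageCodeSigning)
}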
-// - -// PKCS#9 is a specification for trusted timestamping. Timestamping services -// create a timestamp token which includes a known-good timestamp with a -// signature over it. The token can be attached to a document to prove that it -// existed at the indicated time. When attached to a PKCS#7 signedData -// structure, the timestamp proves that the primary signature was created -// during the valid lifespan of the signing certificate, allowing it to be -// validated after the certificates have expired. -// -// See RFC 3161 -package pkcs9 - -import ( - "crypto/x509/pkix" - "encoding/asn1" - "math/big" - "time" - - "github.com/sassoftware/relic/lib/pkcs7" -) - -const ( - StatusGranted = iota - StatusGrantedWithMods - StatusRejection - StatusWaiting - StatusRevocationWarning - StatusRevocationNotification - - FailureBadAlg = 0 - FailureBadRequest = 2 - FailureBadDataFormat = 5 - FailureTimeNotAvailable = 14 - FailureUnacceptedPolicy = 15 - FailureUnacceptedExtension = 16 - FailureAddInfoNotAvailable = 17 - SystemFailure = 25 -) - -var ( - OidKeyPurposeTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} - OidTSTInfo = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 1, 4} - OidAttributeTimeStampToken = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 2, 14} - OidAttributeCounterSign = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 6} - - OidSpcTimeStampRequest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 3, 2, 1} - // undocumented(?) alternative to OidAttributeTimeStampToken found in Authenticode signatures - OidSpcTimeStampToken = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 3, 3, 1} -) - -type TimeStampReq struct { - Version int - MessageImprint MessageImprint - ReqPolicy asn1.ObjectIdentifier `asn1:"optional"` - Nonce *big.Int `asn1:"optional"` - CertReq bool `asn1:"default:false"` - Extensions []pkix.Extension `asn1:"optional,implicit,tag:0"` -} - -type MessageImprint struct { - HashAlgorithm pkix.AlgorithmIdentifier - HashedMessage []byte -} - -type TimeStampResp struct { - Status PKIStatusInfo - TimeStampToken pkcs7.ContentInfoSignedData `asn1:"optional"` -} - -type PKIStatusInfo struct { - Status int - StatusString []string `asn1:"optional"` - FailInfo asn1.BitString `asn1:"optional"` -} - -type TSTInfo struct { - Version int - Policy asn1.ObjectIdentifier - MessageImprint MessageImprint - SerialNumber *big.Int - GenTime asn1.RawValue - Accuracy Accuracy `asn1:"optional"` - Ordering bool `asn1:"optional,default:false"` - Nonce *big.Int `asn1:"optional"` - TSA GeneralName `asn1:"optional,implicit,tag:0"` - Extensions []pkix.Extension `asn1:"optional,implicit,tag:1"` -} - -func (i *TSTInfo) SigningTime() (time.Time, error) { - return pkcs7.ParseTime(i.GenTime) -} - -type Accuracy struct { - Seconds int `asn1:"optional"` - Millis int `asn1:"optional,tag:0"` - Micros int `asn1:"optional,tag:1"` -} - -type GeneralName struct { - // See RFC 3280 - Value asn1.RawValue -} diff --git a/vendor/github.com/sassoftware/relic/lib/pkcs9/verify.go b/vendor/github.com/sassoftware/relic/lib/pkcs9/verify.go deleted file mode 100644 index ac605b983f..0000000000 --- a/vendor/github.com/sassoftware/relic/lib/pkcs9/verify.go +++ /dev/null @@ -1,102 +0,0 @@ -// -// Copyright (c) SAS Institute Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
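structs.go (deleted above) held the wire types and status codes, and handling a TSA response reduces to decoding the TimeStampResp and mapping its PKIStatusInfo. A short sketch over those types, where der stands in for the raw response bytes:

package tsclient

import (
	"encoding/asn1"
	"errors"
	"fmt"

	"github.com/sassoftware/relic/lib/pkcs7"
	"github.com/sassoftware/relic/lib/pkcs9"
)

func checkResponse(der []byte) (*pkcs7.ContentInfoSignedData, error) {
	var resp pkcs9.TimeStampResp
	if rest, err := asn1.Unmarshal(der, &resp); err != nil {
		return nil, fmt.Errorf("unmarshalling TimeStampResp: %w", err)
	} else if len(rest) != 0 {
		return nil, errors.New("trailing bytes after TimeStampResp")
	}
	// Granted (0) and grantedWithMods (1) carry a token; anything above
	// StatusGrantedWithMods is a refusal.
	if resp.Status.Status > pkcs9.StatusGrantedWithMods {
		return nil, fmt.Errorf("request denied: status=%d", resp.Status.Status)
	}
	return &resp.TimeStampToken, nil
}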
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package pkcs9 - -import ( - "crypto" - "crypto/hmac" - "crypto/x509" - "errors" - "fmt" - "time" - - "github.com/sassoftware/relic/lib/pkcs7" - "github.com/sassoftware/relic/lib/x509tools" -) - -// Verify that the digest (imprint) in a timestamp token matches the given data -func (i MessageImprint) Verify(data []byte) error { - hash, err := x509tools.PkixDigestToHashE(i.HashAlgorithm) - if err != nil { - return fmt.Errorf("pkcs9: %w", err) - } - w := hash.New() - w.Write(data) - digest := w.Sum(nil) - if !hmac.Equal(digest, i.HashedMessage) { - return errors.New("pkcs9: digest check failed") - } - return nil -} - -// Verify a timestamp token using external data -func Verify(tst *pkcs7.ContentInfoSignedData, data []byte, certs []*x509.Certificate) (*CounterSignature, error) { - if len(tst.Content.SignerInfos) != 1 { - return nil, errors.New("timestamp should have exactly one SignerInfo") - } - tsi := tst.Content.SignerInfos[0] - tsicerts, certErr := tst.Content.Certificates.Parse() - if len(tsicerts) != 0 { - // keep both sets of certs just in case - certs = append(certs, tsicerts...) - } - // verify the imprint in the TSTInfo - tstinfo, err := unpackTokenInfo(tst) - if err != nil { - return nil, err - } - if err := tstinfo.MessageImprint.Verify(data); err != nil { - return nil, fmt.Errorf("verifying timestamp imprint: %w", err) - } - imprintHash, _ := x509tools.PkixDigestToHash(tstinfo.MessageImprint.HashAlgorithm) - // now the signature is over the TSTInfo blob - verifyBlob, err := tst.Content.ContentInfo.Bytes() - if err != nil { - return nil, err - } - - return finishVerify(&tsi, verifyBlob, certs, imprintHash, tstinfo, certErr) -} - -type timeSource interface { - SigningTime() (time.Time, error) -} - -func finishVerify(tsi *pkcs7.SignerInfo, blob []byte, certs []*x509.Certificate, hash crypto.Hash, timeSource timeSource, certErr error) (*CounterSignature, error) { - cert, err := tsi.Verify(blob, false, certs) - if err != nil { - if errors.As(err, &pkcs7.MissingCertificateError{}) && certErr != nil { - // surface saved parse error - return nil, certErr - } - return nil, err - } - signingTime, err := timeSource.SigningTime() - if err != nil { - return nil, fmt.Errorf("parsing timestamp: %w", err) - } - return &CounterSignature{ - Signature: pkcs7.Signature{ - SignerInfo: tsi, - Certificate: cert, - Intermediates: certs, - CertError: certErr, - }, - Hash: hash, - SigningTime: signingTime, - }, nil -} diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcio.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcio.go index 689789cc2f..e4f514b83b 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcio.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcio.go @@ -22,15 +22,15 @@ import ( "crypto/rand" "crypto/sha256" "crypto/x509" + "errors" "fmt" "net/url" "os" - "github.com/pkg/errors" "golang.org/x/term" - "github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioroots" "github.com/sigstore/cosign/cmd/cosign/cli/options" + "github.com/sigstore/cosign/internal/pkg/cosign/fulcio/fulcioroots" 
"github.com/sigstore/cosign/pkg/cosign" "github.com/sigstore/cosign/pkg/providers" "github.com/sigstore/fulcio/pkg/api" @@ -39,9 +39,15 @@ import ( ) const ( - FlowNormal = "normal" - FlowDevice = "device" - FlowToken = "token" + flowNormal = "normal" + flowDevice = "device" + flowToken = "token" + // spacing is intentional to have this indented + privacyStatement = ` + Note that there may be personally identifiable information associated with this signed artifact. + This may include the email address associated with the account with which you authenticate. + This information will be used for signing this artifact and will be stored in public transparency logs and cannot be removed later.` + privacyStatementConfirmation = " By typing 'y', you attest that you grant (or have permission to grant) and agree to have this information stored permanently in transparency logs." ) type oidcConnector interface { @@ -56,7 +62,7 @@ func (rf *realConnector) OIDConnect(url, clientID, secret, redirectURL string) ( return oauthflow.OIDConnect(url, clientID, secret, redirectURL, rf.flow) } -func getCertForOauthID(priv *ecdsa.PrivateKey, fc api.Client, connector oidcConnector, oidcIssuer, oidcClientID, oidcClientSecret, oidcRedirectURL string) (*api.CertificateResponse, error) { +func getCertForOauthID(priv *ecdsa.PrivateKey, fc api.LegacyClient, connector oidcConnector, oidcIssuer, oidcClientID, oidcClientSecret, oidcRedirectURL string) (*api.CertificateResponse, error) { pubBytes, err := x509.MarshalPKIXPublicKey(&priv.PublicKey) if err != nil { return nil, err @@ -86,15 +92,14 @@ func getCertForOauthID(priv *ecdsa.PrivateKey, fc api.Client, connector oidcConn } // GetCert returns the PEM-encoded signature of the OIDC identity returned as part of an interactive oauth2 flow plus the PEM-encoded cert chain. 
-func GetCert(ctx context.Context, priv *ecdsa.PrivateKey, idToken, flow, oidcIssuer, oidcClientID, oidcClientSecret, oidcRedirectURL string, fClient api.Client) (*api.CertificateResponse, error) { +func GetCert(ctx context.Context, priv *ecdsa.PrivateKey, idToken, flow, oidcIssuer, oidcClientID, oidcClientSecret, oidcRedirectURL string, fClient api.LegacyClient) (*api.CertificateResponse, error) { c := &realConnector{} switch flow { - case FlowDevice: - c.flow = oauthflow.NewDeviceFlowTokenGetter( - oidcIssuer, oauthflow.SigstoreDeviceURL, oauthflow.SigstoreTokenURL) - case FlowNormal: + case flowDevice: + c.flow = oauthflow.NewDeviceFlowTokenGetterForIssuer(oidcIssuer) + case flowNormal: c.flow = oauthflow.DefaultIDTokenGetter - case FlowToken: + case flowToken: c.flow = &oauthflow.StaticTokenGetter{RawToken: idToken} default: return nil, fmt.Errorf("unsupported oauth flow: %s", flow) @@ -114,21 +119,30 @@ type Signer struct { func NewSigner(ctx context.Context, ko options.KeyOpts) (*Signer, error) { fClient, err := NewClient(ko.FulcioURL) if err != nil { - return nil, errors.Wrap(err, "creating Fulcio client") + return nil, fmt.Errorf("creating Fulcio client: %w", err) } idToken := ko.IDToken + var provider providers.Interface // If token is not set in the options, get one from the providers if idToken == "" && providers.Enabled(ctx) && !ko.OIDCDisableProviders { - idToken, err = providers.Provide(ctx, "sigstore") + if ko.OIDCProvider != "" { + provider, err = providers.ProvideFrom(ctx, ko.OIDCProvider) + if err != nil { + return nil, fmt.Errorf("getting provider: %w", err) + } + idToken, err = provider.Provide(ctx, "sigstore") + } else { + idToken, err = providers.Provide(ctx, "sigstore") + } if err != nil { - return nil, errors.Wrap(err, "fetching ambient OIDC credentials") + return nil, fmt.Errorf("fetching ambient OIDC credentials: %w", err) } } priv, err := cosign.GeneratePrivateKey() if err != nil { - return nil, errors.Wrap(err, "generating cert") + return nil, fmt.Errorf("generating cert: %w", err) } signer, err := signature.LoadECDSASignerVerifier(priv, crypto.SHA256) if err != nil { @@ -136,22 +150,31 @@ func NewSigner(ctx context.Context, ko options.KeyOpts) (*Signer, error) { } fmt.Fprintln(os.Stderr, "Retrieving signed certificate...") + fmt.Fprintln(os.Stderr, privacyStatement) + var flow string switch { case ko.FulcioAuthFlow != "": // Caller manually set flow option. flow = ko.FulcioAuthFlow case idToken != "": - flow = FlowToken + flow = flowToken case !term.IsTerminal(0): fmt.Fprintln(os.Stderr, "Non-interactive mode detected, using device flow.") - flow = FlowDevice + flow = flowDevice default: - flow = FlowNormal + ok, err := cosign.ConfirmPrompt(privacyStatementConfirmation, ko.SkipConfirmation) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("no confirmation") + } + flow = flowNormal } Resp, err := GetCert(ctx, priv, idToken, flow, ko.OIDCIssuer, ko.OIDCClientID, ko.OIDCClientSecret, ko.OIDCRedirectURL, fClient) // TODO, use the chain.
if err != nil { - return nil, errors.Wrap(err, "retrieving cert") + return nil, fmt.Errorf("retrieving cert: %w", err) } f := &Signer{ @@ -171,15 +194,15 @@ func (f *Signer) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, var _ signature.Signer = &Signer{} -func GetRoots() *x509.CertPool { +func GetRoots() (*x509.CertPool, error) { return fulcioroots.Get() } -func GetIntermediates() *x509.CertPool { +func GetIntermediates() (*x509.CertPool, error) { return fulcioroots.GetIntermediates() } -func NewClient(fulcioURL string) (api.Client, error) { +func NewClient(fulcioURL string) (api.LegacyClient, error) { fulcioServer, err := url.Parse(fulcioURL) if err != nil { return nil, err diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go deleted file mode 100644 index c0890bd77c..0000000000 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioroots/fulcioroots.go +++ /dev/null @@ -1,154 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fulcioroots - -import ( - "bytes" - "context" - "crypto/x509" - "os" - "sync" - - "github.com/pkg/errors" - "github.com/sigstore/cosign/pkg/cosign/tuf" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -var ( - rootsOnce sync.Once - roots *x509.CertPool - intermediates *x509.CertPool -) - -// This is the root in the fulcio project. -var fulcioTargetStr = `fulcio.crt.pem` - -// This is the v1 migrated root. -var fulcioV1TargetStr = `fulcio_v1.crt.pem` - -// The untrusted intermediate CA certificate, used for chain building -// TODO: Remove once this is bundled in TUF metadata. 
-var fulcioIntermediateV1 = `-----BEGIN CERTIFICATE----- -MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMw -KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y -MjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3Jl -LmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0C -AQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV7 -7LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS -0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYB -BQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjp -KFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZI -zj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJR -nZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsP -mygUY7Ii2zbdCdliiow= ------END CERTIFICATE-----` - -const ( - altRoot = "SIGSTORE_ROOT_FILE" -) - -func Get() *x509.CertPool { - rootsOnce.Do(func() { - var err error - roots, intermediates, err = initRoots() - if err != nil { - panic(err) - } - }) - return roots -} - -func GetIntermediates() *x509.CertPool { - rootsOnce.Do(func() { - var err error - roots, intermediates, err = initRoots() - if err != nil { - panic(err) - } - }) - return intermediates -} - -func initRoots() (*x509.CertPool, *x509.CertPool, error) { - var rootPool *x509.CertPool - var intermediatePool *x509.CertPool - - rootEnv := os.Getenv(altRoot) - if rootEnv != "" { - raw, err := os.ReadFile(rootEnv) - if err != nil { - return nil, nil, errors.Wrap(err, "error reading root PEM file") - } - certs, err := cryptoutils.UnmarshalCertificatesFromPEM(raw) - if err != nil { - return nil, nil, errors.Wrap(err, "error unmarshalling certificates") - } - for _, cert := range certs { - // root certificates are self-signed - if bytes.Equal(cert.RawSubject, cert.RawIssuer) { - if rootPool == nil { - rootPool = x509.NewCertPool() - } - rootPool.AddCert(cert) - } else { - if intermediatePool == nil { - intermediatePool = x509.NewCertPool() - } - intermediatePool.AddCert(cert) - } - } - } else { - tufClient, err := tuf.NewFromEnv(context.Background()) - if err != nil { - return nil, nil, errors.Wrap(err, "initializing tuf") - } - defer tufClient.Close() - // Retrieve from the embedded or cached TUF root. If expired, a network - // call is made to update the root. 
- targets, err := tufClient.GetTargetsByMeta(tuf.Fulcio, []string{fulcioTargetStr, fulcioV1TargetStr}) - if err != nil { - return nil, nil, errors.New("error getting targets") - } - if len(targets) == 0 { - return nil, nil, errors.New("none of the Fulcio roots have been found") - } - for _, t := range targets { - certs, err := cryptoutils.UnmarshalCertificatesFromPEM(t.Target) - if err != nil { - return nil, nil, errors.Wrap(err, "error unmarshalling certificates") - } - for _, cert := range certs { - // root certificates are self-signed - if bytes.Equal(cert.RawSubject, cert.RawIssuer) { - if rootPool == nil { - rootPool = x509.NewCertPool() - } - rootPool.AddCert(cert) - } else { - if intermediatePool == nil { - intermediatePool = x509.NewCertPool() - } - intermediatePool.AddCert(cert) - } - } - } - if intermediatePool == nil { - intermediatePool = x509.NewCertPool() - } - intermediatePool.AppendCertsFromPEM([]byte(fulcioIntermediateV1)) - } - return rootPool, intermediatePool, nil -} diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go index c2af470cc2..38287ab3e0 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctl/verify.go @@ -20,18 +20,17 @@ import ( "crypto/sha256" "crypto/x509" "encoding/json" - "encoding/pem" + "errors" "fmt" "os" ct "github.com/google/certificate-transparency-go" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/google/certificate-transparency-go/x509util" - "github.com/pkg/errors" "github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctutil" - "github.com/sigstore/cosign/pkg/cosign/tuf" "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/tuf" ) // This is the CT log public key target name @@ -81,20 +80,19 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error { if err != nil { return err } - defer tufClient.Close() targets, err := tufClient.GetTargetsByMeta(tuf.CTFE, []string{ctPublicKeyStr}) if err != nil { return err } for _, t := range targets { - pub, err := getPublicKey(t.Target) + pub, err := cryptoutils.UnmarshalPEMToPublicKey(t.Target) if err != nil { return err } keyID, err := ctutil.GetCTLogID(pub) if err != nil { - return errors.Wrap(err, "error getting CTFE public key hash") + return fmt.Errorf("error getting CTFE public key hash: %w", err) } pubKeys[keyID] = logIDMetadata{pub, t.Status} } @@ -102,15 +100,15 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error { fmt.Fprintf(os.Stderr, "**Warning** Using a non-standard public key for verifying SCT: %s\n", rootEnv) raw, err := os.ReadFile(rootEnv) if err != nil { - return errors.Wrap(err, "error reading alternate public key file") + return fmt.Errorf("error reading alternate public key file: %w", err) } - pubKey, err := getPublicKey(raw) + pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) if err != nil { - return errors.Wrap(err, "error parsing alternate public key from the file") + return fmt.Errorf("error parsing alternate public key from the file: %w", err) } keyID, err := ctutil.GetCTLogID(pubKey) if err != nil { - return errors.Wrap(err, "error getting CTFE public key hash") + return fmt.Errorf("error getting CTFE public key hash: %w", err) } pubKeys[keyID] = logIDMetadata{pubKey, tuf.Active} } @@ -150,7 +148,7 @@ func VerifySCT(ctx context.Context, certPEM,
chainPEM, rawSCT []byte) error { } err := ctutil.VerifySCT(pubKeyMetadata.pubKey, []*ctx509.Certificate{cert, certChain[0]}, sct, true) if err != nil { - return errors.Wrap(err, "error verifying embedded SCT") + return fmt.Errorf("error verifying embedded SCT: %w", err) } if pubKeyMetadata.status != tuf.Active { fmt.Fprintf(os.Stderr, "**Info** Successfully verified embedded SCT using an expired verification key\n") @@ -162,7 +160,7 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error { // check SCT in response header var addChainResp ct.AddChainResponse if err := json.Unmarshal(rawSCT, &addChainResp); err != nil { - return errors.Wrap(err, "unmarshal") + return fmt.Errorf("unmarshal: %w", err) } sct, err := addChainResp.ToSignedCertificateTimestamp() if err != nil { @@ -174,7 +172,7 @@ func VerifySCT(ctx context.Context, certPEM, chainPEM, rawSCT []byte) error { } err = ctutil.VerifySCT(pubKeyMetadata.pubKey, []*ctx509.Certificate{cert}, sct, false) if err != nil { - return errors.Wrap(err, "error verifying SCT") + return fmt.Errorf("error verifying SCT: %w", err) } if pubKeyMetadata.status != tuf.Active { fmt.Fprintf(os.Stderr, "**Info** Successfully verified SCT using an expired verification key\n") @@ -197,28 +195,3 @@ func VerifyEmbeddedSCT(ctx context.Context, chain []*x509.Certificate) error { } return VerifySCT(ctx, certPEM, chainPEM, []byte{}) } - -// Given a byte array, try to construct a public key from it. -// Supports PEM encoded public keys, falling back to DER. Supports -// PKIX and PKCS1 encoded keys. -func getPublicKey(in []byte) (crypto.PublicKey, error) { - var pubKey crypto.PublicKey - var err error - var derBytes []byte - pemBlock, _ := pem.Decode(in) - if pemBlock == nil { - fmt.Fprintf(os.Stderr, "Failed to decode non-standard public key for verifying SCT using PEM decode, trying as DER") - derBytes = in - } else { - derBytes = pemBlock.Bytes - } - pubKey, err = x509.ParsePKIXPublicKey(derBytes) - if err != nil { - // Try using the PKCS1 before giving up. - pubKey, err = x509.ParsePKCS1PublicKey(derBytes) - if err != nil { - return nil, errors.Wrap(err, "failed to parse CT log public key") - } - } - return pubKey, nil -} diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctutil/ctutil.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctutil/ctutil.go index a764b8e32f..9d6868169d 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctutil/ctutil.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctutil/ctutil.go @@ -52,26 +52,26 @@ func LeafHashB64(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, // // This function can be used with three different types of leaf certificate: // - X.509 Certificate: -// If using this function to calculate the leaf hash for a normal X.509 -// certificate then it is enough to just provide the end entity -// certificate in chain. This case assumes that the SCT being provided is -// not embedded within the leaf certificate provided, i.e. the certificate -// is what was submitted to the Certificate Transparency Log in order to -// obtain the SCT. For this case, set embedded to false. +// If using this function to calculate the leaf hash for a normal X.509 +// certificate then it is enough to just provide the end entity +// certificate in chain. This case assumes that the SCT being provided is +// not embedded within the leaf certificate provided, i.e.
the certificate +// is what was submitted to the Certificate Transparency Log in order to +// obtain the SCT. For this case, set embedded to false. // - Precertificate: -// If using this function to calculate the leaf hash for a precertificate -// then the issuing certificate must also be provided in chain. The -// precertificate should be at chain[0], and its issuer at chain[1]. For -// this case, set embedded to false. +// If using this function to calculate the leaf hash for a precertificate +// then the issuing certificate must also be provided in chain. The +// precertificate should be at chain[0], and its issuer at chain[1]. For +// this case, set embedded to false. // - X.509 Certificate containing the SCT embedded within it: -// If using this function to calculate the leaf hash for a certificate -// where the SCT provided is embedded within the certificate you -// are providing at chain[0], set embedded to true. LeafHash will -// calculate the leaf hash by building the corresponding precertificate. -// LeafHash will return an error if the provided SCT cannot be found -// embedded within chain[0]. As with the precertificate case, the issuing -// certificate must also be provided in chain. The certificate containing -// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// If using this function to calculate the leaf hash for a certificate +// where the SCT provided is embedded within the certificate you +// are providing at chain[0], set embedded to true. LeafHash will +// calculate the leaf hash by building the corresponding precertificate. +// LeafHash will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. // // Note: LeafHash doesn't check that the provided SCT verifies for the given // chain. It simply calculates what the leaf hash would be for the given @@ -91,25 +91,25 @@ func LeafHash(chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, emb // // This function can be used with three different types of leaf certificate: // - X.509 Certificate: -// If using this function to verify an SCT for a normal X.509 certificate -// then it is enough to just provide the end entity certificate in chain. -// This case assumes that the SCT being provided is not embedded within -// the leaf certificate provided, i.e. the certificate is what was -// submitted to the Certificate Transparency Log in order to obtain the -// SCT. For this case, set embedded to false. +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. // - Precertificate: -// If using this function to verify an SCT for a precertificate then the -// issuing certificate must also be provided in chain. The precertificate -// should be at chain[0], and its issuer at chain[1]. For this case, set -// embedded to false. +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. 
For this case, set +// embedded to false. // - X.509 Certificate containing the SCT embedded within it: -// If the SCT you wish to verify is embedded within the certificate you -// are providing at chain[0], set embedded to true. VerifySCT will -// verify the provided SCT by building the corresponding precertificate. -// VerifySCT will return an error if the provided SCT cannot be found -// embedded within chain[0]. As with the precertificate case, the issuing -// certificate must also be provided in chain. The certificate containing -// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { s, err := ct.NewSignatureVerifier(pubKey) if err != nil { @@ -126,25 +126,25 @@ func VerifySCT(pubKey crypto.PublicKey, chain []*x509.Certificate, sct *ct.Signe // // This function can be used with three different types of leaf certificate: // - X.509 Certificate: -// If using this function to verify an SCT for a normal X.509 certificate -// then it is enough to just provide the end entity certificate in chain. -// This case assumes that the SCT being provided is not embedded within -// the leaf certificate provided, i.e. the certificate is what was -// submitted to the Certificate Transparency Log in order to obtain the -// SCT. For this case, set embedded to false. +// If using this function to verify an SCT for a normal X.509 certificate +// then it is enough to just provide the end entity certificate in chain. +// This case assumes that the SCT being provided is not embedded within +// the leaf certificate provided, i.e. the certificate is what was +// submitted to the Certificate Transparency Log in order to obtain the +// SCT. For this case, set embedded to false. // - Precertificate: -// If using this function to verify an SCT for a precertificate then the -// issuing certificate must also be provided in chain. The precertificate -// should be at chain[0], and its issuer at chain[1]. For this case, set -// embedded to false. +// If using this function to verify an SCT for a precertificate then the +// issuing certificate must also be provided in chain. The precertificate +// should be at chain[0], and its issuer at chain[1]. For this case, set +// embedded to false. // - X.509 Certificate containing the SCT embedded within it: -// If the SCT you wish to verify is embedded within the certificate you -// are providing at chain[0], set embedded to true. VerifySCT will -// verify the provided SCT by building the corresponding precertificate. -// VerifySCT will return an error if the provided SCT cannot be found -// embedded within chain[0]. As with the precertificate case, the issuing -// certificate must also be provided in chain. The certificate containing -// the embedded SCT should be at chain[0], and its issuer at chain[1]. +// If the SCT you wish to verify is embedded within the certificate you +// are providing at chain[0], set embedded to true. 
VerifySCT will +// verify the provided SCT by building the corresponding precertificate. +// VerifySCT will return an error if the provided SCT cannot be found +// embedded within chain[0]. As with the precertificate case, the issuing +// certificate must also be provided in chain. The certificate containing +// the embedded SCT should be at chain[0], and its issuer at chain[1]. func VerifySCTWithVerifier(sv *ct.SignatureVerifier, chain []*x509.Certificate, sct *ct.SignedCertificateTimestamp, embedded bool) error { if sv == nil { return errors.New("ct.SignatureVerifier is nil") diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attach.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attach.go index 1553c83479..7829b52f54 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attach.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attach.go @@ -17,6 +17,7 @@ package options import ( "fmt" + "strings" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/spf13/cobra" @@ -60,6 +61,7 @@ func (o *AttachSBOMOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.SBOM, "sbom", "", "path to the sbom, or {-} for stdin") + _ = cmd.Flags().SetAnnotation("sbom", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.SBOMType, "type", "spdx", "type of sbom (spdx|cyclonedx|syft)") @@ -69,12 +71,16 @@ func (o *AttachSBOMOptions) AddFlags(cmd *cobra.Command) { } func (o *AttachSBOMOptions) MediaType() (types.MediaType, error) { + var looksLikeJSON bool + if strings.HasSuffix(o.SBOM, ".json") { + looksLikeJSON = true + } switch o.SBOMType { case "cyclonedx": if o.SBOMInputFormat != "" && o.SBOMInputFormat != ctypes.XMLInputFormat && o.SBOMInputFormat != ctypes.JSONInputFormat { return "invalid", fmt.Errorf("invalid SBOM input format: %q, expected (json|xml)", o.SBOMInputFormat) } - if o.SBOMInputFormat == ctypes.JSONInputFormat { + if o.SBOMInputFormat == ctypes.JSONInputFormat || looksLikeJSON { return ctypes.CycloneDXJSONMediaType, nil } return ctypes.CycloneDXXMLMediaType, nil @@ -83,7 +89,7 @@ func (o *AttachSBOMOptions) MediaType() (types.MediaType, error) { if o.SBOMInputFormat != "" && o.SBOMInputFormat != ctypes.TextInputFormat && o.SBOMInputFormat != ctypes.JSONInputFormat { return "invalid", fmt.Errorf("invalid SBOM input format: %q, expected (json|text)", o.SBOMInputFormat) } - if o.SBOMInputFormat == ctypes.JSONInputFormat { + if o.SBOMInputFormat == ctypes.JSONInputFormat || looksLikeJSON { return ctypes.SPDXJSONMediaType, nil } return ctypes.SPDXMediaType, nil @@ -99,14 +105,14 @@ func (o *AttachSBOMOptions) MediaType() (types.MediaType, error) { // AttachAttestationOptions is the top level wrapper for the attach attestation command. 
type AttachAttestationOptions struct { - Attestation string - Registry RegistryOptions + Attestations []string + Registry RegistryOptions } // AddFlags implements Interface func (o *AttachAttestationOptions) AddFlags(cmd *cobra.Command) { o.Registry.AddFlags(cmd) - cmd.Flags().StringVar(&o.Attestation, "attestation", "", + cmd.Flags().StringArrayVarP(&o.Attestations, "attestation", "", nil, "path to the attestation envelope") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attest.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attest.go index caa1db5213..43fe37a27c 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attest.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/attest.go @@ -21,13 +21,15 @@ import ( // AttestOptions is the top level wrapper for the attest command. type AttestOptions struct { - Key string - Cert string - CertChain string - NoUpload bool - Force bool - Recursive bool - Replace bool + Key string + Cert string + CertChain string + NoUpload bool + Force bool + Recursive bool + Replace bool + SkipConfirmation bool + NoTlogUpload bool Rekor RekorOptions Fulcio FulcioOptions @@ -50,15 +52,18 @@ func (o *AttestOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "path to the private key file, KMS URI or Kubernetes Secret") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{"key"}) - cmd.Flags().StringVar(&o.Cert, "cert", "", + cmd.Flags().StringVar(&o.Cert, "certificate", "", "path to the X.509 certificate in PEM format to include in the OCI Signature") + _ = cmd.Flags().SetAnnotation("certificate", cobra.BashCompFilenameExt, []string{"cert"}) - cmd.Flags().StringVar(&o.CertChain, "cert-chain", "", + cmd.Flags().StringVar(&o.CertChain, "certificate-chain", "", "path to a list of CA X.509 certificates in PEM format which will be needed "+ "when building the certificate chain for the signing certificate. "+ "Must start with the parent intermediate CA certificate of the "+ "signing certificate and end with the root certificate. Included in the OCI Signature") + _ = cmd.Flags().SetAnnotation("certificate-chain", cobra.BashCompFilenameExt, []string{"cert"}) cmd.Flags().BoolVar(&o.NoUpload, "no-upload", false, "do not upload the generated attestation") @@ -71,4 +76,10 @@ func (o *AttestOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().BoolVarP(&o.Replace, "replace", "", false, "") + + cmd.Flags().BoolVarP(&o.SkipConfirmation, "yes", "y", false, + "skip confirmation prompts for non-destructive operations") + + cmd.Flags().BoolVar(&o.NoTlogUpload, "no-tlog-upload", false, + "whether to not upload the transparency log") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/certificate.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/certificate.go index 615842c810..f4effe5e1e 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/certificate.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/certificate.go @@ -20,31 +20,55 @@ import ( // CertVerifyOptions is the wrapper for certificate verification. 
type CertVerifyOptions struct { - Cert string - CertEmail string - CertOidcIssuer string - CertChain string - EnforceSCT bool + Cert string + CertEmail string + CertOidcIssuer string + CertGithubWorkflowTrigger string + CertGithubWorkflowSha string + CertGithubWorkflowName string + CertGithubWorkflowRepository string + CertGithubWorkflowRef string + CertChain string + EnforceSCT bool } var _ Interface = (*RekorOptions)(nil) // AddFlags implements Interface func (o *CertVerifyOptions) AddFlags(cmd *cobra.Command) { - cmd.Flags().StringVar(&o.Cert, "cert", "", - "path to the public certificate") + cmd.Flags().StringVar(&o.Cert, "certificate", "", + "path to the public certificate. The certificate will be verified against the Fulcio roots if the --certificate-chain option is not passed.") + _ = cmd.Flags().SetAnnotation("certificate", cobra.BashCompFilenameExt, []string{"cert"}) - cmd.Flags().StringVar(&o.CertEmail, "cert-email", "", + cmd.Flags().StringVar(&o.CertEmail, "certificate-email", "", "the email expected in a valid Fulcio certificate") - cmd.Flags().StringVar(&o.CertOidcIssuer, "cert-oidc-issuer", "", + cmd.Flags().StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "the OIDC issuer expected in a valid Fulcio certificate, e.g. https://token.actions.githubusercontent.com or https://oauth2.sigstore.dev/auth") - cmd.Flags().StringVar(&o.CertChain, "cert-chain", "", + // -- Cert extensions begin -- + // Source: https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md + cmd.Flags().StringVar(&o.CertGithubWorkflowTrigger, "certificate-github-workflow-trigger", "", + "contains the event_name claim from the GitHub OIDC Identity token that contains the name of the event that triggered the workflow run") + + cmd.Flags().StringVar(&o.CertGithubWorkflowSha, "certificate-github-workflow-sha", "", + "contains the sha claim from the GitHub OIDC Identity token that contains the commit SHA that the workflow run was based upon.") + + cmd.Flags().StringVar(&o.CertGithubWorkflowName, "certificate-github-workflow-name", "", + "contains the workflow claim from the GitHub OIDC Identity token that contains the name of the executed workflow.") + + cmd.Flags().StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", + "contains the repository claim from the GitHub OIDC Identity token that contains the repository that the workflow run was based upon") + + cmd.Flags().StringVar(&o.CertGithubWorkflowRef, "certificate-github-workflow-ref", "", + "contains the ref claim from the GitHub OIDC Identity token that contains the git ref that the workflow run was based upon.") + // -- Cert extensions end -- + cmd.Flags().StringVar(&o.CertChain, "certificate-chain", "", "path to a list of CA certificates in PEM format which will be needed "+ "when building the certificate chain for the signing certificate. 
"+ "Must start with the parent intermediate CA certificate of the "+ "signing certificate and end with the root certificate") + _ = cmd.Flags().SetAnnotation("certificate-chain", cobra.BashCompFilenameExt, []string{"cert"}) cmd.Flags().BoolVar(&o.EnforceSCT, "enforce-sct", false, "whether to enforce that a certificate contain an embedded SCT, a proof of "+ diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/clean.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/clean.go index 32b4907b47..3bda7b0e20 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/clean.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/clean.go @@ -27,5 +27,6 @@ var _ Interface = (*CleanOptions)(nil) func (c *CleanOptions) AddFlags(cmd *cobra.Command) { c.Registry.AddFlags(cmd) cmd.Flags().StringVarP(&c.CleanType, "type", "", "all", "a type of clean: (default: all)") + // TODO: Rename to --skip-confirmation for consistency? cmd.Flags().BoolVarP(&c.Force, "force", "f", false, "do not prompt for confirmation") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/download.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/download.go new file mode 100644 index 0000000000..d61abdf277 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/download.go @@ -0,0 +1,31 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package options + +import "github.com/spf13/cobra" + +// SBOMDownloadOptions is the struct for controlling SBOM downloads +type SBOMDownloadOptions struct { + Platform string // Platform to download sboms +} + +var _ Interface = (*SBOMDownloadOptions)(nil) + +// AddFlags implements Interface +func (o *SBOMDownloadOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.Platform, "platform", "", + "download SBOM for a specific platform image") +} diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/files.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/files.go index 5dda2572fc..1ec62b2e9b 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/files.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/files.go @@ -54,4 +54,5 @@ func (o *FilesOptions) String() string { func (o *FilesOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringSliceVarP(&o.Files, "files", "f", nil, ":[platform/arch]") + _ = cmd.Flags().SetAnnotation("files", cobra.BashCompFilenameExt, []string{}) } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/import_key_pair.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/import_key_pair.go index b4d3ac3d4b..fde2719162 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/import_key_pair.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/import_key_pair.go @@ -31,4 +31,5 @@ var _ Interface = (*ImportKeyPairOptions)(nil) func (o *ImportKeyPairOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "import key pair to use for signing") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{}) } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/initialize.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/initialize.go index 6c2aa744b5..0929f530e5 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/initialize.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/initialize.go @@ -16,7 +16,7 @@ package options import ( - "github.com/sigstore/cosign/pkg/cosign/tuf" + "github.com/sigstore/sigstore/pkg/tuf" "github.com/spf13/cobra" ) @@ -35,4 +35,5 @@ func (o *InitializeOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Root, "root", "", "path to trusted initial root. defaults to embedded root") + _ = cmd.Flags().SetAnnotation("root", cobra.BashCompSubdirsInDir, []string{}) } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/key.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/key.go index 55edca2856..567faf58d4 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/key.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/key.go @@ -29,8 +29,11 @@ type KeyOpts struct { OIDCClientID string OIDCClientSecret string OIDCRedirectURL string - OIDCDisableProviders bool // Disable OIDC credential providers in keyless signer + OIDCDisableProviders bool // Disable OIDC credential providers in keyless signer + OIDCProvider string // Specify which OIDC credential provider to use for keyless signer BundlePath string + SkipConfirmation bool + // FulcioAuthFlow is the auth flow to use when authenticating against // Fulcio. See https://pkg.go.dev/github.com/sigstore/cosign/cmd/cosign/cli/fulcio#pkg-constants // for valid values.
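key.go above grows two keyless-signing knobs: OIDCProvider pins a single ambient credential provider instead of trying them all, and SkipConfirmation pre-answers the new privacy prompt in fulcio.NewSigner. A sketch of wiring them up, limited to fields visible in this diff; the endpoint URLs are illustrative:

package main

import (
	"context"
	"log"

	"github.com/sigstore/cosign/cmd/cosign/cli/fulcio"
	"github.com/sigstore/cosign/cmd/cosign/cli/options"
)

func main() {
	ko := options.KeyOpts{
		FulcioURL:        "https://fulcio.sigstore.dev", // illustrative endpoint
		OIDCIssuer:       "https://oauth2.sigstore.dev/auth",
		OIDCClientID:     "sigstore",
		OIDCProvider:     "github", // new: pin one of spiffe|google|github|filesystem
		SkipConfirmation: true,     // new: pre-approve the privacy prompt
	}
	signer, err := fulcio.NewSigner(context.Background(), ko)
	if err != nil {
		log.Fatal(err)
	}
	_ = signer // satisfies the signature.Signer interface as shown above
}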
diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/load.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/load.go index 4a6e9e9db9..96c7b4a956 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/load.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/load.go @@ -30,5 +30,6 @@ var _ Interface = (*LoadOptions)(nil) func (o *LoadOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Directory, "dir", "", "path to directory where the signed image is stored on disk") + _ = cmd.Flags().SetAnnotation("dir", cobra.BashCompSubdirsInDir, []string{}) _ = cmd.MarkFlagRequired("dir") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/oidc.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/oidc.go index 848706487f..ec74895ae2 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/oidc.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/oidc.go @@ -21,7 +21,6 @@ import ( "strings" "unicode/utf8" - "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -33,6 +32,7 @@ type OIDCOptions struct { ClientID string clientSecretFile string RedirectURL string + Provider string DisableAmbientProviders bool } @@ -40,7 +40,7 @@ func (o *OIDCOptions) ClientSecret() (string, error) { if o.clientSecretFile != "" { clientSecretBytes, err := os.ReadFile(o.clientSecretFile) if err != nil { - return "", errors.Wrap(err, "reading OIDC client secret") + return "", fmt.Errorf("reading OIDC client secret: %w", err) } if !utf8.Valid(clientSecretBytes) { return "", fmt.Errorf("OIDC client secret in file %s not valid utf8", o.clientSecretFile) @@ -64,10 +64,14 @@ func (o *OIDCOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.clientSecretFile, "oidc-client-secret-file", "", "[EXPERIMENTAL] Path to file containing OIDC client secret for application") + _ = cmd.Flags().SetAnnotation("oidc-client-secret-file", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.RedirectURL, "oidc-redirect-url", "", "[EXPERIMENTAL] OIDC redirect URL (Optional). The default oidc-redirect-url is 'http://localhost:0/auth/callback'.") + cmd.Flags().StringVar(&o.Provider, "oidc-provider", "", + "[EXPERIMENTAL] Specify the provider to get the OIDC token from (Optional). If unset, all options will be tried. Options include: [spiffe, google, github, filesystem]") + cmd.Flags().BoolVar(&o.DisableAmbientProviders, "oidc-disable-ambient-providers", false, "[EXPERIMENTAL] Disable ambient OIDC providers. 
When true, ambient credentials will not be read") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/pkcs11_tool.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/pkcs11_tool.go index b18aacfa94..735e800956 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/pkcs11_tool.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/pkcs11_tool.go @@ -45,6 +45,7 @@ var _ Interface = (*PKCS11ToolListKeysUrisOptions)(nil) func (o *PKCS11ToolListKeysUrisOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.ModulePath, "module-path", "", "absolute path to the PKCS11 module") + _ = cmd.Flags().SetAnnotation("module-path", cobra.BashCompFilenameExt, []string{}) cmd.Flags().UintVar(&o.SlotID, "slot-id", 0, "id of the PKCS11 slot, uses 0 if empty") diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/policy.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/policy.go index 124144029f..a7ee209a67 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/policy.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/policy.go @@ -39,6 +39,7 @@ func (o *PolicyInitOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.OutFile, "out", "o", "output policy locally") + _ = cmd.Flags().SetAnnotation("out", cobra.BashCompSubdirsInDir, []string{}) cmd.Flags().StringVar(&o.Issuer, "issuer", "", "trusted issuer to use for identity tokens, e.g. https://accounts.google.com") @@ -56,11 +57,12 @@ func (o *PolicyInitOptions) AddFlags(cmd *cobra.Command) { } type PolicySignOptions struct { - ImageRef string - OutFile string - Registry RegistryOptions - Fulcio FulcioOptions - Rekor RekorOptions + ImageRef string + OutFile string + Registry RegistryOptions + Fulcio FulcioOptions + Rekor RekorOptions + SkipConfirmation bool OIDC OIDCOptions } @@ -75,6 +77,9 @@ func (o *PolicySignOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.OutFile, "out", "o", "output policy locally") + cmd.Flags().BoolVarP(&o.SkipConfirmation, "yes", "y", false, + "skip confirmation prompts for non-destructive operations") + o.Registry.AddFlags(cmd) o.Fulcio.AddFlags(cmd) o.Rekor.AddFlags(cmd) diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/predicate.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/predicate.go index 691de3d1e7..b502dce3c6 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/predicate.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/predicate.go @@ -28,20 +28,24 @@ import ( ) const ( - PredicateCustom = "custom" - PredicateSLSA = "slsaprovenance" - PredicateSPDX = "spdx" - PredicateLink = "link" - PredicateVuln = "vuln" + PredicateCustom = "custom" + PredicateSLSA = "slsaprovenance" + PredicateSPDX = "spdx" + PredicateSPDXJSON = "spdxjson" + PredicateCycloneDX = "cyclonedx" + PredicateLink = "link" + PredicateVuln = "vuln" ) // PredicateTypeMap is the mapping between the predicate `type` option to predicate URI. 
var PredicateTypeMap = map[string]string{ - PredicateCustom: attestation.CosignCustomProvenanceV01, - PredicateSLSA: slsa.PredicateSLSAProvenance, - PredicateSPDX: in_toto.PredicateSPDX, - PredicateLink: in_toto.PredicateLinkV1, - PredicateVuln: attestation.CosignVulnProvenanceV01, + PredicateCustom: attestation.CosignCustomProvenanceV01, + PredicateSLSA: slsa.PredicateSLSAProvenance, + PredicateSPDX: in_toto.PredicateSPDX, + PredicateSPDXJSON: in_toto.PredicateSPDX, + PredicateCycloneDX: in_toto.PredicateCycloneDX, + PredicateLink: in_toto.PredicateLinkV1, + PredicateVuln: attestation.CosignVulnProvenanceV01, } // PredicateOptions is the wrapper for predicate related options. @@ -54,7 +58,7 @@ var _ Interface = (*PredicateOptions)(nil) // AddFlags implements Interface func (o *PredicateOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Type, "type", "custom", - "specify a predicate type (slsaprovenance|link|spdx|vuln|custom) or an URI") + "specify a predicate type (slsaprovenance|link|spdx|spdxjson|cyclonedx|vuln|custom) or a URI") } // ParsePredicateType parses the predicate `type` flag passed into a predicate URI, or validates `type` is a valid URI. diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/public_key.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/public_key.go index cbb0f0e3cc..e3c7341a31 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/public_key.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/public_key.go @@ -34,7 +34,9 @@ func (o *PublicKeyOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "path to the private key file, KMS URI or Kubernetes Secret") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.OutFile, "outfile", "", "file to write the public key to") + _ = cmd.Flags().SetAnnotation("outfile", cobra.BashCompFilenameExt, []string{}) } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/registry.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/registry.go index 0fcb750e7b..d80e8d7846 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/registry.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/registry.go @@ -17,7 +17,7 @@ package options import ( "context" "crypto/tls" - "io/ioutil" + "io" "net/http" ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login" @@ -27,6 +27,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/google" "github.com/google/go-containerregistry/pkg/v1/remote" + alibabaacr "github.com/mozillazg/docker-credential-acr-helper/pkg/credhelper" ociremote "github.com/sigstore/cosign/pkg/oci/remote" "github.com/spf13/cobra" ) @@ -83,8 +84,9 @@ func (o *RegistryOptions) GetRegistryClientOpts(ctx context.Context) []remote.Op kc := authn.NewMultiKeychain( authn.DefaultKeychain, google.Keychain, - authn.NewKeychainFromHelper(ecr.NewECRHelper(ecr.WithLogger(ioutil.Discard))), + authn.NewKeychainFromHelper(ecr.NewECRHelper(ecr.WithLogger(io.Discard))), authn.NewKeychainFromHelper(credhelper.NewACRCredentialsHelper()), + authn.NewKeychainFromHelper(alibabaacr.NewACRHelper().WithLoggerOut(io.Discard)), github.Keychain, ) opts = append(opts, remote.WithAuthFromKeychain(kc)) diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/root.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/root.go index
f42d92213b..d00f159965 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/root.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/root.go @@ -37,6 +37,7 @@ var _ Interface = (*RootOptions)(nil) func (o *RootOptions) AddFlags(cmd *cobra.Command) { cmd.PersistentFlags().StringVar(&o.OutputFile, "output-file", "", "log output to a file") + _ = cmd.PersistentFlags().SetAnnotation("output-file", cobra.BashCompFilenameExt, []string{}) cmd.PersistentFlags().BoolVarP(&o.Verbose, "verbose", "d", false, "log debug output") diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/save.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/save.go index c44556b2d2..58d449172b 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/save.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/save.go @@ -30,5 +30,6 @@ var _ Interface = (*SaveOptions)(nil) func (o *SaveOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Directory, "dir", "", "path to dir where the signed image should be stored on disk") + _ = cmd.Flags().SetAnnotation("dir", cobra.BashCompSubdirsInDir, []string{}) _ = cmd.MarkFlagRequired("dir") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/sign.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/sign.go index a4a1df779f..55ebffb7b4 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/sign.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/sign.go @@ -32,6 +32,8 @@ type SignOptions struct { Force bool Recursive bool Attachment string + SkipConfirmation bool + NoTlogUpload bool Rekor RekorOptions Fulcio FulcioOptions @@ -54,27 +56,33 @@ func (o *SignOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "path to the private key file, KMS URI or Kubernetes Secret") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{}) - cmd.Flags().StringVar(&o.Cert, "cert", "", + cmd.Flags().StringVar(&o.Cert, "certificate", "", "path to the X.509 certificate in PEM format to include in the OCI Signature") + _ = cmd.Flags().SetAnnotation("certificate", cobra.BashCompFilenameExt, []string{"cert"}) - cmd.Flags().StringVar(&o.CertChain, "cert-chain", "", + cmd.Flags().StringVar(&o.CertChain, "certificate-chain", "", "path to a list of CA X.509 certificates in PEM format which will be needed "+ "when building the certificate chain for the signing certificate. "+ "Must start with the parent intermediate CA certificate of the "+ "signing certificate and end with the root certificate.
Included in the OCI Signature") + _ = cmd.Flags().SetAnnotation("certificate-chain", cobra.BashCompFilenameExt, []string{"cert"}) cmd.Flags().BoolVar(&o.Upload, "upload", true, "whether to upload the signature") cmd.Flags().StringVar(&o.OutputSignature, "output-signature", "", "write the signature to FILE") + _ = cmd.Flags().SetAnnotation("output-signature", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.OutputCertificate, "output-certificate", "", "write the certificate to FILE") + _ = cmd.Flags().SetAnnotation("output-certificate", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.PayloadPath, "payload", "", "path to a payload file to use rather than generating one") + _ = cmd.Flags().SetAnnotation("payload", cobra.BashCompFilenameExt, []string{}) cmd.Flags().BoolVarP(&o.Force, "force", "f", false, "skip warnings and confirmations") @@ -84,4 +92,10 @@ func (o *SignOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Attachment, "attachment", "", "related image attachment to sign (sbom), default none") + + cmd.Flags().BoolVarP(&o.SkipConfirmation, "yes", "y", false, + "skip confirmation prompts for non-destructive operations") + + cmd.Flags().BoolVar(&o.NoTlogUpload, "no-tlog-upload", false, + "whether to skip uploading to the transparency log") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/signblob.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/signblob.go index 49381daa84..3d458bc485 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/signblob.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/signblob.go @@ -33,6 +33,7 @@ type SignBlobOptions struct { OIDC OIDCOptions Registry RegistryOptions BundlePath string + SkipConfirmation bool } var _ Interface = (*SignBlobOptions)(nil) @@ -47,19 +48,26 @@ func (o *SignBlobOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "path to the private key file, KMS URI or Kubernetes Secret") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{}) cmd.Flags().BoolVar(&o.Base64Output, "b64", true, "whether to base64 encode the output") cmd.Flags().StringVar(&o.OutputSignature, "output-signature", "", "write the signature to FILE") + _ = cmd.Flags().SetAnnotation("output-signature", cobra.BashCompFilenameExt, []string{}) // TODO: remove when output flag is fully deprecated cmd.Flags().StringVar(&o.Output, "output", "", "write the signature to FILE") cmd.Flags().StringVar(&o.OutputCertificate, "output-certificate", "", "write the certificate to FILE") + _ = cmd.Flags().SetAnnotation("output-certificate", cobra.BashCompFilenameExt, []string{}) cmd.Flags().StringVar(&o.BundlePath, "bundle", "", "write everything required to verify the blob to a FILE") + _ = cmd.Flags().SetAnnotation("bundle", cobra.BashCompFilenameExt, []string{}) + + cmd.Flags().BoolVarP(&o.SkipConfirmation, "yes", "y", false, + "skip confirmation prompts for non-destructive operations") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/upload.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/upload.go index 9d7b77ac9d..488944cf54 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/upload.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/upload.go @@ -51,5 +51,6 @@ func (o *UploadWASMOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVarP(&o.File, "file", "f", "", "path to the wasm file to upload") + _ = cmd.Flags().SetAnnotation("file", cobra.BashCompFilenameExt,
[]string{}) _ = cmd.MarkFlagRequired("file") } diff --git a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/verify.go b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/verify.go index 869197b333..123da549b7 100644 --- a/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/verify.go +++ b/vendor/github.com/sigstore/cosign/cmd/cosign/cli/options/verify.go @@ -49,6 +49,7 @@ func (o *VerifyOptions) AddFlags(cmd *cobra.Command) { cmd.Flags().StringVar(&o.Key, "key", "", "path to the public key file, KMS URI or Kubernetes Secret") + _ = cmd.Flags().SetAnnotation("key", cobra.BashCompFilenameExt, []string{}) cmd.Flags().BoolVar(&o.CheckClaims, "check-claims", true, "whether to check the claims found") diff --git a/vendor/github.com/sigstore/cosign/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go b/vendor/github.com/sigstore/cosign/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go new file mode 100644 index 0000000000..c02db558d7 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/internal/pkg/cosign/fulcio/fulcioroots/fulcioroots.go @@ -0,0 +1,94 @@ +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fulcioroots + +import ( + "bytes" + "crypto/x509" + "fmt" + "os" + "sync" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/fulcioroots" +) + +var ( + rootsOnce sync.Once + roots *x509.CertPool + intermediates *x509.CertPool + singletonRootErr error +) + +const altRoot = "SIGSTORE_ROOT_FILE" + +// Get returns the Fulcio root certificate. +// +// If the SIGSTORE_ROOT_FILE environment variable is set, the root config found +// there will be used instead of the normal Fulcio roots. +func Get() (*x509.CertPool, error) { + rootsOnce.Do(func() { + roots, intermediates, singletonRootErr = initRoots() + }) + return roots, singletonRootErr +} + +// GetIntermediates returns the Fulcio intermediate certificates. +// +// If the SIGSTORE_ROOT_FILE environment variable is set, the root config found +// there will be used instead of the normal Fulcio intermediates. 
+func GetIntermediates() (*x509.CertPool, error) { + rootsOnce.Do(func() { + roots, intermediates, singletonRootErr = initRoots() + }) + return intermediates, singletonRootErr +} + +func initRoots() (*x509.CertPool, *x509.CertPool, error) { + rootPool := x509.NewCertPool() + intermediatePool := x509.NewCertPool() + + rootEnv := os.Getenv(altRoot) + if rootEnv != "" { + raw, err := os.ReadFile(rootEnv) + if err != nil { + return nil, nil, fmt.Errorf("error reading root PEM file: %w", err) + } + certs, err := cryptoutils.UnmarshalCertificatesFromPEM(raw) + if err != nil { + return nil, nil, fmt.Errorf("error unmarshalling certificates: %w", err) + } + for _, cert := range certs { + // root certificates are self-signed + if bytes.Equal(cert.RawSubject, cert.RawIssuer) { + rootPool.AddCert(cert) + } else { + intermediatePool.AddCert(cert) + } + } + } else { + var err error + rootPool, err = fulcioroots.Get() + if err != nil { + return nil, nil, err + } + intermediatePool, err = fulcioroots.GetIntermediates() + if err != nil { + return nil, nil, err + } + } + return rootPool, intermediatePool, nil +} diff --git a/vendor/github.com/sigstore/cosign/pkg/blob/load.go b/vendor/github.com/sigstore/cosign/pkg/blob/load.go index c92d49c4ca..26db136784 100644 --- a/vendor/github.com/sigstore/cosign/pkg/blob/load.go +++ b/vendor/github.com/sigstore/cosign/pkg/blob/load.go @@ -23,6 +23,14 @@ import ( "strings" ) +type UnrecognizedSchemeError struct { + Scheme string +} + +func (e *UnrecognizedSchemeError) Error() string { + return fmt.Sprintf("loading URL: unrecognized scheme: %s", e.Scheme) +} + func LoadFileOrURL(fileRef string) ([]byte, error) { var raw []byte var err error @@ -51,7 +59,7 @@ func LoadFileOrURL(fileRef string) ([]byte, error) { } raw = []byte(value) default: - return nil, fmt.Errorf("loading URL: unrecognized scheme: %s", scheme) + return nil, &UnrecognizedSchemeError{Scheme: scheme} } } else { raw, err = os.ReadFile(filepath.Clean(fileRef)) diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/attestation/attestation.go b/vendor/github.com/sigstore/cosign/pkg/cosign/attestation/attestation.go index 3215c32ca8..9c16393a2a 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/attestation/attestation.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/attestation/attestation.go @@ -26,7 +26,6 @@ import ( slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/in-toto/in-toto-golang/in_toto" - "github.com/pkg/errors" ) const ( @@ -72,10 +71,10 @@ type DB struct { } type Scanner struct { - URI string `json:"uri"` - Version string `json:"version"` - DB DB `json:"db"` - Result map[string]interface{} `json:"result"` + URI string `json:"uri"` + Version string `json:"version"` + DB DB `json:"db"` + Result interface{} `json:"result"` } type Metadata struct { @@ -100,7 +99,7 @@ type GenerateOpts struct { } // GenerateStatement returns an in-toto statement based on the provided -// predicate type (custom|slsaprovenance|spdx|link). +// predicate type (custom|slsaprovenance|spdx|spdxjson|cyclonedx|link). 
func GenerateStatement(opts GenerateOpts) (interface{}, error) { predicate, err := io.ReadAll(opts.Predicate) if err != nil { @@ -111,7 +110,11 @@ func GenerateStatement(opts GenerateOpts) (interface{}, error) { case "slsaprovenance": return generateSLSAProvenanceStatement(predicate, opts.Digest, opts.Repo) case "spdx": - return generateSPDXStatement(predicate, opts.Digest, opts.Repo) + return generateSPDXStatement(predicate, opts.Digest, opts.Repo, false) + case "spdxjson": + return generateSPDXStatement(predicate, opts.Digest, opts.Repo, true) + case "cyclonedx": + return generateCycloneDXStatement(predicate, opts.Digest, opts.Repo) case "link": return generateLinkStatement(predicate, opts.Digest, opts.Repo) case "vuln": @@ -189,7 +192,7 @@ func generateCustomPredicate(rawPayload []byte, customType, timestamp string) (i var result map[string]interface{} if err := json.Unmarshal(rawPayload, &result); err != nil { - return nil, errors.Wrapf(err, "invalid JSON payload for predicate type %s", customType) + return nil, fmt.Errorf("invalid JSON payload for predicate type %s: %w", customType, err) } return result, nil @@ -203,7 +206,7 @@ func generateSLSAProvenanceStatement(rawPayload []byte, digest string, repo stri } err = json.Unmarshal(rawPayload, &predicate) if err != nil { - return "", errors.Wrap(err, "unmarshal Provenance predicate") + return "", fmt.Errorf("unmarshal Provenance predicate: %w", err) } return in_toto.ProvenanceStatement{ StatementHeader: generateStatementHeader(digest, repo, slsa.PredicateSLSAProvenance), @@ -219,7 +222,7 @@ func generateLinkStatement(rawPayload []byte, digest string, repo string) (inter } err = json.Unmarshal(rawPayload, &link) if err != nil { - return "", errors.Wrap(err, "unmarshal Link statement") + return "", fmt.Errorf("unmarshal Link statement: %w", err) } return in_toto.LinkStatement{ StatementHeader: generateStatementHeader(digest, repo, in_toto.PredicateLinkV1), @@ -227,11 +230,32 @@ func generateLinkStatement(rawPayload []byte, digest string, repo string) (inter }, nil } -func generateSPDXStatement(rawPayload []byte, digest string, repo string) (interface{}, error) { +func generateSPDXStatement(rawPayload []byte, digest string, repo string, parseJSON bool) (interface{}, error) { + var data interface{} + if parseJSON { + if err := json.Unmarshal(rawPayload, &data); err != nil { + return nil, err + } + } else { + data = string(rawPayload) + } return in_toto.SPDXStatement{ StatementHeader: generateStatementHeader(digest, repo, in_toto.PredicateSPDX), Predicate: CosignPredicate{ - Data: string(rawPayload), + Data: data, + }, + }, nil +} + +func generateCycloneDXStatement(rawPayload []byte, digest string, repo string) (interface{}, error) { + var data interface{} + if err := json.Unmarshal(rawPayload, &data); err != nil { + return nil, err + } + return in_toto.SPDXStatement{ + StatementHeader: generateStatementHeader(digest, repo, in_toto.PredicateCycloneDX), + Predicate: CosignPredicate{ + Data: data, }, }, nil } diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/certextensions.go b/vendor/github.com/sigstore/cosign/pkg/cosign/certextensions.go new file mode 100644 index 0000000000..376a2add8b --- /dev/null +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/certextensions.go @@ -0,0 +1,84 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cosign + +import "crypto/x509" + +type CertExtensions struct { + Cert *x509.Certificate +} + +var ( + // Fulcio cert-extensions, documented here: https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md + CertExtensionOIDCIssuer = "1.3.6.1.4.1.57264.1.1" + CertExtensionGithubWorkflowTrigger = "1.3.6.1.4.1.57264.1.2" + CertExtensionGithubWorkflowSha = "1.3.6.1.4.1.57264.1.3" + CertExtensionGithubWorkflowName = "1.3.6.1.4.1.57264.1.4" + CertExtensionGithubWorkflowRepository = "1.3.6.1.4.1.57264.1.5" + CertExtensionGithubWorkflowRef = "1.3.6.1.4.1.57264.1.6" + + CertExtensionMap = map[string]string{ + CertExtensionOIDCIssuer: "oidcIssuer", + CertExtensionGithubWorkflowTrigger: "githubWorkflowTrigger", + CertExtensionGithubWorkflowSha: "githubWorkflowSha", + CertExtensionGithubWorkflowName: "githubWorkflowName", + CertExtensionGithubWorkflowRepository: "githubWorkflowRepository", + CertExtensionGithubWorkflowRef: "githubWorkflowRef", + } +) + +func (ce *CertExtensions) certExtensions() map[string]string { + extensions := map[string]string{} + for _, ext := range ce.Cert.Extensions { + readableName, ok := CertExtensionMap[ext.Id.String()] + if ok { + extensions[readableName] = string(ext.Value) + } else { + extensions[ext.Id.String()] = string(ext.Value) + } + } + return extensions +} + +// GetIssuer returns the issuer for a Certificate +func (ce *CertExtensions) GetIssuer() string { + return ce.certExtensions()["oidcIssuer"] +} + +// GetCertExtensionGithubWorkflowTrigger returns the GitHub Workflow Trigger for a Certificate +func (ce *CertExtensions) GetCertExtensionGithubWorkflowTrigger() string { + return ce.certExtensions()["githubWorkflowTrigger"] +} + +// GetExtensionGithubWorkflowSha returns the GitHub Workflow SHA for a Certificate +func (ce *CertExtensions) GetExtensionGithubWorkflowSha() string { + return ce.certExtensions()["githubWorkflowSha"] +} + +// GetCertExtensionGithubWorkflowName returns the GitHub Workflow Name for a Certificate +func (ce *CertExtensions) GetCertExtensionGithubWorkflowName() string { + return ce.certExtensions()["githubWorkflowName"] +} + +// GetCertExtensionGithubWorkflowRepository returns the GitHub Workflow Repository for a Certificate +func (ce *CertExtensions) GetCertExtensionGithubWorkflowRepository() string { + return ce.certExtensions()["githubWorkflowRepository"] +} + +// GetCertExtensionGithubWorkflowRef returns the GitHub Workflow Ref for a Certificate +func (ce *CertExtensions) GetCertExtensionGithubWorkflowRef() string { + return ce.certExtensions()["githubWorkflowRef"] +} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/common.go b/vendor/github.com/sigstore/cosign/pkg/cosign/common.go index 1f8034a6b8..0992843bd8 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/common.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/common.go @@ -17,34 +17,33 @@ package cosign import ( "bufio" + "errors" "fmt" "os" "strings" "syscall" - "github.com/pkg/errors" "golang.org/x/term" ) -// TODO need to centralize this logic -func FileExists(filename string) bool { - info, err := os.Stat(filename) - if 
os.IsNotExist(err) { - return false +// ConfirmPrompt prompts the user for confirmation for an action. Supports skipping +// the confirmation prompt when skipConfirmation is set. +// TODO(jason): Move this to an internal package. +func ConfirmPrompt(msg string, skipConfirmation bool) (bool, error) { + if skipConfirmation { + return true, nil } - return !info.IsDir() -} -func ConfirmPrompt(msg string) (bool, error) { - fmt.Fprintf(os.Stderr, "%s\n\nAre you sure you want to continue? [Y/n]: ", msg) + fmt.Fprintf(os.Stderr, "%s\n\nAre you sure you want to continue? (y/[N]): ", msg) reader := bufio.NewReader(os.Stdin) r, err := reader.ReadString('\n') if err != nil { return false, err } - return strings.Trim(r, "\n") == "Y", nil + return strings.Trim(r, "\n") == "Y" || strings.Trim(r, "\n") == "y", nil } +// TODO(jason): Move this to an internal package. func GetPassFromTerm(confirm bool) ([]byte, error) { fmt.Fprint(os.Stderr, "Enter password for private key: ") // Unnecessary convert of syscall.Stdin on *nix, but Windows is a uintptr @@ -72,6 +71,7 @@ func GetPassFromTerm(confirm bool) ([]byte, error) { return pw1, nil } +// TODO(jason): Move this to an internal package. func IsTerminal() bool { stat, _ := os.Stdin.Stat() return (stat.Mode() & os.ModeCharDevice) != 0 diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/errors.go b/vendor/github.com/sigstore/cosign/pkg/cosign/errors.go new file mode 100644 index 0000000000..c4ef6c23db --- /dev/null +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/errors.go @@ -0,0 +1,50 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cosign + +import "fmt" + +var ( + // ErrNoMatchingSignatures is the error returned when there are no matching + // signatures during verification. + ErrNoMatchingSignatures = &VerificationError{"no matching signatures"} + + // ErrNoMatchingAttestations is the error returned when there are no + // matching attestations during verification. + ErrNoMatchingAttestations = &VerificationError{"no matching attestations"} +) + +// VerificationError is the type of Go error that is used by cosign to surface +// errors actually related to verification (vs. transient, misconfiguration, +// transport, or authentication related issues). +type VerificationError struct { + message string +} + +// NewVerificationError constructs a new VerificationError in a manner similar +// to fmt.Errorf +func NewVerificationError(msg string, args ...interface{}) error { + return &VerificationError{ + message: fmt.Sprintf(msg, args...), + } +} + +// Assert that we implement error at build time. 
+var _ error = (*VerificationError)(nil) + +// Error implements error +func (ve *VerificationError) Error() string { + return ve.message +} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/fetch.go b/vendor/github.com/sigstore/cosign/pkg/cosign/fetch.go index 80b85762bf..8d1b412464 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/fetch.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/fetch.go @@ -20,14 +20,13 @@ import ( "crypto/x509" "encoding/json" "fmt" - "io/ioutil" + "os" "runtime" "github.com/google/go-containerregistry/pkg/name" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/cosign/bundle" ociremote "github.com/sigstore/cosign/pkg/oci/remote" - "knative.dev/pkg/pool" + "golang.org/x/sync/errgroup" ) type SignedPayload struct { @@ -69,21 +68,22 @@ func FetchSignaturesForReference(ctx context.Context, ref name.Reference, opts . sigs, err := simg.Signatures() if err != nil { - return nil, errors.Wrap(err, "remote image") + return nil, fmt.Errorf("remote image: %w", err) } l, err := sigs.Get() if err != nil { - return nil, errors.Wrap(err, "fetching signatures") + return nil, fmt.Errorf("fetching signatures: %w", err) } if len(l) == 0 { - return nil, fmt.Errorf("no signatures associated with %v", ref) + return nil, fmt.Errorf("no signatures associated with %s", ref) } - g := pool.New(runtime.NumCPU()) signatures := make([]SignedPayload, len(l)) + var g errgroup.Group + g.SetLimit(runtime.NumCPU()) for i, sig := range l { i, sig := i, sig - g.Go(func() (err error) { + g.Go(func() error { signatures[i].Payload, err = sig.Payload() if err != nil { return err @@ -119,27 +119,24 @@ func FetchAttestationsForReference(ctx context.Context, ref name.Reference, opts atts, err := simg.Attestations() if err != nil { - return nil, errors.Wrap(err, "remote image") + return nil, fmt.Errorf("remote image: %w", err) } l, err := atts.Get() if err != nil { - return nil, errors.Wrap(err, "fetching attestations") + return nil, fmt.Errorf("fetching attestations: %w", err) } if len(l) == 0 { - return nil, fmt.Errorf("no attestations associated with %v", ref) + return nil, fmt.Errorf("no attestations associated with %s", ref) } - g := pool.New(runtime.NumCPU()) attestations := make([]AttestationPayload, len(l)) + var g errgroup.Group + g.SetLimit(runtime.NumCPU()) for i, att := range l { i, att := i, att - g.Go(func() (err error) { + g.Go(func() error { attestPayload, _ := att.Payload() - err = json.Unmarshal(attestPayload, &attestations[i]) - if err != nil { - return err - } - return err + return json.Unmarshal(attestPayload, &attestations[i]) }) } if err := g.Wait(); err != nil { @@ -151,9 +148,9 @@ func FetchAttestationsForReference(ctx context.Context, ref name.Reference, opts // FetchLocalSignedPayloadFromPath fetches a local signed payload from a path to a file func FetchLocalSignedPayloadFromPath(path string) (*LocalSignedPayload, error) { - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) if err != nil { - return nil, errors.Wrapf(err, "reading %s", path) + return nil, fmt.Errorf("reading %s: %w", path, err) } var b *LocalSignedPayload if err := json.Unmarshal(contents, &b); err != nil { diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/git/github/github.go b/vendor/github.com/sigstore/cosign/pkg/cosign/git/github/github.go index f92b733af9..6e41603d54 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/git/github/github.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/git/github/github.go @@ -18,14 +18,15 @@ package github 
import ( "context" "encoding/base64" + "errors" "fmt" "io" "net/http" "os" "strings" - "github.com/google/go-github/v42/github" - "github.com/pkg/errors" + "github.com/google/go-github/v45/github" + "golang.org/x/crypto/nacl/box" "golang.org/x/oauth2" "github.com/sigstore/cosign/pkg/cosign" @@ -42,11 +43,6 @@ func New() *Gh { } func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) error { - keys, err := cosign.GenerateKeyPair(pf) - if err != nil { - return errors.Wrap(err, "generating key pair") - } - var httpClient *http.Client if token, ok := os.LookupEnv("GITHUB_TOKEN"); ok { ts := oauth2.StaticTokenSource( @@ -54,10 +50,15 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro ) httpClient = oauth2.NewClient(ctx, ts) } else { - return errors.New("could not find \"GITHUB_TOKEN\" env variable") + return errors.New("could not find \"GITHUB_TOKEN\" environment variable") } client := github.NewClient(httpClient) + keys, err := cosign.GenerateKeyPair(pf) + if err != nil { + return fmt.Errorf("generating key pair: %w", err) + } + split := strings.Split(ref, "/") if len(split) < 2 { return errors.New("could not parse scheme, use github://<owner>/<repo> format") } @@ -66,7 +67,7 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro key, getRepoPubKeyResp, err := client.Actions.GetRepoPublicKey(ctx, owner, repo) if err != nil { - return errors.Wrap(err, "could not get repository public key") + return fmt.Errorf("could not get repository public key: %w", err) } if getRepoPubKeyResp.StatusCode < 200 && getRepoPubKeyResp.StatusCode >= 300 { @@ -74,15 +75,14 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro return fmt.Errorf("%s", bodyBytes) } - passwordSecretEnv := &github.EncryptedSecret{ - Name: "COSIGN_PASSWORD", - KeyID: key.GetKeyID(), - EncryptedValue: base64.StdEncoding.EncodeToString(keys.Password()), + encryptedCosignPasswd, err := encryptSecretWithPublicKey(key, "COSIGN_PASSWORD", keys.Password()) + if err != nil { + return fmt.Errorf("could not encrypt the secret: %w", err) } - passwordSecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, passwordSecretEnv) + passwordSecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPasswd) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PASSWORD\" github actions secret") + return fmt.Errorf("could not create \"COSIGN_PASSWORD\" github actions secret: %w", err) } if passwordSecretEnvResp.StatusCode < 200 && passwordSecretEnvResp.StatusCode >= 300 { @@ -92,15 +92,14 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro fmt.Fprintln(os.Stderr, "Password written to COSIGN_PASSWORD github actions secret") - privateKeySecretEnv := &github.EncryptedSecret{ - Name: "COSIGN_PRIVATE_KEY", - KeyID: key.GetKeyID(), - EncryptedValue: base64.StdEncoding.EncodeToString(keys.PrivateBytes), + encryptedCosignPrivKey, err := encryptSecretWithPublicKey(key, "COSIGN_PRIVATE_KEY", keys.PrivateBytes) + if err != nil { + return fmt.Errorf("could not encrypt the secret: %w", err) } - privateKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, privateKeySecretEnv) + privateKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPrivKey) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PRIVATE_KEY\" github actions secret") + return fmt.Errorf("could not create
\"COSIGN_PRIVATE_KEY\" github actions secret: %w", err) } if privateKeySecretEnvResp.StatusCode < 200 && privateKeySecretEnvResp.StatusCode >= 300 { @@ -110,15 +109,14 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro fmt.Fprintln(os.Stderr, "Private key written to COSIGN_PRIVATE_KEY github actions secret") - publicKeySecretEnv := &github.EncryptedSecret{ - Name: "COSIGN_PUBLIC_KEY", - KeyID: key.GetKeyID(), - EncryptedValue: base64.StdEncoding.EncodeToString(keys.PublicBytes), + encryptedCosignPubKey, err := encryptSecretWithPublicKey(key, "COSIGN_PUBLIC_KEY", keys.PublicBytes) + if err != nil { + return fmt.Errorf("could not encrypt the secret: %w", err) } - publicKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, publicKeySecretEnv) + publicKeySecretEnvResp, err := client.Actions.CreateOrUpdateRepoSecret(ctx, owner, repo, encryptedCosignPubKey) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PUBLIC_KEY\" github actions secret") + return fmt.Errorf("could not create \"COSIGN_PUBLIC_KEY\" github actions secret: %w", err) } if publicKeySecretEnvResp.StatusCode < 200 && publicKeySecretEnvResp.StatusCode >= 300 { @@ -140,3 +138,29 @@ func (g *Gh) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro func (g *Gh) GetSecret(ctx context.Context, ref string, key string) (string, error) { return "", nil } + +func encryptSecretWithPublicKey(publicKey *github.PublicKey, secretName string, secretValue []byte) (*github.EncryptedSecret, error) { + decodedPubKey, err := base64.StdEncoding.DecodeString(publicKey.GetKey()) + if err != nil { + return nil, fmt.Errorf("failed to decode public key: %w", err) + } + var peersPubKey [32]byte + copy(peersPubKey[:], decodedPubKey[0:32]) + + var rand io.Reader + + eBody, err := box.SealAnonymous(nil, secretValue, &peersPubKey, rand) + if err != nil { + return nil, fmt.Errorf("failed to encrypt body: %w", err) + } + + encryptedString := base64.StdEncoding.EncodeToString(eBody) + keyID := publicKey.GetKeyID() + encryptedSecret := &github.EncryptedSecret{ + Name: secretName, + KeyID: keyID, + EncryptedValue: encryptedString, + } + + return encryptedSecret, nil +} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/git/gitlab/gitlab.go b/vendor/github.com/sigstore/cosign/pkg/cosign/git/gitlab/gitlab.go index c143923515..d06b21785a 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/git/gitlab/gitlab.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/git/gitlab/gitlab.go @@ -17,11 +17,11 @@ package gitlab import ( "context" + "errors" "fmt" "io" "os" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/cosign" "github.com/xanzy/go-gitlab" ) @@ -39,7 +39,7 @@ func New() *Gl { func (g *Gl) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) error { keys, err := cosign.GenerateKeyPair(pf) if err != nil { - return errors.Wrap(err, "generating key pair") + return fmt.Errorf("generating key pair: %w", err) } token, tokenExists := os.LookupEnv("GITLAB_TOKEN") @@ -52,12 +52,12 @@ func (g *Gl) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro if url, baseURLExists := os.LookupEnv("GITLAB_HOST"); baseURLExists { client, err = gitlab.NewClient(token, gitlab.WithBaseURL(url)) if err != nil { - return errors.Wrap(err, "could not create GitLab client") + return fmt.Errorf("could not create GitLab client: %w", err) } } else { client, err = gitlab.NewClient(token) if err != nil { - return errors.Wrap(err, "could not create GitLab 
client") + return fmt.Errorf("could not create GitLab client: %w", err) } } @@ -70,12 +70,12 @@ func (g *Gl) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro EnvironmentScope: gitlab.String("*"), }) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PASSWORD\" variable") + return fmt.Errorf("could not create \"COSIGN_PASSWORD\" variable: %w", err) } if passwordResp.StatusCode < 200 && passwordResp.StatusCode >= 300 { bodyBytes, _ := io.ReadAll(passwordResp.Body) - return errors.Errorf("%s", bodyBytes) + return fmt.Errorf("%s", bodyBytes) } fmt.Fprintln(os.Stderr, "Password written to \"COSIGN_PASSWORD\" variable") @@ -88,12 +88,12 @@ func (g *Gl) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro Masked: gitlab.Bool(false), }) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PRIVATE_KEY\" variable") + return fmt.Errorf("could not create \"COSIGN_PRIVATE_KEY\" variable: %w", err) } if privateKeyResp.StatusCode < 200 && privateKeyResp.StatusCode >= 300 { bodyBytes, _ := io.ReadAll(privateKeyResp.Body) - return errors.Errorf("%s", bodyBytes) + return fmt.Errorf("%s", bodyBytes) } fmt.Fprintln(os.Stderr, "Private key written to \"COSIGN_PRIVATE_KEY\" variable") @@ -106,12 +106,12 @@ func (g *Gl) PutSecret(ctx context.Context, ref string, pf cosign.PassFunc) erro Masked: gitlab.Bool(false), }) if err != nil { - return errors.Wrap(err, "could not create \"COSIGN_PUBLIC_KEY\" variable") + return fmt.Errorf("could not create \"COSIGN_PUBLIC_KEY\" variable: %w", err) } if publicKeyResp.StatusCode < 200 && publicKeyResp.StatusCode >= 300 { bodyBytes, _ := io.ReadAll(publicKeyResp.Body) - return errors.Errorf("%s", bodyBytes) + return fmt.Errorf("%s", bodyBytes) } fmt.Fprintln(os.Stderr, "Public key written to \"COSIGN_PUBLIC_KEY\" variable") @@ -136,25 +136,25 @@ func (g *Gl) GetSecret(ctx context.Context, ref string, key string) (string, err if url, baseURLExists := os.LookupEnv("GITLAB_HOST"); baseURLExists { client, err = gitlab.NewClient(token, gitlab.WithBaseURL(url)) if err != nil { - return varPubKeyValue, errors.Wrap(err, "could not create GitLab client") + return varPubKeyValue, fmt.Errorf("could not create GitLab client: %w", err) } } else { client, err = gitlab.NewClient(token) if err != nil { - return varPubKeyValue, errors.Wrap(err, "could not create GitLab client") + return varPubKeyValue, fmt.Errorf("could not create GitLab client: %w", err) } } varPubKey, pubKeyResp, err := client.ProjectVariables.GetVariable(ref, key, nil) if err != nil { - return varPubKeyValue, errors.Wrap(err, "could not retrieve \"COSIGN_PUBLIC_KEY\" variable") + return varPubKeyValue, fmt.Errorf("could not retrieve \"COSIGN_PUBLIC_KEY\" variable: %w", err) } varPubKeyValue = varPubKey.Value if pubKeyResp.StatusCode < 200 && pubKeyResp.StatusCode >= 300 { bodyBytes, _ := io.ReadAll(pubKeyResp.Body) - return varPubKeyValue, errors.Errorf("%s", bodyBytes) + return varPubKeyValue, fmt.Errorf("%s", bodyBytes) } return varPubKeyValue, nil diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/keys.go b/vendor/github.com/sigstore/cosign/pkg/cosign/keys.go index a0855f7f17..0a4e9398a4 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/keys.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/keys.go @@ -25,11 +25,11 @@ import ( _ "crypto/sha256" // for `crypto.SHA256` "crypto/x509" "encoding/pem" + "errors" "fmt" "os" "path/filepath" - "github.com/pkg/errors" "github.com/theupdateframework/go-tuf/encrypted"
"github.com/sigstore/cosign/pkg/oci/static" @@ -57,16 +57,23 @@ type Keys struct { public crypto.PublicKey } +// TODO(jason): Move this to an internal package. type KeysBytes struct { PrivateBytes []byte PublicBytes []byte password []byte } +func (k *KeysBytes) Password() []byte { + return k.password +} + +// TODO(jason): Move this to an internal package. func GeneratePrivateKey() (*ecdsa.PrivateKey, error) { return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) } +// TODO(jason): Move this to the only place it's used in cmd/cosign/cli/importkeypair, and unexport it. func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { kb, err := os.ReadFile(filepath.Clean(keyPath)) if err != nil { @@ -84,10 +91,10 @@ func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { case RSAPrivateKeyPemType: rsaPk, err := x509.ParsePKCS1PrivateKey(p.Bytes) if err != nil { - return nil, fmt.Errorf("error parsing rsa private key") + return nil, fmt.Errorf("error parsing rsa private key: %w", err) } if err = cryptoutils.ValidatePubKey(rsaPk.Public()); err != nil { - return nil, errors.Wrap(err, "error validating rsa key") + return nil, fmt.Errorf("error validating rsa key: %w", err) } pk = rsaPk case ECPrivateKeyPemType: @@ -96,7 +103,7 @@ func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { return nil, fmt.Errorf("error parsing ecdsa private key") } if err = cryptoutils.ValidatePubKey(ecdsaPk.Public()); err != nil { - return nil, errors.Wrap(err, "error validating ecdsa key") + return nil, fmt.Errorf("error validating ecdsa key: %w", err) } pk = ecdsaPk case PrivateKeyPemType: @@ -107,17 +114,17 @@ func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { switch k := pkcs8Pk.(type) { case *rsa.PrivateKey: if err = cryptoutils.ValidatePubKey(k.Public()); err != nil { - return nil, errors.Wrap(err, "error validating rsa key") + return nil, fmt.Errorf("error validating rsa key: %w", err) } pk = k case *ecdsa.PrivateKey: if err = cryptoutils.ValidatePubKey(k.Public()); err != nil { - return nil, errors.Wrap(err, "error validating ecdsa key") + return nil, fmt.Errorf("error validating ecdsa key: %w", err) } pk = k case ed25519.PrivateKey: if err = cryptoutils.ValidatePubKey(k.Public()); err != nil { - return nil, errors.Wrap(err, "error validating ed25519 key") + return nil, fmt.Errorf("error validating ed25519 key: %w", err) } pk = k default: @@ -132,7 +139,7 @@ func ImportKeyPair(keyPath string, pf PassFunc) (*KeysBytes, error) { func marshalKeyPair(keypair Keys, pf PassFunc) (key *KeysBytes, err error) { x509Encoded, err := x509.MarshalPKCS8PrivateKey(keypair.private) if err != nil { - return nil, errors.Wrap(err, "x509 encoding private key") + return nil, fmt.Errorf("x509 encoding private key: %w", err) } password := []byte{} @@ -167,6 +174,7 @@ func marshalKeyPair(keypair Keys, pf PassFunc) (key *KeysBytes, err error) { }, nil } +// TODO(jason): Move this to an internal package. func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { priv, err := GeneratePrivateKey() if err != nil { @@ -176,10 +184,7 @@ func GenerateKeyPair(pf PassFunc) (*KeysBytes, error) { return marshalKeyPair(Keys{priv, priv.Public()}, pf) } -func (k *KeysBytes) Password() []byte { - return k.password -} - +// TODO(jason): Move this to an internal package. 
func PemToECDSAKey(pemBytes []byte) (*ecdsa.PublicKey, error) { pub, err := cryptoutils.UnmarshalPEMToPublicKey(pemBytes) if err != nil { @@ -192,6 +197,7 @@ return ecdsaPub, nil } +// TODO(jason): Move this to pkg/signature, the only place it's used, and unexport it. func LoadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) { // Decrypt first p, _ := pem.Decode(key) @@ -204,12 +210,12 @@ x509Encoded, err := encrypted.Decrypt(p.Bytes, pass) if err != nil { - return nil, errors.Wrap(err, "decrypt") + return nil, fmt.Errorf("decrypt: %w", err) } pk, err := x509.ParsePKCS8PrivateKey(x509Encoded) if err != nil { - return nil, errors.Wrap(err, "parsing private key") + return nil, fmt.Errorf("parsing private key: %w", err) } switch pk := pk.(type) { case *rsa.PrivateKey: diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/client.go b/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/client.go index 82e9f9c25c..c89a4e0b45 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/client.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/client.go @@ -26,37 +26,18 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -func defaultClientConfig() clientcmd.ClientConfig { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} - return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) -} - -func restClientConfig() (*rest.Config, error) { - kubeCfg := defaultClientConfig() - - restConfig, err := kubeCfg.ClientConfig() +func client() (kubernetes.Interface, error) { + cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), nil).ClientConfig() if clientcmd.IsEmptyConfig(err) { - restConfig, err := rest.InClusterConfig() + cfg, err = rest.InClusterConfig() if err != nil { - return restConfig, fmt.Errorf("error creating REST client config in-cluster: %w", err) + return nil, fmt.Errorf("error creating REST client config in-cluster: %w", err) } - - return restConfig, nil - } - if err != nil { - return restConfig, fmt.Errorf("error creating REST client config: %w", err) - } - - return restConfig, nil -} - -func Client() (kubernetes.Interface, error) { - config, err := restClientConfig() - if err != nil { - return nil, fmt.Errorf("getting client config for Kubernetes client: %w", err) + } else if err != nil { + return nil, fmt.Errorf("error creating REST client config: %w", err) } - return kubernetes.NewForConfig(config) + return kubernetes.NewForConfig(cfg) } func checkImmutableSecretSupported(client kubernetes.Interface) (bool, error) { diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/secret.go b/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/secret.go index 385846fed8..80d1e3abde 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/secret.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/kubernetes/secret.go @@ -16,13 +16,13 @@ package kubernetes import ( "context" + "errors" "fmt" "os" "strings" "k8s.io/utils/pointer" - "github.com/pkg/errors" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,14 +40,14 @@ func GetKeyPairSecret(ctx context.Context, k8sRef string) (*v1.Secret, error) { return nil, err } - client, err := Client()
+ client, err := client() if err != nil { - return nil, errors.Wrap(err, "new for config") + return nil, fmt.Errorf("new for config: %w", err) } var s *v1.Secret if s, err = client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil { - return nil, errors.Wrap(err, "checking if secret exists") + return nil, fmt.Errorf("checking if secret exists: %w", err) } return s, nil @@ -61,32 +61,32 @@ func KeyPairSecret(ctx context.Context, k8sRef string, pf cosign.PassFunc) error // now, generate the key in memory keys, err := cosign.GenerateKeyPair(pf) if err != nil { - return errors.Wrap(err, "generating key pair") + return fmt.Errorf("generating key pair: %w", err) } // create the k8s client - client, err := Client() + client, err := client() if err != nil { - return errors.Wrap(err, "new for config") + return fmt.Errorf("new for config: %w", err) } immutable, err := checkImmutableSecretSupported(client) if err != nil { - return errors.Wrap(err, "check immutable") + return fmt.Errorf("check immutable: %w", err) } var s *v1.Secret if s, err = client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil { if k8serrors.IsNotFound(err) { s, err = client.CoreV1().Secrets(namespace).Create(ctx, secret(keys, namespace, name, nil, immutable), metav1.CreateOptions{}) if err != nil { - return errors.Wrapf(err, "creating secret %s in ns %s", name, namespace) + return fmt.Errorf("creating secret %s in ns %s: %w", name, namespace, err) } } else { - return errors.Wrap(err, "checking if secret exists") + return fmt.Errorf("checking if secret exists: %w", err) } } else { // Update the existing secret s, err = client.CoreV1().Secrets(namespace).Update(ctx, secret(keys, namespace, name, s.Data, immutable), metav1.UpdateOptions{}) if err != nil { - return errors.Wrapf(err, "updating secret %s in ns %s", name, namespace) + return fmt.Errorf("updating secret %s in ns %s: %w", name, namespace, err) } } diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/pkcs11key.go b/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/pkcs11key.go index 6d17ef96d1..f1c7adf33e 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/pkcs11key.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/pkcs11key.go @@ -25,6 +25,7 @@ import ( "crypto/rsa" "crypto/sha256" "crypto/x509" + "errors" "fmt" "io" "os" @@ -33,7 +34,6 @@ import ( "github.com/ThalesIgnite/crypto11" "github.com/miekg/pkcs11" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature" "golang.org/x/term" ) @@ -72,7 +72,7 @@ func GetKeyWithURIConfig(config *Pkcs11UriConfig, askForPinIfNeeded bool) (*Key, } info, err := os.Stat(config.ModulePath) if err != nil { - return nil, errors.Wrap(err, "access modulePath") + return nil, fmt.Errorf("access modulePath: %w", err) } if !info.Mode().IsRegular() { return nil, errors.New("modulePath does not point to a regular file") @@ -93,7 +93,7 @@ func GetKeyWithURIConfig(config *Pkcs11UriConfig, askForPinIfNeeded bool) (*Key, } err := p.Initialize() if err != nil { - return errors.Wrap(err, "initialize PKCS11 module") + return fmt.Errorf("initialize PKCS11 module: %w", err) } defer p.Destroy() defer p.Finalize() @@ -103,18 +103,18 @@ func GetKeyWithURIConfig(config *Pkcs11UriConfig, askForPinIfNeeded bool) (*Key, if config.SlotID != nil { tokenInfo, err = p.GetTokenInfo(uint(*config.SlotID)) if err != nil { - return errors.Wrap(err, "get token info") + return fmt.Errorf("get token info: %w", err) } } else { slots, err := 
p.GetSlotList(true) if err != nil { - return errors.Wrap(err, "get slot list of PKCS11 module") + return fmt.Errorf("get slot list of PKCS11 module: %w", err) } for _, slot := range slots { currentTokenInfo, err := p.GetTokenInfo(slot) if err != nil { - return errors.Wrap(err, "get token info") + return fmt.Errorf("get token info: %w", err) } if currentTokenInfo.Label == config.TokenLabel { tokenInfo = currentTokenInfo @@ -134,7 +134,7 @@ func GetKeyWithURIConfig(config *Pkcs11UriConfig, askForPinIfNeeded bool) (*Key, // nolint:unconvert b, err := term.ReadPassword(int(syscall.Stdin)) if err != nil { - return errors.Wrap(err, "get pin") + return fmt.Errorf("get pin: %w", err) } conf.Pin = string(b) } @@ -196,11 +196,11 @@ func (k *Key) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, er func (k *Key) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error { sig, err := io.ReadAll(signature) if err != nil { - return errors.Wrap(err, "read signature") + return fmt.Errorf("read signature: %w", err) } msg, err := io.ReadAll(message) if err != nil { - return errors.Wrap(err, "read message") + return fmt.Errorf("read message: %w", err) } digest := sha256.Sum256(msg) diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/util.go b/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/util.go index 5d29b6d87d..43c513fa68 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/util.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/pkcs11key/util.go @@ -15,13 +15,12 @@ package pkcs11key import ( + "errors" "fmt" "net/url" "os" "strconv" "strings" - - "github.com/pkg/errors" ) const ( @@ -117,7 +116,7 @@ func (conf *Pkcs11UriConfig) Parse(uriString string) error { uri, err := url.Parse(uriString) if err != nil { - return errors.Wrap(err, "parse uri") + return fmt.Errorf("parse uri: %w", err) } if uri.Scheme != "pkcs11" { return errors.New("invalid uri: not a PKCS11 uri") @@ -129,12 +128,12 @@ func (conf *Pkcs11UriConfig) Parse(uriString string) error { uri.Opaque = strings.ReplaceAll(uri.Opaque, ";", "&") uriPathAttributes, err := url.ParseQuery(uri.Opaque) if err != nil { - return errors.Wrap(err, "parse uri path") + return fmt.Errorf("parse uri path: %w", err) } uri.RawQuery = strings.ReplaceAll(uri.RawQuery, ";", "&") uriQueryAttributes, err := url.ParseQuery(uri.RawQuery) if err != nil { - return errors.Wrap(err, "parse uri query") + return fmt.Errorf("parse uri query: %w", err) } modulePath := uriQueryAttributes.Get("module-path") pinValue := uriQueryAttributes.Get("pin-value") @@ -213,7 +212,7 @@ func (conf *Pkcs11UriConfig) Construct() (string, error) { if conf.TokenLabel != "" { tokenLabel, err = EncodeURIComponent(conf.TokenLabel, true, true) if err != nil { - return "", errors.Wrap(err, "encode token label") + return "", fmt.Errorf("encode token label: %w", err) } uriString += "token=" + tokenLabel } @@ -228,19 +227,19 @@ func (conf *Pkcs11UriConfig) Construct() (string, error) { if len(conf.KeyLabel) != 0 { keyLabel, err = EncodeURIComponent(string(conf.KeyLabel), true, true) if err != nil { - return "", errors.Wrap(err, "encode key label") + return "", fmt.Errorf("encode key label: %w", err) } uriString += ";object=" + keyLabel } modulePath, err = EncodeURIComponent(conf.ModulePath, false, true) if err != nil { - return "", errors.Wrap(err, "encode module path") + return "", fmt.Errorf("encode module path: %w", err) } uriString += "?module-path=" + modulePath if conf.Pin != "" { pinValue, err = 
EncodeURIComponent(conf.Pin, false, true) if err != nil { - return "", errors.Wrap(err, "encode pin") + return "", fmt.Errorf("encode pin: %w", err) } uriString += "&pin-value=" + pinValue } diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/rekor_factory.go b/vendor/github.com/sigstore/cosign/pkg/cosign/rekor_factory.go new file mode 100644 index 0000000000..320fcbf358 --- /dev/null +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/rekor_factory.go @@ -0,0 +1,41 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cosign + +import ( + "context" + + "github.com/sigstore/rekor/pkg/generated/client" +) + +// key is used for associating the Rekor client inside the +// context.Context. +type key struct{} + +// TODO(jason): Rename this to something better than pkg/cosign.Set. +func Set(ctx context.Context, rekorClient *client.Rekor) context.Context { + return context.WithValue(ctx, key{}, rekorClient) +} + +// Get extracts the Rekor client from the context. +// TODO(jason): Rename this to something better than pkg/cosign.Get. +func Get(ctx context.Context) *client.Rekor { + untyped := ctx.Value(key{}) + if untyped == nil { + return nil + } + return untyped.(*client.Rekor) +} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tlog.go b/vendor/github.com/sigstore/cosign/pkg/cosign/tlog.go index 21970b280e..755b0a5ea1 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tlog.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/tlog.go @@ -23,24 +23,27 @@ import ( "crypto/x509" "encoding/base64" "encoding/hex" + "errors" "fmt" "os" + "strconv" "strings" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" - "github.com/google/trillian/merkle/logverifier" - "github.com/google/trillian/merkle/rfc6962" - "github.com/pkg/errors" - "github.com/sigstore/cosign/pkg/cosign/bundle" - "github.com/sigstore/cosign/pkg/cosign/tuf" - "github.com/sigstore/rekor/pkg/generated/client/index" + "github.com/transparency-dev/merkle/proof" + "github.com/transparency-dev/merkle/rfc6962" + "github.com/sigstore/cosign/pkg/cosign/bundle" + "github.com/sigstore/rekor/pkg/generated/client" "github.com/sigstore/rekor/pkg/generated/client/entries" + "github.com/sigstore/rekor/pkg/generated/client/index" "github.com/sigstore/rekor/pkg/generated/models" + "github.com/sigstore/rekor/pkg/types" hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1" + "github.com/sigstore/rekor/pkg/types/intoto" intoto_v001 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.1" + "github.com/sigstore/sigstore/pkg/tuf" ) // This is the rekor public key target name @@ -59,12 +62,17 @@ const ( altRekorPublicKey = "SIGSTORE_REKOR_PUBLIC_KEY" // Add Rekor API Public Key // If specified, will fetch the Rekor Public Key from the specified Rekor - // server and add it to RekorPubKeys. + // server and add it to RekorPubKeys. This ENV var is only for testing + // purposes, as users should distribute keys out of band.
// TODO(vaikas): Implement storing state like Rekor does so that if tree // state ever changes, it will make lots of noise. addRekorPublicKeyFromRekor = "SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY" ) +const treeIDHexStringLen = 16 +const uuidHexStringLen = 64 +const entryIDHexStringLen = treeIDHexStringLen + uuidHexStringLen + // getLogID generates a SHA256 hash of a DER-encoded public key. func getLogID(pub crypto.PublicKey) (string, error) { pubBytes, err := x509.MarshalPKIXPublicKey(pub) @@ -75,48 +83,83 @@ func getLogID(pub crypto.PublicKey) (string, error) { return hex.EncodeToString(digest[:]), nil } +func intotoEntry(ctx context.Context, signature, pubKey []byte) (models.ProposedEntry, error) { + return types.NewProposedEntry(ctx, intoto.KIND, intoto_v001.APIVERSION, types.ArtifactProperties{ + ArtifactBytes: signature, + PublicKeyBytes: pubKey, + }) +} + // GetRekorPubs retrieves trusted Rekor public keys from the embedded or cached // TUF root. If expired, makes a network call to retrieve the updated targets. -func GetRekorPubs(ctx context.Context) (map[string]RekorPubKey, error) { - tufClient, err := tuf.NewFromEnv(ctx) - if err != nil { - return nil, err - } - defer tufClient.Close() - targets, err := tufClient.GetTargetsByMeta(tuf.Rekor, []string{rekorTargetStr}) - if err != nil { - return nil, err - } +// A Rekor client may optionally be provided in case using SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY +// (see below). +// There are two Env variables that can be used to override this behaviour: +// SIGSTORE_REKOR_PUBLIC_KEY - If specified, location of the file that contains +// the Rekor Public Key on local filesystem +// SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY - If specified, fetches the Rekor public +// key from the Rekor server using the provided rekorClient. +// TODO: Rename SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY to be test-only or remove.
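// [Editor's sketch, not part of the upstream patch] A caller exercising the two
// overrides described above might look like this (error handling elided; the
// key path is hypothetical and ctx/rekorClient are assumed in scope):
//
//	// File-based override: the key is read from local disk; no Rekor client
//	// is needed, so nil is passed.
//	os.Setenv("SIGSTORE_REKOR_PUBLIC_KEY", "/path/to/rekor.pub")
//	keys, _ := cosign.GetRekorPubs(ctx, nil)
//
//	// Test-only API override: only takes effect when a non-nil Rekor client
//	// is also provided.
//	os.Setenv("SIGSTORE_TRUST_REKOR_API_PUBLIC_KEY", "1")
//	keys, _ = cosign.GetRekorPubs(ctx, rekorClient)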
+func GetRekorPubs(ctx context.Context, rekorClient *client.Rekor) (map[string]RekorPubKey, error) { publicKeys := make(map[string]RekorPubKey) altRekorPub := os.Getenv(altRekorPublicKey) + if altRekorPub != "" { - fmt.Fprintf(os.Stderr, "**Warning** Using a non-standard public key for Rekor: %s\n", altRekorPub) raw, err := os.ReadFile(altRekorPub) if err != nil { - return nil, errors.Wrap(err, "error reading alternate Rekor public key file") + return nil, fmt.Errorf("error reading alternate Rekor public key file: %w", err) } extra, err := PemToECDSAKey(raw) if err != nil { - return nil, errors.Wrap(err, "error converting PEM to ECDSAKey") + return nil, fmt.Errorf("error converting PEM to ECDSAKey: %w", err) } keyID, err := getLogID(extra) if err != nil { - return nil, errors.Wrap(err, "error generating log ID") + return nil, fmt.Errorf("error generating log ID: %w", err) } publicKeys[keyID] = RekorPubKey{PubKey: extra, Status: tuf.Active} } else { + tufClient, err := tuf.NewFromEnv(ctx) + if err != nil { + return nil, err + } + targets, err := tufClient.GetTargetsByMeta(tuf.Rekor, []string{rekorTargetStr}) + if err != nil { + return nil, err + } for _, t := range targets { rekorPubKey, err := PemToECDSAKey(t.Target) if err != nil { - return nil, errors.Wrap(err, "pem to ecdsa") + return nil, fmt.Errorf("pem to ecdsa: %w", err) } keyID, err := getLogID(rekorPubKey) if err != nil { - return nil, errors.Wrap(err, "error generating log ID") + return nil, fmt.Errorf("error generating log ID: %w", err) } publicKeys[keyID] = RekorPubKey{PubKey: rekorPubKey, Status: t.Status} } } + + // If we have a Rekor client and we've been told to fetch the Public Key from Rekor, + // additionally fetch it here. + addRekorPublic := os.Getenv(addRekorPublicKeyFromRekor) + if addRekorPublic != "" && rekorClient != nil { + fmt.Fprintf(os.Stderr, "**Warning ('%s' is only for testing)** Fetching public key from Rekor API directly\n", addRekorPublicKeyFromRekor) + pubOK, err := rekorClient.Pubkey.GetPublicKey(nil) + if err != nil { + return nil, fmt.Errorf("unable to fetch rekor public key from rekor: %w", err) + } + pubFromAPI, err := PemToECDSAKey([]byte(pubOK.Payload)) + if err != nil { + return nil, fmt.Errorf("error converting rekor PEM public key from rekor to ECDSAKey: %w", err) + } + keyID, err := getLogID(pubFromAPI) + if err != nil { + return nil, fmt.Errorf("error generating log ID: %w", err) + } + publicKeys[keyID] = RekorPubKey{PubKey: pubFromAPI, Status: tuf.Active} + } + if len(publicKeys) == 0 { return nil, errors.New("none of the Rekor public keys have been found") } @@ -136,12 +179,12 @@ func TLogUpload(ctx context.Context, rekorClient *client.Rekor, signature, paylo // TLogUploadInTotoAttestation will upload an in-toto entry for the signature and public key to the transparency log.
func TLogUploadInTotoAttestation(ctx context.Context, rekorClient *client.Rekor, signature, pemBytes []byte) (*models.LogEntryAnon, error) { - e := intotoEntry(signature, pemBytes) - returnVal := models.Intoto{ - APIVersion: swag.String(e.APIVersion()), - Spec: e.IntotoObj, + e, err := intotoEntry(ctx, signature, pemBytes) + if err != nil { + return nil, err } - return doUpload(ctx, rekorClient, &returnVal) + + return doUpload(ctx, rekorClient, e) } func doUpload(ctx context.Context, rekorClient *client.Rekor, pe models.ProposedEntry) (*models.LogEntryAnon, error) { @@ -171,18 +214,6 @@ func doUpload(ctx context.Context, rekorClient *client.Rekor, pe models.Proposed return nil, errors.New("bad response from server") } -func intotoEntry(signature, pubKey []byte) intoto_v001.V001Entry { - pub := strfmt.Base64(pubKey) - return intoto_v001.V001Entry{ - IntotoObj: models.IntotoV001Schema{ - Content: &models.IntotoV001SchemaContent{ - Envelope: string(signature), - }, - PublicKey: &pub, - }, - } -} - func rekorEntry(payload, signature, pubKey []byte) hashedrekord_v001.V001Entry { // TODO: Signatures created on a digest using a hash algorithm other than SHA256 will fail // upload right now. Plumb information on the hash algorithm used when signing from the @@ -214,32 +245,108 @@ func ComputeLeafHash(e *models.LogEntryAnon) ([]byte, error) { return rfc6962.DefaultHasher.HashLeaf(entryBytes), nil } -func verifyUUID(uuid string, e models.LogEntryAnon) error { - entryUUID, _ := hex.DecodeString(uuid) +func getUUID(entryUUID string) (string, error) { + switch len(entryUUID) { + case uuidHexStringLen: + if _, err := hex.DecodeString(entryUUID); err != nil { + return "", fmt.Errorf("uuid %v is not a valid hex string: %w", entryUUID, err) + } + return entryUUID, nil + case entryIDHexStringLen: + uid := entryUUID[len(entryUUID)-uuidHexStringLen:] + return getUUID(uid) + default: + return "", fmt.Errorf("invalid ID len %v for %v", len(entryUUID), entryUUID) + } +} + +func getTreeUUID(entryUUID string) (string, error) { + switch len(entryUUID) { + case uuidHexStringLen: + // No Tree ID provided + return "", nil + case entryIDHexStringLen: + tid := entryUUID[:treeIDHexStringLen] + return getTreeUUID(tid) + case treeIDHexStringLen: + // Check that it's a valid int64 in hex (base 16) + i, err := strconv.ParseInt(entryUUID, 16, 64) + if err != nil { + return "", fmt.Errorf("could not convert treeID %v to int64: %w", entryUUID, err) + } + // Check for invalid TreeID values + if i == 0 { + return "", fmt.Errorf("0 is not a valid TreeID") + } + return entryUUID, nil + default: + return "", fmt.Errorf("invalid ID len %v for %v", len(entryUUID), entryUUID) + } +} + +// Validates UUID and also TreeID if present. +func isExpectedResponseUUID(requestEntryUUID string, responseEntryUUID string, treeid string) error { + // Compare UUIDs + requestUUID, err := getUUID(requestEntryUUID) + if err != nil { + return err + } + responseUUID, err := getUUID(responseEntryUUID) + if err != nil { + return err + } + if requestUUID != responseUUID { + return fmt.Errorf("expected EntryUUID %s got UUID %s", requestEntryUUID, responseEntryUUID) + } + // Compare tree ID if it is in the request.
+ requestTreeID, err := getTreeUUID(requestEntryUUID) + if err != nil { + return err + } + if requestTreeID != "" { + tid, err := getTreeUUID(treeid) + if err != nil { + return err + } + if requestTreeID != tid { + return fmt.Errorf("expected EntryUUID %s got UUID %s from Tree %s", requestEntryUUID, responseEntryUUID, treeid) + } + } + return nil +} + +func verifyUUID(entryUUID string, e models.LogEntryAnon) error { + // Verify and get the UUID. + uid, err := getUUID(entryUUID) + if err != nil { + return err + } + uuid, _ := hex.DecodeString(uid) // Verify leaf hash matches hash of the entry body. computedLeafHash, err := ComputeLeafHash(&e) if err != nil { return err } - if !bytes.Equal(computedLeafHash, entryUUID) { - return fmt.Errorf("computed leaf hash did not match entry UUID") + if !bytes.Equal(computedLeafHash, uuid) { + return fmt.Errorf("computed leaf hash did not match UUID") } return nil } -func GetTlogEntry(ctx context.Context, rekorClient *client.Rekor, uuid string) (*models.LogEntryAnon, error) { +func GetTlogEntry(ctx context.Context, rekorClient *client.Rekor, entryUUID string) (*models.LogEntryAnon, error) { params := entries.NewGetLogEntryByUUIDParamsWithContext(ctx) - params.SetEntryUUID(uuid) + params.SetEntryUUID(entryUUID) resp, err := rekorClient.Entries.GetLogEntryByUUID(params) if err != nil { return nil, err } for k, e := range resp.Payload { - // Check that body hash matches UUID - if k != uuid { - return nil, fmt.Errorf("unexpected entry returned from rekor server") + // Validate that request EntryUUID matches the response UUID and response Tree ID + if err := isExpectedResponseUUID(entryUUID, k, *e.LogID); err != nil { + return nil, fmt.Errorf("unexpected entry returned from rekor server: %w", err) } + // Check that body hash matches UUID if err := verifyUUID(k, e); err != nil { return nil, err } @@ -252,18 +359,17 @@ func proposedEntry(b64Sig string, payload, pubKey []byte) ([]models.ProposedEntr var proposedEntry []models.ProposedEntry signature, err := base64.StdEncoding.DecodeString(b64Sig) if err != nil { - return nil, errors.Wrap(err, "decoding base64 signature") + return nil, fmt.Errorf("decoding base64 signature: %w", err) } // The fact that there's no signature (or empty rather), implies // that this is an Attestation that we're verifying. if len(signature) == 0 { - te := intotoEntry(payload, pubKey) - entry := &models.Intoto{ - APIVersion: swag.String(te.APIVersion()), - Spec: te.IntotoObj, + e, err := intotoEntry(context.Background(), payload, pubKey) + if err != nil { + return nil, err } - proposedEntry = []models.ProposedEntry{entry} + proposedEntry = []models.ProposedEntry{e} } else { re := rekorEntry(payload, signature, pubKey) entry := &models.Hashedrekord{ @@ -288,7 +394,7 @@ func FindTlogEntry(ctx context.Context, rekorClient *client.Rekor, b64Sig string searchParams.SetEntry(&searchLogQuery) resp, err := rekorClient.Entries.SearchLogQuery(searchParams) if err != nil { - return nil, errors.Wrap(err, "searching log query") + return nil, fmt.Errorf("searching log query: %w", err) } if len(resp.Payload) == 0 { return nil, errors.New("signature not found in transparency log") @@ -326,6 +432,7 @@ func FindTLogEntriesByPayload(ctx context.Context, rekorClient *client.Rekor, pa return searchIndex.GetPayload(), nil } +// VerifyTLogEntry verifies a TLog entry.
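// [Editor's sketch, not part of the upstream patch] The getUUID/getTreeUUID
// helpers above accept either a bare 64-hex-char UUID or an 80-hex-char entry
// ID whose first 16 chars are the tree ID. With a hypothetical, well-formed ID
// (the helpers are unexported, so this only runs inside the package):
//
//	entryID := strings.Repeat("1", 16) + strings.Repeat("b", 64)
//	uuid, _ := getUUID(entryID)     // the 64 trailing chars
//	tree, _ := getTreeUUID(entryID) // the 16 leading chars; must parse as a non-zero int64 in base 16
//	uuidOnly, _ := getUUID(strings.Repeat("b", 64)) // bare UUID, no tree ID present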
func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.LogEntryAnon) error { if e.Verification == nil || e.Verification.InclusionProof == nil { return errors.New("inclusion proof not provided") @@ -345,9 +452,9 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L leafHash := rfc6962.DefaultHasher.HashLeaf(entryBytes) // Verify the inclusion proof. - v := logverifier.New(rfc6962.DefaultHasher) - if err := v.VerifyInclusionProof(*e.Verification.InclusionProof.LogIndex, *e.Verification.InclusionProof.TreeSize, hashes, rootHash, leafHash); err != nil { - return errors.Wrap(err, "verifying inclusion proof") + if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(*e.Verification.InclusionProof.LogIndex), uint64(*e.Verification.InclusionProof.TreeSize), + leafHash, hashes, rootHash); err != nil { + return fmt.Errorf("verifying inclusion proof: %w", err) } // Verify rekor's signature over the SET. @@ -358,26 +465,9 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L LogID: *e.LogID, } - rekorPubKeys, err := GetRekorPubs(ctx) + rekorPubKeys, err := GetRekorPubs(ctx, rekorClient) if err != nil { - return errors.Wrap(err, "unable to fetch Rekor public keys from TUF repository") - } - - addRekorPublic := os.Getenv(addRekorPublicKeyFromRekor) - if addRekorPublic != "" { - pubOK, err := rekorClient.Pubkey.GetPublicKey(nil) - if err != nil { - return errors.Wrap(err, "unable to fetch rekor public key from rekor") - } - pubFromAPI, err := PemToECDSAKey([]byte(pubOK.Payload)) - if err != nil { - return errors.Wrap(err, "error converting rekor PEM public key from rekor to ECDSAKey") - } - keyID, err := getLogID(pubFromAPI) - if err != nil { - return errors.Wrap(err, "error generating log ID") - } - rekorPubKeys[keyID] = RekorPubKey{PubKey: pubFromAPI, Status: tuf.Active} + return fmt.Errorf("unable to fetch Rekor public keys: %w", err) } pubKey, ok := rekorPubKeys[payload.LogID] @@ -386,7 +476,7 @@ func VerifyTLogEntry(ctx context.Context, rekorClient *client.Rekor, e *models.L } err = VerifySET(payload, []byte(e.Verification.SignedEntryTimestamp), pubKey.PubKey) if err != nil { - return errors.Wrap(err, "verifying signedEntryTimestamp") + return fmt.Errorf("verifying signedEntryTimestamp: %w", err) } if pubKey.Status != tuf.Active { fmt.Fprintf(os.Stderr, "**Info** Successfully verified Rekor entry using an expired verification key\n") diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/client.go b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/client.go deleted file mode 100644 index b37902bc3b..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/client.go +++ /dev/null @@ -1,618 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tuf - -import ( - "bytes" - "context" - "embed" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/theupdateframework/go-tuf/client" - tuf_leveldbstore "github.com/theupdateframework/go-tuf/client/leveldbstore" - "github.com/theupdateframework/go-tuf/data" - "github.com/theupdateframework/go-tuf/util" -) - -const ( - DefaultRemoteRoot = "sigstore-tuf-root" - TufRootEnv = "TUF_ROOT" - SigstoreNoCache = "SIGSTORE_NO_CACHE" -) - -type TUF struct { - client *client.Client - targets targetImpl - local client.LocalStore - remote client.RemoteStore - embedded bool // local embedded or cache - mirror string // location of mirror -} - -// JSON output representing the configured root status -type RootStatus struct { - Local string `json:"local"` - Remote string `json:"remote"` - Metadata map[string]MetadataStatus `json:"metadata"` - Targets []string `json:"targets"` -} - -type MetadataStatus struct { - Version int `json:"version"` - Size int `json:"len"` - Expiration string `json:"expiration"` - Error string `json:"error"` -} - -type TargetFile struct { - Target []byte - Status StatusKind -} - -type customMetadata struct { - Usage UsageKind `json:"usage"` - Status StatusKind `json:"status"` -} - -type sigstoreCustomMetadata struct { - Sigstore customMetadata `json:"sigstore"` -} - -type signedMeta struct { - Type string `json:"_type"` - Expires time.Time `json:"expires"` - Version int64 `json:"version"` -} - -// RemoteCache contains information to cache on the location of the remote -// repository. -type remoteCache struct { - Mirror string `json:"mirror"` -} - -func getExpiration(metadata []byte) (*time.Time, error) { - s := &data.Signed{} - if err := json.Unmarshal(metadata, s); err != nil { - return nil, err - } - sm := &signedMeta{} - if err := json.Unmarshal(s.Signed, sm); err != nil { - return nil, err - } - return &sm.Expires, nil -} - -func getVersion(metadata []byte) (int64, error) { - s := &data.Signed{} - if err := json.Unmarshal(metadata, s); err != nil { - return 0, err - } - sm := &signedMeta{} - if err := json.Unmarshal(s.Signed, sm); err != nil { - return 0, err - } - return sm.Version, nil -} - -var isExpiredTimestamp = func(metadata []byte) bool { - expiration, err := getExpiration(metadata) - if err != nil { - return true - } - return time.Until(*expiration) <= 0 -} - -func getMetadataStatus(b []byte) (*MetadataStatus, error) { - expires, err := getExpiration(b) - if err != nil { - return nil, err - } - version, err := getVersion(b) - if err != nil { - return nil, err - } - return &MetadataStatus{ - Size: len(b), - Expiration: expires.Format(time.RFC822), - Version: int(version), - }, nil -} - -func (t *TUF) getRootStatus() (*RootStatus, error) { - local := "embedded" - if !t.embedded { - local = rootCacheDir() - } - status := &RootStatus{ - Local: local, - Remote: t.mirror, - Metadata: make(map[string]MetadataStatus), - Targets: []string{}, - } - - // Get targets - targets, err := t.client.Targets() - if err != nil { - return nil, err - } - for t := range targets { - status.Targets = append(status.Targets, t) - } - - // Get metadata expiration - trustedMeta, err := t.local.GetMeta() - if err != nil { - return nil, errors.Wrap(err, "getting trusted meta") - } - for role, md := range trustedMeta { - mdStatus, err := getMetadataStatus(md) - if err != nil { - status.Metadata[role] = MetadataStatus{Error: err.Error()} - continue - } - 
status.Metadata[role] = *mdStatus - } - - return status, nil -} - -func getRoot(meta map[string]json.RawMessage) (json.RawMessage, error) { - trustedRoot, ok := meta["root.json"] - if ok { - return trustedRoot, nil - } - // On first initialize, there will be no root in the TUF DB, so read from embedded. - trustedRoot, err := embeddedRootRepo.ReadFile(path.Join("repository", "root.json")) - if err != nil { - return nil, err - } - return trustedRoot, nil -} - -// GetRootStatus gets the current root status for info logging -func GetRootStatus(ctx context.Context) (*RootStatus, error) { - t, err := NewFromEnv(ctx) - if err != nil { - return nil, err - } - defer t.Close() - return t.getRootStatus() -} - -// Close closes the local TUF store. Should only be called once per client. -func (t *TUF) Close() error { - return t.local.Close() -} - -// initializeTUF creates a TUF client using the following params: -// * embed: indicates using the embedded metadata and in-memory file updates. -// When this is false, this uses a filesystem cache. -// * mirror: provides a reference to a remote GCS or HTTP mirror. -// * root: provides an external initial root.json. When this is not provided, this -// defaults to the embedded root.json. -// * forceUpdate: indicates checking the remote for an update, even when the local -// timestamp.json is up to date. -func initializeTUF(ctx context.Context, embed bool, mirror string, root []byte, forceUpdate bool) (*TUF, error) { - t := &TUF{ - mirror: mirror, - embedded: embed, - } - - var err error - if t.embedded { - t.local, err = embeddedLocalStore() - if err != nil { - return nil, err - } - t.targets = newEmbeddedImpl() - } else { - tufDB := filepath.Join(rootCacheDir(), "tuf.db") - t.local, err = localStore(tufDB) - if err != nil { - return nil, err - } - t.targets = newFileImpl() - } - - t.remote, err = remoteFromMirror(ctx, t.mirror) - if err != nil { - t.Close() - return nil, err - } - - t.client = client.NewClient(t.local, t.remote) - - trustedMeta, err := t.local.GetMeta() - if err != nil { - t.Close() - return nil, errors.Wrap(err, "getting trusted meta") - } - - if root == nil { - root, err = getRoot(trustedMeta) - if err != nil { - t.Close() - return nil, errors.Wrap(err, "getting trusted root") - } - } - - if err := t.client.InitLocal(root); err != nil { - t.Close() - return nil, errors.Wrap(err, "unable to initialize client, local cache may be corrupt") - } - - // We have our local store, whether it was embedded or not! - // Now check to see if it needs to be updated. - trustedTimestamp, ok := trustedMeta["timestamp.json"] - if ok && !isExpiredTimestamp(trustedTimestamp) && !forceUpdate { - return t, nil - } - - // Update when timestamp is out of date. - if err := t.updateMetadataAndDownloadTargets(); err != nil { - t.Close() - return nil, errors.Wrap(err, "updating local metadata and targets") - } - - return t, err -} - -func NewFromEnv(ctx context.Context) (*TUF, error) { - // Get local and mirror from env - tufDB := filepath.Join(rootCacheDir(), "tuf.db") - var embed bool - - // Check for the current local. - _, statErr := os.Stat(tufDB) - switch { - case os.IsNotExist(statErr): - // There is no root at the location, use embedded. - embed = true - case statErr != nil: - // Some other error, bail - return nil, statErr - default: - // There is a root! Happy path. - embed = false - } - - // Check for the current remote mirror. 
- mirror := DefaultRemoteRoot - b, err := os.ReadFile(cachedRemote(rootCacheDir())) - if err == nil { - remoteInfo := remoteCache{} - if err := json.Unmarshal(b, &remoteInfo); err == nil { - mirror = remoteInfo.Mirror - } - } - - // Initializes a new TUF object from the local cache or defaults. - return initializeTUF(ctx, embed, mirror, nil, false) -} - -func Initialize(ctx context.Context, mirror string, root []byte) error { - // Initialize the client. Force an update. - t, err := initializeTUF(ctx, false, mirror, root, true) - if err != nil { - return err - } - t.Close() - - // Store the remote for later. - remoteInfo := &remoteCache{Mirror: mirror} - b, err := json.Marshal(remoteInfo) - if err != nil { - return err - } - if err := os.WriteFile(cachedRemote(rootCacheDir()), b, 0600); err != nil { - return errors.Wrap(err, "storing remote") - } - return nil -} - -func (t *TUF) GetTarget(name string) ([]byte, error) { - // Get valid target metadata. Does a local verification. - validMeta, err := t.client.Target(name) - if err != nil { - return nil, errors.Wrap(err, "error verifying local metadata; local cache may be corrupt") - } - - targetBytes, err := t.targets.Get(name) - if err != nil { - return nil, err - } - - localMeta, err := util.GenerateTargetFileMeta(bytes.NewReader(targetBytes)) - if err != nil { - return nil, err - } - if err := util.TargetFileMetaEqual(localMeta, validMeta); err != nil { - return nil, err - } - - return targetBytes, nil -} - -// Get target files by a custom usage metadata tag. If there are no files found, -// use the fallback target names to fetch the targets by name. -func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFile, error) { - targets, err := t.client.Targets() - if err != nil { - return nil, errors.Wrap(err, "error getting targets") - } - var matchedTargets []TargetFile - for name, targetMeta := range targets { - // Skip any targets that do not include custom metadata. 
- if targetMeta.Custom == nil { - continue - } - var scm sigstoreCustomMetadata - err := json.Unmarshal(*targetMeta.Custom, &scm) - if err != nil { - fmt.Fprintf(os.Stderr, "**Warning** Custom metadata not configured properly for target %s, skipping target\n", name) - continue - } - if scm.Sigstore.Usage == usage { - target, err := t.GetTarget(name) - if err != nil { - return nil, errors.Wrap(err, "error getting target by usage") - } - matchedTargets = append(matchedTargets, TargetFile{Target: target, Status: scm.Sigstore.Status}) - } - } - if len(matchedTargets) == 0 { - for _, fallback := range fallbacks { - target, err := t.GetTarget(fallback) - if err != nil { - fmt.Fprintf(os.Stderr, "**Warning** Missing fallback target %s, skipping\n", fallback) - continue - } - matchedTargets = append(matchedTargets, TargetFile{Target: target, Status: Active}) - } - } - if len(matchedTargets) == 0 { - return matchedTargets, fmt.Errorf("no matching targets by custom metadata, fallbacks not found: %s", strings.Join(fallbacks, ", ")) - } - return matchedTargets, nil -} - -func localStore(cacheRoot string) (client.LocalStore, error) { - local, err := tuf_leveldbstore.FileLocalStore(cacheRoot) - if err != nil { - return nil, errors.Wrap(err, "creating cached local store") - } - return local, nil -} - -func embeddedLocalStore() (client.LocalStore, error) { - local := client.MemoryLocalStore() - for _, mdFilename := range []string{"root.json", "targets.json", "snapshot.json", "timestamp.json"} { - b, err := embeddedRootRepo.ReadFile(path.Join("repository", mdFilename)) - if err != nil { - return nil, errors.Wrap(err, "reading embedded file") - } - if err := local.SetMeta(mdFilename, b); err != nil { - return nil, errors.Wrap(err, "setting local meta") - } - } - return local, nil -} - -func (t *TUF) updateMetadataAndDownloadTargets() error { - // Download updated targets and cache new metadata and targets in ${TUF_ROOT}. - targetFiles, err := t.client.Update() - if err != nil && !client.IsLatestSnapshot(err) { - // Get some extra information for debugging. What was the state of the metadata - // on the remote? - status := struct { - Mirror string `json:"mirror"` - Metadata map[string]MetadataStatus `json:"metadata"` - }{ - Mirror: t.mirror, - Metadata: make(map[string]MetadataStatus), - } - for _, md := range []string{"root.json", "targets.json", "snapshot.json", "timestamp.json"} { - r, _, err := t.remote.GetMeta(md) - if err != nil { - // May be missing, or failed download. - continue - } - defer r.Close() - b, err := ioutil.ReadAll(r) - if err != nil { - continue - } - mdStatus, err := getMetadataStatus(b) - if err != nil { - continue - } - status.Metadata[md] = *mdStatus - } - b, innerErr := json.MarshalIndent(status, "", "\t") - if innerErr != nil { - return innerErr - } - return fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b)) - } - - // Update the in-memory targets. - // If the cache directory is enabled, update that too. 
- for name := range targetFiles { - buf := bytes.Buffer{} - if err := downloadRemoteTarget(name, t.client, &buf); err != nil { - return err - } - if err := t.targets.Set(name, buf.Bytes()); err != nil { - return err - } - } - - return nil -} - -type targetDestination struct { - buf bytes.Buffer -} - -func (t *targetDestination) Write(b []byte) (int, error) { - return t.buf.Write(b) -} - -func (t *targetDestination) Delete() error { - t.buf = bytes.Buffer{} - return nil -} - -func downloadRemoteTarget(name string, c *client.Client, w io.Writer) error { - dest := targetDestination{} - if err := c.Download(name, &dest); err != nil { - return errors.Wrap(err, "downloading target") - } - _, err := io.Copy(w, &dest.buf) - return err -} - -func rootCacheDir() string { - rootDir := os.Getenv(TufRootEnv) - if rootDir == "" { - home, err := os.UserHomeDir() - if err != nil { - home = "" - } - return filepath.Join(home, ".sigstore", "root") - } - return rootDir -} - -func cachedRemote(cacheRoot string) string { - return filepath.Join(cacheRoot, "remote.json") -} - -func cachedTargetsDir(cacheRoot string) string { - return filepath.Join(cacheRoot, "targets") -} - -type targetImpl interface { - Get(string) ([]byte, error) - setImpl -} - -type setImpl interface { - Set(string, []byte) error -} - -type memoryCache struct { - targets map[string][]byte -} - -func (m *memoryCache) Set(p string, b []byte) error { - if m.targets == nil { - m.targets = map[string][]byte{} - } - m.targets[p] = b - return nil -} - -//go:embed repository -var embeddedRootRepo embed.FS - -type embedded struct { - setImpl -} - -func (e *embedded) Get(p string) ([]byte, error) { - b, err := embeddedRootRepo.ReadFile(path.Join("repository", "targets", p)) - if err != nil { - return nil, err - } - // Unfortunately go:embed appears to somehow replace our line endings on windows, we need to switch them back. - // It should theoretically be safe to do this everywhere - but the files only seem to get mutated on Windows so - // let's only change them back there. 
- if runtime.GOOS == "windows" { - return bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n")), nil - } - return b, nil -} - -type file struct { - base string - setImpl -} - -func (f *file) Get(p string) ([]byte, error) { - fp := filepath.Join(f.base, p) - return os.ReadFile(fp) -} - -type diskCache struct { - base string -} - -func (d *diskCache) Set(p string, b []byte) error { - if err := os.MkdirAll(d.base, 0700); err != nil { - return errors.Wrap(err, "creating targets dir") - } - fp := filepath.Join(d.base, p) - return os.WriteFile(fp, b, 0600) -} - -func noCache() bool { - b, err := strconv.ParseBool(os.Getenv(SigstoreNoCache)) - if err != nil { - return false - } - return b -} - -func newEmbeddedImpl() targetImpl { - e := &embedded{} - if noCache() { - e.setImpl = &memoryCache{} - } else { - e.setImpl = &diskCache{base: cachedTargetsDir(rootCacheDir())} - } - return e -} - -func newFileImpl() targetImpl { - base := cachedTargetsDir(rootCacheDir()) - f := &file{base: base} - if noCache() { - f.setImpl = &memoryCache{} - } else { - f.setImpl = &diskCache{base: base} - } - return f -} - -func remoteFromMirror(ctx context.Context, mirror string) (client.RemoteStore, error) { - if _, parseErr := url.ParseRequestURI(mirror); parseErr != nil { - return GcsRemoteStore(ctx, mirror, nil, nil) - } - return client.HTTPRemoteStore(mirror, nil, nil) -} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/1.root.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/1.root.json deleted file mode 100644 index dcc71f963a..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/1.root.json +++ /dev/null @@ -1,130 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "30450221008a35d51da0f845301a5eac98ad0df00a934f59b709c1eaf81c86be734d9356f80220742942325599749800f52675f6efe124345980a2a636c0dc76f9caf9fc3123b0" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "3045022100ef9157ece2a09baec1eab80adfc00b04da20b1f9a0d1b47c5dabc4506719ef2c022074f72acd57398e4ddc8c2a5040df902961e9615dca48f3fbe38cbb506e500066" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "30450220420fdc9a09cd069b8b15fd8db9cedf7d0dee75871bd1cfee77c926d4120a770002210097553b5ad0d6b4a13902ed37509638bb63a9009f78230cd56c802909ffbfead7" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304502202aaf32e66f90752f658672b085ecfe45cc1ad31ee6cf5c9ad05f3267685f8d88022100b5df02acdaa371123db9d7a42219553fe079b230b168833e951be7ee56ded347" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "304402205d420c7d05c58980c1c9f7d221f53b5334aae27a447d2a91c2ceddd685269749022039ec83e51f8e1779d7f0142dfa4a5bbecfe327fc0b91b7416090fea2416fd53a" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": false, - "expires": "2021-12-18T13:28:12.99008-06:00", - "keys": { - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cbc5cab2684160323c25cd06c3307178a6b1d1c9b949328453ae473c5ba7527e35b13f298b41633382241f3fd8526c262d43b45adee5c618fa0642c82b8a9803" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - 
"keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04a71aacd835dc170ba6db3fa33a1a33dee751d4f8b0217b805b9bd3242921ee93672fdcfd840576c5bb0dc0ed815edf394c1ee48c2b5e02485e59bfc512f3adc7" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04117b33dd265715bf23315e368faa499728db8d1f0a377070a1c7b1aba2cc21be6ab1628e42f2cdd7a35479f2dce07b303a8ba646c55569a8d2a504ba7e86e447" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cc1cd53a61c23e88cc54b488dfae168a257c34fac3e88811c55962b24cffbfecb724447999c54670e365883716302e49da57c79a33cd3e16f81fbc66f0bcdf48" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "048a78a44ac01099890d787e5e62afc29c8ccb69a70ec6549a6b04033b0a8acbfb42ab1ab9c713d225cdb52b858886cf46c8e90a7f3b9e6371882f370c259e1c5b" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": { - "root": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "snapshot": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "targets": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "timestamp": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - } - }, - "spec_version": "1.0", - "version": 1 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/2.root.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/2.root.json deleted file mode 100644 index 386ebe62c1..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/2.root.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": 
"3046022100d3ea59490b253beae0926c6fa63f54336dea1ed700555be9f27ff55cd347639c0221009157d1ba012cead81948a4ab777d355451d57f5c4a2d333fc68d2e3f358093c2" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "304502206eaef40564403ce572c6d062e0c9b0aab5e0223576133e081e1b495e8deb9efd02210080fd6f3464d759601b4afec596bbd5952f3a224cd06ed1cdfc3c399118752ba2" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "304502207baace02f56d8e6069f10b6ff098a26e7f53a7f9324ad62cffa0557bdeb9036c022100fb3032baaa090d0040c3f2fd872571c84479309b773208601d65948df87a9720" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304402205180c01905505dd88acd7a2dad979dd75c979b3722513a7bdedac88c6ae8dbeb022056d1ddf7a192f0b1c2c90ff487de2fb3ec9f0c03f66ea937c78d3b6a493504ca" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "3046022100c8806d4647c514d80fd8f707d3369444c4fd1d0812a2d25f828e564c99790e3f022100bb51f12e862ef17a7d3da2ac103bebc5c7e792237006c4cafacd76267b249c2f" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": false, - "expires": "2022-05-11T19:09:02.663975009Z", - "keys": { - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cbc5cab2684160323c25cd06c3307178a6b1d1c9b949328453ae473c5ba7527e35b13f298b41633382241f3fd8526c262d43b45adee5c618fa0642c82b8a9803" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04fa1a3e42f2300cd3c5487a61509348feb1e936920fef2f83b7cd5dbe7ba045f538725ab8f18a666e6233edb7e0db8766c8dc336633449c5e1bbe0c182b02df0b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04a71aacd835dc170ba6db3fa33a1a33dee751d4f8b0217b805b9bd3242921ee93672fdcfd840576c5bb0dc0ed815edf394c1ee48c2b5e02485e59bfc512f3adc7" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04117b33dd265715bf23315e368faa499728db8d1f0a377070a1c7b1aba2cc21be6ab1628e42f2cdd7a35479f2dce07b303a8ba646c55569a8d2a504ba7e86e447" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "04cc1cd53a61c23e88cc54b488dfae168a257c34fac3e88811c55962b24cffbfecb724447999c54670e365883716302e49da57c79a33cd3e16f81fbc66f0bcdf48" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "048a78a44ac01099890d787e5e62afc29c8ccb69a70ec6549a6b04033b0a8acbfb42ab1ab9c713d225cdb52b858886cf46c8e90a7f3b9e6371882f370c259e1c5b" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451": { - "keyid_hash_algorithms": 
[ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "044c7793ab74b9ddd713054e587b8d9c75c5f6025633d0fef7ca855ed5b8d5a474b23598fe33eb4a63630d526f74d4bdaec8adcb51993ed65652d651d7c49203eb" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": { - "root": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "snapshot": { - "keyids": [ - "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451" - ], - "threshold": 1 - }, - "targets": { - "keyids": [ - "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209" - ], - "threshold": 3 - }, - "timestamp": { - "keyids": [ - "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d" - ], - "threshold": 1 - } - }, - "spec_version": "1.0", - "version": 2 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/snapshot.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/snapshot.json deleted file mode 100644 index c0e3ca3030..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/snapshot.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "signatures": [ - { - "keyid": "fc61191ba8a516fe386c7d6c97d918e1d241e1589729add09b122725b8c32451", - "sig": "3045022100cffd67c2d0339acb9d045f06dfbc81cfedbb49c0b68f95df3d449198d7957a6d022071a28a25789bbe13b8838c88f1f48d401fb15151a0689f51007dbca502408d38" - } - ], - "signed": { - "_type": "snapshot", - "expires": "2022-01-29T00:38:36Z", - "meta": { - "rekor.json": { - "hashes": { - "sha256": "a7412a87f8d7b330e0380b19a4a76c00357c39a1aa7f56fd87445d4e12faafe4", - "sha512": "720cb3c42bac50c5bc3cb7076e730301ef29f1893ea52e25f9393fc05851c7a531638c42d9fc992969805982a2bf51d676e33d28a7382ea589b5a9f87474c63f" - }, - "length": 697, - "version": 1 - }, - "root.json": { - "hashes": { - "sha256": "f5ad897c9414cca99629f400ac3585e41bd8ebb44c5af07fb08dd636a9eced9c", - "sha512": "7445ddfdd338ef786c324fc3d68f75be28cb95b7fb581d2a383e3e5dde18aa17029a5636ec0a22e9631931bbcb34057788311718ea41e21e7cdd3c0de13ede42" - }, - "length": 5297, - "version": 2 - }, - "staging.json": { - "hashes": { - "sha256": "c7f32379c2a76f0ec0af84e86794a8f4fe285e44fb62f336d598810dccdc7343", - "sha512": "5462cb15fe5248a12cc12387a732ad43caf42391361f36113ea3d4b7e5e193cdf39fbe91c309c0691134377cb83afeba50cf6d711537d8280ce16ce9cd8752ba" - }, - "length": 399, - "version": 1 - }, - "targets.json": { - "hashes": { - "sha256": "18d10c07c8d6bd7484772b02dcc988d0abf8a0fa379d5893a502410590c17fe6", - "sha512": "c2ba2a84820288997c8fae264776df7b262dde97c4f9e0320ad354879ce5afabd1d43494734fecffd23253442a14cfe217787de8b65cf7fd1f03130b72a0767c" - }, - "length": 4167, - "version": 2 - } - }, - "spec_version": "1.0", - "version": 10 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/staging.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/staging.json deleted file 
mode 100644 index 084010de75..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/staging.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "signatures": [ - { - "keyid": "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4", - "sig": "304502204486f7b23eadb69df87776ac7a4938ac75a8a2b2e93c84c05d962373837ea91c022100aaeb0fa587430f49618711bb4bd0c1092637c22c223d03c0f1b5a09baea0ed9f" - } - ], - "signed": { - "_type": "targets", - "expires": "2022-02-11T20:10:16Z", - "spec_version": "1.0", - "targets": {}, - "version": 1 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets.json deleted file mode 100644 index b26926a438..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets.json +++ /dev/null @@ -1,117 +0,0 @@ -{ - "signatures": [ - { - "keyid": "2f64fb5eac0cf94dd39bb45308b98920055e9a0d8e012a7220787834c60aef97", - "sig": "3046022100cc1b2ed390e75a112c0fdd6bcbd8bb775300a410f5737ae39996b1858753c8e4022100b591f73370e9378914fb2fab837f700661abd1a74c680f139f6164ec12cb538f" - }, - { - "keyid": "bdde902f5ec668179ff5ca0dabf7657109287d690bf97e230c21d65f99155c62", - "sig": "3045022100bc6c45a125e45507339af96aa63983e847565c769f20d7d71bcd2deb7bd36ea902202bf6bd3b76d434c318287899e53f64b4dc178eb0ba403080f1c4fba88a2177ca" - }, - { - "keyid": "eaf22372f417dd618a46f6c627dbc276e9fd30a004fc94f9be946e73f8bd090b", - "sig": "304502210085d5bc8a158d31536b4e76cddceef25185c7abbe9091b84f5f2b0d615d9b4ee90220136a36fed2d5986c2519b7d165556f20dfe41fddececda48dffa8dec5258cb95" - }, - { - "keyid": "f40f32044071a9365505da3d1e3be6561f6f22d0e60cf51df783999f6c3429cb", - "sig": "304402202fe73a61dfe05b4202bc50f66e52bba3d3475134434dab9576735caed659b03c0220449755a87f4dab9961566f10477204637b2415f87e162b58a23b13327dec53e3" - }, - { - "keyid": "f505595165a177a41750a8e864ed1719b1edfccd5a426fd2c0ffda33ce7ff209", - "sig": "304602210091f453ef75c5178299175734355a65a2fc2d0ee137410f46ba8439d99037fc08022100fc800d15f0b751fa225a77542928f4264835c013054a5c409c674e2ea5a70384" - } - ], - "signed": { - "_type": "targets", - "delegations": { - "keys": { - "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "043463588ae9df33a419d1099761245af52aaf7e638b2047bc0f739a62de9808c50a21ea8a1a273799f857f31a1bcb66e6661dd9d5ac7ac3ca260b0b8130c3fed8" - }, - "scheme": "ecdsa-sha2-nistp256" - }, - "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ecdsa-sha2-nistp256", - "keyval": { - "public": "041b4b13a6e7110292d284c0dbfc3962a12d2a779a800c99aff59c6afe779296943c75d84aa5bad0be28e4061cf93e0cd3d372d9b2f75ea9f29b907cbccd82006f" - }, - "scheme": "ecdsa-sha2-nistp256" - } - }, - "roles": [ - { - "keyids": [ - "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217" - ], - "name": "rekor", - "paths": [ - "rekor.*.pub" - ], - "terminating": true, - "threshold": 1 - }, - { - "keyids": [ - "b811bd53f2d7adcf5d93e6bb4a8ed2e0ca0f83d454a3e51f105c8e8376bc80d4" - ], - "name": "staging", - "paths": [ - "*" - ], - "terminating": false, - "threshold": 1 - } - ] - }, - "expires": "2022-05-11T19:10:16Z", - "spec_version": "1.0", - "targets": { - "artifact.pub": { - "hashes": { - "sha256": "59ebf97a9850aecec4bc39c1f5c1dc46e6490a6b5fd2a6cacdcac0c3a6fc4cbf", - "sha512": 
"308fd1d1d95d7f80aa33b837795251cc3e886792982275e062409e13e4e236ffc34d676682aa96fdc751414de99c864bf132dde71581fa651c6343905e3bf988" - }, - "length": 177 - }, - "ctfe.pub": { - "hashes": { - "sha256": "7fcb94a5d0ed541260473b990b99a6c39864c1fb16f3f3e594a5a3cebbfe138a", - "sha512": "4b20747d1afe2544238ad38cc0cc3010921b177d60ac743767e0ef675b915489bd01a36606c0ff83c06448622d7160f0d866c83d20f0c0f44653dcc3f9aa0bd4" - }, - "length": 177 - }, - "fulcio.crt.pem": { - "hashes": { - "sha256": "f360c53b2e13495a628b9b8096455badcb6d375b185c4816d95a5d746ff29908", - "sha512": "0713252a7fd17f7f3ab12f88a64accf2eb14b8ad40ca711d7fe8b4ecba3b24db9e9dffadb997b196d3867b8f9ff217faf930d80e4dab4e235c7fc3f07be69224" - }, - "length": 744 - }, - "fulcio_v1.crt.pem": { - "hashes": { - "sha256": "f989aa23def87c549404eadba767768d2a3c8d6d30a8b793f9f518a8eafd2cf5", - "sha512": "f2e33a6dc208cee1f51d33bbea675ab0f0ced269617497985f9a0680689ee7073e4b6f8fef64c91bda590d30c129b3070dddce824c05bc165ac9802f0705cab6" - }, - "length": 740 - }, - "rekor.pub": { - "hashes": { - "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd", - "sha512": "0ae7705e02db33e814329746a4a0e5603c5bdcd91c96d072158d71011a2695788866565a2fec0fe363eb72cbcaeda39e54c5fe8d416daf9f3101fdba4217ef35" - }, - "length": 178 - } - }, - "version": 2 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.json deleted file mode 100644 index f86930d537..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "signatures": [ - { - "keyid": "ae0c689c6347ada7359df48934991f4e013193d6ddf3482a5ffb293f74f3b217", - "sig": "3045022076eadd73f6664bac5cc91f12d3a7ddcdd53f9bde661f147651196ff66e7235d1022100f7b3143792405f9e8a75331a05d4128bdf083de302801e99c3d027919a4b03da" - } - ], - "signed": { - "_type": "targets", - "expires": "2022-05-11T19:10:11Z", - "spec_version": "1.0", - "targets": { - "rekor.0.pub": { - "hashes": { - "sha256": "dce5ef715502ec9f3cdfd11f8cc384b31a6141023d3e7595e9908a81cb6241bd", - "sha512": "0ae7705e02db33e814329746a4a0e5603c5bdcd91c96d072158d71011a2695788866565a2fec0fe363eb72cbcaeda39e54c5fe8d416daf9f3101fdba4217ef35" - }, - "length": 178 - } - }, - "version": 1 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/timestamp.json b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/timestamp.json deleted file mode 100644 index 2030e49d9a..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/timestamp.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "signatures": [ - { - "keyid": "b6710623a30c010738e64c5209d367df1c0a18cf90e6ab5292fb01680f83453d", - "sig": "3045022100b4cda580b371e32ce938d03676208b394942f6205eb8ebe8650ec5b366ac5cd7022038ce51f5e3de90849ff728c21179484135b286563f60c906f0db5d999fd676d7" - } - ], - "signed": { - "_type": "timestamp", - "expires": "2022-01-29T00:38:38Z", - "meta": { - "snapshot.json": { - "hashes": { - "sha256": "c4d26ba0e5c0b142c26b9cb11caedac1f29134275190da8f2cb981f2d8a13236", - "sha512": "db3c7a61418fd050a6af04f9d3bb2b359785fd5b4b323516740c76b413df0deae0d81f4bab0abce7abe077584110c00e0f35eeee5155abb7be1b74975794a8fe" - }, - "length": 1657, - "version": 10 - } - }, - "spec_version": "1.0", - "version": 10 - } -} \ No newline at end of file diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/store.go 
b/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/store.go deleted file mode 100644 index ea35403710..0000000000 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/store.go +++ /dev/null @@ -1,78 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tuf - -import ( - "context" - "io" - "path" - - "cloud.google.com/go/storage" - "github.com/theupdateframework/go-tuf/client" - "google.golang.org/api/option" -) - -type GcsRemoteOptions struct { - MetadataPath string - TargetsPath string -} - -type gcsRemoteStore struct { - bucket string - ctx context.Context - client *storage.Client - opts *GcsRemoteOptions -} - -// A remote store for TUF metadata on GCS. -func GcsRemoteStore(ctx context.Context, bucket string, opts *GcsRemoteOptions, client *storage.Client) (client.RemoteStore, error) { - if opts == nil { - opts = &GcsRemoteOptions{} - } - if opts.TargetsPath == "" { - opts.TargetsPath = "targets" - } - store := gcsRemoteStore{ctx: ctx, bucket: bucket, opts: opts, client: client} - if client == nil { - var err error - store.client, err = storage.NewClient(ctx, option.WithoutAuthentication()) - if err != nil { - return nil, err - } - } - return &store, nil -} - -func (h *gcsRemoteStore) GetMeta(name string) (io.ReadCloser, int64, error) { - return h.get(path.Join(h.opts.MetadataPath, name)) -} - -func (h *gcsRemoteStore) GetTarget(name string) (io.ReadCloser, int64, error) { - return h.get(path.Join(h.opts.TargetsPath, name)) -} - -func (h *gcsRemoteStore) get(s string) (io.ReadCloser, int64, error) { - obj := h.client.Bucket(h.bucket).Object(s) - attrs, err := obj.Attrs(h.ctx) - if err != nil { - return nil, 0, client.ErrNotFound{File: s} - } - rc, err := obj.NewReader(h.ctx) - if err != nil { - return nil, 0, err - } - return rc, attrs.Size, nil -} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/verifiers.go b/vendor/github.com/sigstore/cosign/pkg/cosign/verifiers.go index 5764144004..8dbae32f3b 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/verifiers.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/verifiers.go @@ -18,11 +18,11 @@ package cosign import ( "encoding/base64" "encoding/json" + "errors" "fmt" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/in-toto/in-toto-golang/in_toto" - "github.com/pkg/errors" "github.com/secure-systems-lab/go-securesystemslib/dsse" "github.com/sigstore/cosign/pkg/oci" @@ -51,6 +51,7 @@ func SimpleClaimVerifier(sig oci.Signature, imageDigest v1.Hash, annotations map return errors.New("missing or incorrect annotation") } } + return nil } diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/verify.go b/vendor/github.com/sigstore/cosign/pkg/cosign/verify.go index e3b63413dd..ddba36dbdf 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/verify.go +++ b/vendor/github.com/sigstore/cosign/pkg/cosign/verify.go @@ -21,10 +21,10 @@ import ( "crypto/ecdsa" "crypto/sha256" "crypto/x509" - "encoding/asn1" "encoding/base64" "encoding/hex" 
"encoding/json" + "errors" "fmt" "os" "regexp" @@ -33,7 +33,7 @@ import ( "github.com/sigstore/cosign/cmd/cosign/cli/fulcio/fulcioverifier/ctl" cbundle "github.com/sigstore/cosign/pkg/cosign/bundle" - "github.com/sigstore/cosign/pkg/cosign/tuf" + "github.com/sigstore/sigstore/pkg/tuf" "github.com/sigstore/cosign/pkg/blob" "github.com/sigstore/cosign/pkg/oci/static" @@ -42,7 +42,6 @@ import ( "github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/pkg/errors" ssldsse "github.com/secure-systems-lab/go-securesystemslib/dsse" "github.com/sigstore/cosign/pkg/oci" @@ -58,10 +57,13 @@ import ( ) // Identity specifies an issuer/subject to verify a signature against. -// Both Issuer/Subject support regexp. +// Both IssuerRegExp/SubjectRegExp support regexp while Issuer/Subject are for +// strict matching. type Identity struct { - Issuer string - Subject string + Issuer string + Subject string + IssuerRegExp string + SubjectRegExp string } // CheckOpts are the options for checking signatures. @@ -71,6 +73,7 @@ type CheckOpts struct { // Annotations optionally specifies image signature annotations to verify. Annotations map[string]interface{} + // ClaimVerifier, if provided, verifies claims present in the oci.Signature. ClaimVerifier func(sig oci.Signature, imageDigest v1.Hash, annotations map[string]interface{}) error @@ -90,6 +93,18 @@ type CheckOpts struct { CertEmail string // CertOidcIssuer is the OIDC issuer expected for a certificate to be valid. The empty string means any certificate can be valid. CertOidcIssuer string + + // CertGithubWorkflowTrigger is the GitHub Workflow Trigger name expected for a certificate to be valid. The empty string means any certificate can be valid. + CertGithubWorkflowTrigger string + // CertGithubWorkflowSha is the GitHub Workflow SHA expected for a certificate to be valid. The empty string means any certificate can be valid. + CertGithubWorkflowSha string + // CertGithubWorkflowName is the GitHub Workflow Name expected for a certificate to be valid. The empty string means any certificate can be valid. + CertGithubWorkflowName string + // CertGithubWorkflowRepository is the GitHub Workflow Repository expected for a certificate to be valid. The empty string means any certificate can be valid. + CertGithubWorkflowRepository string + // CertGithubWorkflowRef is the GitHub Workflow Ref expected for a certificate to be valid. The empty string means any certificate can be valid. + CertGithubWorkflowRef string + // EnforceSCT requires that a certificate contain an embedded SCT during verification. An SCT is proof of inclusion in a // certificate transparency log. EnforceSCT bool @@ -149,7 +164,7 @@ func verifyOCIAttestation(_ context.Context, verifier signature.Verifier, att pa } if env.PayloadType != types.IntotoPayloadType { - return fmt.Errorf("invalid payloadType %s on envelope. Expected %s", env.PayloadType, types.IntotoPayloadType) + return NewVerificationError("invalid payloadType %s on envelope. 
Expected %s", env.PayloadType, types.IntotoPayloadType) } dssev, err := ssldsse.NewEnvelopeVerifier(&dsse.VerifierAdapter{SignatureVerifier: verifier}) if err != nil { @@ -164,7 +179,7 @@ func verifyOCIAttestation(_ context.Context, verifier signature.Verifier, att pa func ValidateAndUnpackCert(cert *x509.Certificate, co *CheckOpts) (signature.Verifier, error) { verifier, err := signature.LoadVerifier(cert.PublicKey, crypto.SHA256) if err != nil { - return nil, errors.Wrap(err, "invalid certificate found on signature") + return nil, fmt.Errorf("invalid certificate found on signature: %w", err) } // Now verify the cert, then the signature. @@ -172,6 +187,36 @@ func ValidateAndUnpackCert(cert *x509.Certificate, co *CheckOpts) (signature.Ver if err != nil { return nil, err } + + err = CheckCertificatePolicy(cert, co) + if err != nil { + return nil, err + } + + contains, err := ctl.ContainsSCT(cert.Raw) + if err != nil { + return nil, err + } + if co.EnforceSCT && !contains { + return nil, &VerificationError{"certificate does not include required embedded SCT"} + } + if contains { + // handle if chains has more than one chain - grab first and print message + if len(chains) > 1 { + fmt.Fprintf(os.Stderr, "**Info** Multiple valid certificate chains found. Selecting the first to verify the SCT.\n") + } + if err := ctl.VerifyEmbeddedSCT(context.Background(), chains[0]); err != nil { + return nil, err + } + } + + return verifier, nil +} + +// CheckCertificatePolicy checks that the certificate subject and issuer match +// the expected values. +func CheckCertificatePolicy(cert *x509.Certificate, co *CheckOpts) error { + ce := CertExtensions{Cert: cert} if co.CertEmail != "" { emailVerified := false for _, em := range cert.EmailAddresses { @@ -181,38 +226,43 @@ func ValidateAndUnpackCert(cert *x509.Certificate, co *CheckOpts) (signature.Ver } } if !emailVerified { - return nil, errors.New("expected email not found in certificate") + return &VerificationError{"expected email not found in certificate"} } } - if co.CertOidcIssuer != "" { - if getIssuer(cert) != co.CertOidcIssuer { - return nil, errors.New("expected oidc issuer not found in certificate") - } + + if err := validateCertExtensions(ce, co); err != nil { + return err } + issuer := ce.GetIssuer() // If there are identities given, go through them and if one of them // matches, call that good, otherwise, return an error. 
if len(co.Identities) > 0 { for _, identity := range co.Identities { issuerMatches := false + switch { // Check the issuer first - if identity.Issuer != "" { - issuer := getIssuer(cert) - if regex, err := regexp.Compile(identity.Issuer); err != nil { - return nil, fmt.Errorf("malformed issuer in identity: %s : %w", identity.Issuer, err) + case identity.IssuerRegExp != "": + if regex, err := regexp.Compile(identity.IssuerRegExp); err != nil { + return fmt.Errorf("malformed issuer in identity: %s : %w", identity.IssuerRegExp, err) } else if regex.MatchString(issuer) { issuerMatches = true } - } else { + case identity.Issuer != "": + if identity.Issuer == issuer { + issuerMatches = true + } + default: // No issuer constraint on this identity, so checks out issuerMatches = true } // Then the subject subjectMatches := false - if identity.Subject != "" { - regex, err := regexp.Compile(identity.Subject) + switch { + case identity.SubjectRegExp != "": + regex, err := regexp.Compile(identity.SubjectRegExp) if err != nil { - return nil, fmt.Errorf("malformed subject in identity: %s : %w", identity.Subject, err) + return fmt.Errorf("malformed subject in identity: %s : %w", identity.SubjectRegExp, err) } for _, san := range getSubjectAlternateNames(cert) { if regex.MatchString(san) { @@ -220,34 +270,64 @@ func ValidateAndUnpackCert(cert *x509.Certificate, co *CheckOpts) (signature.Ver break } } - } else { + case identity.Subject != "": + for _, san := range getSubjectAlternateNames(cert) { + if san == identity.Subject { + subjectMatches = true + break + } + } + default: // No subject constraint on this identity, so checks out subjectMatches = true } if subjectMatches && issuerMatches { // If both issuer / subject match, return verifier - return verifier, nil + return nil } } - return nil, errors.New("none of the expected identities matched what was in the certificate") + return &VerificationError{"none of the expected identities matched what was in the certificate"} } - contains, err := ctl.ContainsSCT(cert.Raw) - if err != nil { - return nil, err + return nil +} + +func validateCertExtensions(ce CertExtensions, co *CheckOpts) error { + if co.CertOidcIssuer != "" { + if ce.GetIssuer() != co.CertOidcIssuer { + return &VerificationError{"expected oidc issuer not found in certificate"} + } } - if co.EnforceSCT && !contains { - return nil, errors.New("certificate does not include required embedded SCT") + + if co.CertGithubWorkflowTrigger != "" { + if ce.GetCertExtensionGithubWorkflowTrigger() != co.CertGithubWorkflowTrigger { + return &VerificationError{"expected GitHub Workflow Trigger not found in certificate"} + } } - if contains { - // handle if chains has more than one chain - grab first and print message - if len(chains) > 1 { - fmt.Fprintf(os.Stderr, "**Info** Multiple valid certificate chains found. 
Selecting the first to verify the SCT.\n") + + if co.CertGithubWorkflowSha != "" { + if ce.GetExtensionGithubWorkflowSha() != co.CertGithubWorkflowSha { + return &VerificationError{"expected GitHub Workflow SHA not found in certificate"} } - if err := ctl.VerifyEmbeddedSCT(context.Background(), chains[0]); err != nil { - return nil, err + } + + if co.CertGithubWorkflowName != "" { + if ce.GetCertExtensionGithubWorkflowName() != co.CertGithubWorkflowName { + return &VerificationError{"expected GitHub Workflow Name not found in certificate"} } } - return verifier, nil + + if co.CertGithubWorkflowRepository != "" { + if ce.GetCertExtensionGithubWorkflowRepository() != co.CertGithubWorkflowRepository { + return &VerificationError{"expected GitHub Workflow Repository not found in certificate"} + } + } + + if co.CertGithubWorkflowRef != "" { + if ce.GetCertExtensionGithubWorkflowRef() != co.CertGithubWorkflowRef { + return &VerificationError{"expected GitHub Workflow Ref not found in certificate"} + } + } + return nil } // getSubjectAlternateNames returns all of the following for a Certificate. @@ -268,16 +348,6 @@ func getSubjectAlternateNames(cert *x509.Certificate) []string { return sans } -// getIssuer returns the issuer for a Certificate -func getIssuer(cert *x509.Certificate) string { - for _, ext := range cert.Extensions { - if ext.Id.Equal(asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}) { - return string(ext.Value) - } - } - return "" -} - // ValidateAndUnpackCertWithChain creates a Verifier from a certificate. Verifies that the certificate // chains up to the provided root. Chain should start with the parent of the certificate and end with the root. // Optionally verifies the subject and issuer of the certificate. @@ -447,7 +517,7 @@ func verifySignatures(ctx context.Context, sigs oci.Signatures, h v1.Hash, co *C checkedSignatures = append(checkedSignatures, sig) } if len(checkedSignatures) == 0 { - return nil, false, fmt.Errorf("no matching signatures:\n%s", strings.Join(validationErrs, "\n ")) + return nil, false, fmt.Errorf("%w:\n%s", ErrNoMatchingSignatures, strings.Join(validationErrs, "\n ")) } return checkedSignatures, bundleVerified, nil } @@ -462,7 +532,7 @@ func VerifyImageSignature(ctx context.Context, sig oci.Signature, h v1.Hash, co return bundleVerified, err } if cert == nil { - return bundleVerified, errors.New("no certificate found on signature") + return bundleVerified, &VerificationError{"no certificate found on signature"} } // Create a certificate pool for intermediate CA certificates, excluding the root chain, err := sig.Chain() @@ -497,9 +567,9 @@ func VerifyImageSignature(ctx context.Context, sig oci.Signature, h v1.Hash, co } } - bundleVerified, err = VerifyBundle(ctx, sig) + bundleVerified, err = VerifyBundle(ctx, sig, co.RekorClient) if err != nil && co.RekorClient == nil { - return false, errors.Wrap(err, "unable to verify bundle") + return false, fmt.Errorf("unable to verify bundle: %w", err) } if !bundleVerified && co.RekorClient != nil { @@ -641,7 +711,7 @@ func verifyImageAttestations(ctx context.Context, atts oci.Signatures, h v1.Hash return err } if cert == nil { - return errors.New("no certificate found on attestation") + return &VerificationError{"no certificate found on attestation"} } // Create a certificate pool for intermediate CA certificates, excluding the root chain, err := att.Chain() @@ -676,9 +746,9 @@ func verifyImageAttestations(ctx context.Context, atts oci.Signatures, h v1.Hash } } - verified, err := VerifyBundle(ctx, att) + 
verified, err := VerifyBundle(ctx, att, co.RekorClient) if err != nil && co.RekorClient == nil { - return errors.Wrap(err, "unable to verify bundle") + return fmt.Errorf("unable to verify bundle: %w", err) } bundleVerified = bundleVerified || verified @@ -703,7 +773,7 @@ func verifyImageAttestations(ctx context.Context, atts oci.Signatures, h v1.Hash checkedAttestations = append(checkedAttestations, att) } if len(checkedAttestations) == 0 { - return nil, false, fmt.Errorf("no matching attestations:\n%s", strings.Join(validationErrs, "\n ")) + return nil, false, fmt.Errorf("%w:\n%s", ErrNoMatchingAttestations, strings.Join(validationErrs, "\n ")) } return checkedAttestations, bundleVerified, nil } @@ -714,17 +784,17 @@ func CheckExpiry(cert *x509.Certificate, it time.Time) error { return t.Format(time.RFC3339) } if cert.NotAfter.Before(it) { - return fmt.Errorf("certificate expired before signatures were entered in log: %s is before %s", + return NewVerificationError("certificate expired before signatures were entered in log: %s is before %s", ft(cert.NotAfter), ft(it)) } if cert.NotBefore.After(it) { - return fmt.Errorf("certificate was issued after signatures were entered in log: %s is after %s", + return NewVerificationError("certificate was issued after signatures were entered in log: %s is after %s", ft(cert.NotAfter), ft(it)) } return nil } -func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) { +func VerifyBundle(ctx context.Context, sig oci.Signature, rekorClient *client.Rekor) (bool, error) { bundle, err := sig.Bundle() if err != nil { return false, err @@ -736,14 +806,14 @@ func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) { return false, err } - publicKeys, err := GetRekorPubs(ctx) + publicKeys, err := GetRekorPubs(ctx, rekorClient) if err != nil { - return false, errors.Wrap(err, "retrieving rekor public key") + return false, fmt.Errorf("retrieving rekor public key: %w", err) } pubKey, ok := publicKeys[bundle.Payload.LogID] if !ok { - return false, errors.New("rekor log public key not found for payload") + return false, &VerificationError{"rekor log public key not found for payload"} } err = VerifySET(bundle.Payload, bundle.SignedEntryTimestamp, pubKey.PubKey) if err != nil { @@ -762,17 +832,17 @@ func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) { // Verify the cert against the integrated time. // Note that if the caller requires the certificate to be present, it has to ensure that itself. 
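The check that follows pins the verification instant to the bundle's IntegratedTime rather than the wall clock. CheckExpiry is exported, so a caller holding a Rekor bundle can apply the same rule directly; a small sketch:

package main

import (
	"crypto/x509"
	"time"

	"github.com/sigstore/cosign/pkg/cosign"
	cbundle "github.com/sigstore/cosign/pkg/cosign/bundle"
)

// validAtIntegration reports whether cert was valid at the moment the
// Rekor log integrated the entry described by payload.
func validAtIntegration(cert *x509.Certificate, payload cbundle.RekorPayload) error {
	return cosign.CheckExpiry(cert, time.Unix(payload.IntegratedTime, 0))
}

func main() {}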
if err := CheckExpiry(cert, time.Unix(bundle.Payload.IntegratedTime, 0)); err != nil { - return false, errors.Wrap(err, "checking expiry on cert") + return false, fmt.Errorf("checking expiry on cert: %w", err) } } payload, err := sig.Payload() if err != nil { - return false, errors.Wrap(err, "reading payload") + return false, fmt.Errorf("reading payload: %w", err) } signature, err := sig.Base64Signature() if err != nil { - return false, errors.Wrap(err, "reading base64signature") + return false, fmt.Errorf("reading base64signature: %w", err) } alg, bundlehash, err := bundleHash(bundle.Payload.Body.(string), signature) @@ -780,7 +850,7 @@ func VerifyBundle(ctx context.Context, sig oci.Signature) (bool, error) { payloadHash := hex.EncodeToString(h[:]) if alg != "sha256" || bundlehash != payloadHash { - return false, errors.Wrap(err, "matching bundle to payload") + return false, fmt.Errorf("matching bundle to payload: %w", err) } return true, nil } @@ -791,7 +861,7 @@ func compareSigs(bundleBody string, sig oci.Signature) error { // we've returned nil (there are several reasons possible here). actualSig, err := sig.Base64Signature() if err != nil { - return errors.Wrap(err, "base64 signature") + return fmt.Errorf("base64 signature: %w", err) } if actualSig == "" { // NB: empty sig means this is an attestation @@ -799,13 +869,13 @@ func compareSigs(bundleBody string, sig oci.Signature) error { } bundleSignature, err := bundleSig(bundleBody) if err != nil { - return errors.Wrap(err, "failed to extract signature from bundle") + return fmt.Errorf("failed to extract signature from bundle: %w", err) } if bundleSignature == "" { return nil } if bundleSignature != actualSig { - return fmt.Errorf("signature in bundle does not match signature being verified") + return &VerificationError{"signature in bundle does not match signature being verified"} } return nil } @@ -880,7 +950,7 @@ func bundleSig(bundleBody string) (string, error) { bodyDecoded, err := base64.StdEncoding.DecodeString(bundleBody) if err != nil { - return "", errors.Wrap(err, "decoding bundleBody") + return "", fmt.Errorf("decoding bundleBody: %w", err) } // Try Rekord @@ -912,17 +982,17 @@ func bundleSig(bundleBody string) (string, error) { func VerifySET(bundlePayload cbundle.RekorPayload, signature []byte, pub *ecdsa.PublicKey) error { contents, err := json.Marshal(bundlePayload) if err != nil { - return errors.Wrap(err, "marshaling") + return fmt.Errorf("marshaling: %w", err) } canonicalized, err := jsoncanonicalizer.Transform(contents) if err != nil { - return errors.Wrap(err, "canonicalizing") + return fmt.Errorf("canonicalizing: %w", err) } // verify the SET against the public key hash := sha256.Sum256(canonicalized) if !ecdsa.VerifyASN1(pub, hash[:], signature) { - return errors.New("unable to verify") + return &VerificationError{"unable to verify SET"} } return nil } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/internal/signature/layer.go b/vendor/github.com/sigstore/cosign/pkg/oci/internal/signature/layer.go index e042ce1a7d..3a815d3c64 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/internal/signature/layer.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/internal/signature/layer.go @@ -23,7 +23,6 @@ import ( "strings" v1 "github.com/google/go-containerregistry/pkg/v1" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/cosign/bundle" "github.com/sigstore/cosign/pkg/oci" "github.com/sigstore/sigstore/pkg/cryptoutils" @@ -112,7 +111,7 @@ func (s *sigLayer) Bundle() (*bundle.RekorBundle, error) { } var 
b bundle.RekorBundle if err := json.Unmarshal([]byte(val), &b); err != nil { - return nil, errors.Wrap(err, "unmarshaling bundle") + return nil, fmt.Errorf("unmarshaling bundle: %w", err) } return &b, nil } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/layout/write.go b/vendor/github.com/sigstore/cosign/pkg/oci/layout/write.go index ff80766589..2bc1f7f981 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/layout/write.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/layout/write.go @@ -16,10 +16,11 @@ package layout import ( + "fmt" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/layout" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/oci" ) @@ -32,7 +33,7 @@ func WriteSignedImage(path string, si oci.SignedImage) error { } // write the image if err := appendImage(layoutPath, si, imageAnnotation); err != nil { - return errors.Wrap(err, "appending signed image") + return fmt.Errorf("appending signed image: %w", err) } return writeSignedEntity(layoutPath, si) } @@ -48,7 +49,7 @@ func WriteSignedImageIndex(path string, si oci.SignedImageIndex) error { if err := layoutPath.AppendIndex(si, layout.WithAnnotations( map[string]string{kindAnnotation: imageIndexAnnotation}, )); err != nil { - return errors.Wrap(err, "appending signed image index") + return fmt.Errorf("appending signed image index: %w", err) } return writeSignedEntity(layoutPath, si) } @@ -57,22 +58,22 @@ func writeSignedEntity(path layout.Path, se oci.SignedEntity) error { // write the signatures sigs, err := se.Signatures() if err != nil { - return errors.Wrap(err, "getting signatures") + return fmt.Errorf("getting signatures: %w", err) } if !isEmpty(sigs) { if err := appendImage(path, sigs, sigsAnnotation); err != nil { - return errors.Wrap(err, "appending signatures") + return fmt.Errorf("appending signatures: %w", err) } } // write attestations atts, err := se.Attestations() if err != nil { - return errors.Wrap(err, "getting atts") + return fmt.Errorf("getting atts") } if !isEmpty(atts) { if err := appendImage(path, atts, attsAnnotation); err != nil { - return errors.Wrap(err, "appending atts") + return fmt.Errorf("appending atts: %w", err) } } // TODO (priyawadhwa@) and attachments diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/mutate.go b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/mutate.go index 2c6a3f1429..a60b85cbfb 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/mutate.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/mutate.go @@ -242,7 +242,7 @@ func (si *signedImage) Attestations() (oci.Signatures, error) { if err != nil { return nil, err } - return AppendSignatures(replace) + return ReplaceSignatures(replace) } return AppendSignatures(base, si.att) } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signature.go b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signature.go index dd2a7ab680..ed07f4540f 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signature.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signature.go @@ -18,11 +18,11 @@ import ( "bytes" "crypto/x509" "encoding/json" + "fmt" "io" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/cosign/bundle" "github.com/sigstore/cosign/pkg/oci" "github.com/sigstore/cosign/pkg/oci/static" @@ -132,7 +132,7 @@ func Signature(original oci.Signature, 
opts ...SignatureOption) (oci.Signature, so := makeSignatureOption(opts...) oldAnn, err := original.Annotations() if err != nil { - return nil, errors.Wrap(err, "could not get annotations from signature to mutate") + return nil, fmt.Errorf("could not get annotations from signature to mutate: %w", err) } var newAnn map[string]string diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signatures.go b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signatures.go index 53f750d9e0..f5c24e92a1 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signatures.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/mutate/signatures.go @@ -16,6 +16,8 @@ package mutate import ( + "time" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/mutate" @@ -36,10 +38,18 @@ func AppendSignatures(base oci.Signatures, sigs ...oci.Signature) (oci.Signature Annotations: ann, }) } + img, err := mutate.Append(base, adds...) if err != nil { return nil, err } + + // Set the Created date to time of execution + img, err = mutate.CreatedAt(img, v1.Time{Time: time.Now()}) + if err != nil { + return nil, err + } + return &sigAppender{ Image: img, base: base, diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/remote/image.go b/vendor/github.com/sigstore/cosign/pkg/oci/remote/image.go index ce532cd244..5a724a2f15 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/remote/image.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/remote/image.go @@ -25,13 +25,15 @@ import ( "github.com/sigstore/cosign/pkg/oci" ) +var ErrImageNotFound = errors.New("image not found in registry") + // SignedImage provides access to a remote image reference, and its signatures. func SignedImage(ref name.Reference, options ...Option) (oci.SignedImage, error) { o := makeOptions(ref.Context(), options...) ri, err := remoteImage(ref, o.ROpt...) 
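Returning the new ErrImageNotFound sentinel in the error branch below lets callers test with errors.Is instead of matching message strings; the same applies to the ErrNoMatchingSignatures and ErrNoMatchingAttestations sentinels that verify.go now wraps with %w. (One regression worth flagging from the same sweep: writeSignedEntity's "getting atts" conversion above dropped both the %w verb and the err argument, so that path now discards its cause.) A sketch of the caller side, with a hypothetical image reference:

package main

import (
	"errors"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	ociremote "github.com/sigstore/cosign/pkg/oci/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/app:v1") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	if _, err := ociremote.SignedImage(ref); errors.Is(err, ociremote.ErrImageNotFound) {
		log.Println("image is not in the registry")
	} else if err != nil {
		log.Fatal(err)
	}
}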
var te *transport.Error if errors.As(err, &te) && te.StatusCode == http.StatusNotFound { - return nil, errors.New("image not found in registry") + return nil, ErrImageNotFound } else if err != nil { return nil, err } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/remote/options.go b/vendor/github.com/sigstore/cosign/pkg/oci/remote/options.go index f807eb4326..ed0747c28f 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/remote/options.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/remote/options.go @@ -16,12 +16,12 @@ package remote import ( + "fmt" "os" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/pkg/errors" ) const ( @@ -127,7 +127,10 @@ func WithTargetRepository(repo name.Repository) Option { func GetEnvTargetRepository() (name.Repository, error) { if ro := os.Getenv(RepoOverrideEnvKey); ro != "" { repo, err := name.NewRepository(ro) - return repo, errors.Wrap(err, "parsing $"+RepoOverrideEnvKey) + if err != nil { + return name.Repository{}, fmt.Errorf("parsing $"+RepoOverrideEnvKey+": %w", err) + } + return repo, nil } return name.Repository{}, nil } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/remote/signatures.go b/vendor/github.com/sigstore/cosign/pkg/oci/remote/signatures.go index 9f535a8d9f..172d7f6f5a 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/remote/signatures.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/remote/signatures.go @@ -16,12 +16,12 @@ package remote import ( + "errors" "net/http" "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/remote/transport" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/oci" "github.com/sigstore/cosign/pkg/oci/empty" "github.com/sigstore/cosign/pkg/oci/internal/signature" diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/remote/write.go b/vendor/github.com/sigstore/cosign/pkg/oci/remote/write.go index f089b63ebe..603ff6eeb5 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/remote/write.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/remote/write.go @@ -16,10 +16,11 @@ package remote import ( + "fmt" + "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/remote" - "github.com/pkg/errors" "github.com/sigstore/cosign/pkg/oci" ) @@ -34,22 +35,22 @@ func WriteSignedImageIndexImages(ref name.Reference, sii oci.SignedImageIndex, o // write the image index if there is one ii, err := sii.SignedImageIndex(v1.Hash{}) if err != nil { - return errors.Wrap(err, "signed image index") + return fmt.Errorf("signed image index: %w", err) } if ii != nil { if err := remote.WriteIndex(ref, ii, o.ROpt...); err != nil { - return errors.Wrap(err, "writing index") + return fmt.Errorf("writing index: %w", err) } } // write the image if there is one si, err := sii.SignedImage(v1.Hash{}) if err != nil { - return errors.Wrap(err, "signed image") + return fmt.Errorf("signed image: %w", err) } if si != nil { if err := remoteWrite(ref, si, o.ROpt...); err != nil { - return errors.Wrap(err, "remote write") + return fmt.Errorf("remote write: %w", err) } } @@ -61,7 +62,7 @@ func WriteSignedImageIndexImages(ref name.Reference, sii oci.SignedImageIndex, o if sigs != nil { // will be nil if there are no associated signatures sigsTag, err := SignatureTag(ref, opts...) 
if err != nil { - return errors.Wrap(err, "sigs tag") + return fmt.Errorf("sigs tag: %w", err) } if err := remoteWrite(sigsTag, sigs, o.ROpt...); err != nil { return err @@ -76,7 +77,7 @@ func WriteSignedImageIndexImages(ref name.Reference, sii oci.SignedImageIndex, o if atts != nil { // will be nil if there are no associated attestations attsTag, err := AttestationTag(ref, opts...) if err != nil { - return errors.Wrap(err, "sigs tag") + return fmt.Errorf("sigs tag: %w", err) } return remoteWrite(attsTag, atts, o.ROpt...) } diff --git a/vendor/github.com/sigstore/cosign/pkg/oci/static/file.go b/vendor/github.com/sigstore/cosign/pkg/oci/static/file.go index 270b5a6b76..d1d895bdb6 100644 --- a/vendor/github.com/sigstore/cosign/pkg/oci/static/file.go +++ b/vendor/github.com/sigstore/cosign/pkg/oci/static/file.go @@ -17,6 +17,7 @@ package static import ( "io" + "time" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" @@ -44,6 +45,12 @@ func NewFile(payload []byte, opts ...Option) (oci.File, error) { if err != nil { return nil, err } + + // Set the Created date to time of execution + img, err = mutate.CreatedAt(img, v1.Time{Time: time.Now()}) + if err != nil { + return nil, err + } return &file{ SignedImage: signed.Image(img), layer: layer, diff --git a/vendor/github.com/sigstore/cosign/pkg/signature/keys.go b/vendor/github.com/sigstore/cosign/pkg/signature/keys.go index 610d0c25b3..6c89d22b97 100644 --- a/vendor/github.com/sigstore/cosign/pkg/signature/keys.go +++ b/vendor/github.com/sigstore/cosign/pkg/signature/keys.go @@ -18,11 +18,10 @@ import ( "context" "crypto" "crypto/x509" + "errors" "fmt" "strings" - "github.com/pkg/errors" - "github.com/sigstore/cosign/pkg/blob" "github.com/sigstore/cosign/pkg/cosign" "github.com/sigstore/cosign/pkg/cosign/git" @@ -35,25 +34,6 @@ import ( "github.com/sigstore/sigstore/pkg/signature/kms" ) -var ( - // Fulcio cert-extensions, documented here: https://github.com/sigstore/fulcio/blob/main/docs/oid-info.md - CertExtensionOIDCIssuer = "1.3.6.1.4.1.57264.1.1" - CertExtensionGithubWorkflowTrigger = "1.3.6.1.4.1.57264.1.2" - CertExtensionGithubWorkflowSha = "1.3.6.1.4.1.57264.1.3" - CertExtensionGithubWorkflowName = "1.3.6.1.4.1.57264.1.4" - CertExtensionGithubWorkflowRepository = "1.3.6.1.4.1.57264.1.5" - CertExtensionGithubWorkflowRef = "1.3.6.1.4.1.57264.1.6" - - CertExtensionMap = map[string]string{ - CertExtensionOIDCIssuer: "oidcIssuer", - CertExtensionGithubWorkflowTrigger: "githubWorkflowTrigger", - CertExtensionGithubWorkflowSha: "githubWorkflowSha", - CertExtensionGithubWorkflowName: "githubWorkflowName", - CertExtensionGithubWorkflowRepository: "githubWorkflowRepository", - CertExtensionGithubWorkflowRef: "githubWorkflowRef", - } -) - // LoadPublicKey is a wrapper for VerifierForKeyRef, hardcoding SHA256 as the hash algorithm func LoadPublicKey(ctx context.Context, keyRef string) (verifier signature.Verifier, err error) { return VerifierForKeyRef(ctx, keyRef, crypto.SHA256) @@ -63,9 +43,19 @@ func LoadPublicKey(ctx context.Context, keyRef string) (verifier signature.Verif // verifier using the provided hash algorithm func VerifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto.Hash) (verifier signature.Verifier, err error) { // The key could be plaintext, in a file, at a URL, or in KMS. 
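That resolution order means one entry point accepts several keyRef shapes: only a recognized KMS scheme reaches a provider, and everything else falls through to file or URL loading, as the rewritten kms.Get handling below spells out. A sketch with hypothetical references (the awskms form follows sigstore's usual scheme syntax):

package main

import (
	"context"
	"crypto"
	"log"

	sigs "github.com/sigstore/cosign/pkg/signature"
)

func main() {
	ctx := context.Background()
	for _, keyRef := range []string{
		"cosign.pub",                     // local PEM file
		"https://example.com/cosign.pub", // fetched over HTTP(S)
		"awskms:///alias/my-signing-key", // dispatched to the KMS provider
	} {
		if _, err := sigs.VerifierForKeyRef(ctx, keyRef, crypto.SHA256); err != nil {
			log.Printf("%s: %v", keyRef, err)
		}
	}
}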
- if kmsKey, err := kms.Get(ctx, keyRef, hashAlgorithm); err == nil { + var perr *kms.ProviderNotFoundError + kmsKey, err := kms.Get(ctx, keyRef, hashAlgorithm) + switch { + case err == nil: // KMS specified return kmsKey, nil + case errors.As(err, &perr): + // We can ignore ProviderNotFoundError; that just means the keyRef + // didn't match any of the KMS schemes. + default: + // But other errors indicate something more insidious; pass those + // through. + return nil, err } raw, err := blob.LoadFileOrURL(keyRef) @@ -77,7 +67,7 @@ func VerifierForKeyRef(ctx context.Context, keyRef string, hashAlgorithm crypto. // PEM encoded file. pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(raw) if err != nil { - return nil, errors.Wrap(err, "pem to public key") + return nil, fmt.Errorf("pem to public key: %w", err) } return signature.LoadVerifier(pubKey, hashAlgorithm) @@ -98,14 +88,13 @@ func loadKey(keyPath string, pf cosign.PassFunc) (signature.SignerVerifier, erro return cosign.LoadPrivateKey(kb, pass) } -// LoadPublicKeyRaw loads a verifier from a raw public key passed in +// LoadPublicKeyRaw loads a verifier from a PEM-encoded public key func LoadPublicKeyRaw(raw []byte, hashAlgorithm crypto.Hash) (signature.Verifier, error) { - // PEM encoded file. - ed, err := cosign.PemToECDSAKey(raw) + pub, err := cryptoutils.UnmarshalPEMToPublicKey(raw) if err != nil { - return nil, errors.Wrap(err, "pem to ecdsa") + return nil, err } - return signature.LoadECDSAVerifier(ed, hashAlgorithm) + return signature.LoadVerifier(pub, hashAlgorithm) } func SignerFromKeyRef(ctx context.Context, keyRef string, pf cosign.PassFunc) (signature.Signer, error) { @@ -118,19 +107,19 @@ func SignerVerifierFromKeyRef(ctx context.Context, keyRef string, pf cosign.Pass pkcs11UriConfig := pkcs11key.NewPkcs11UriConfig() err := pkcs11UriConfig.Parse(keyRef) if err != nil { - return nil, errors.Wrap(err, "parsing pkcs11 uri") + return nil, fmt.Errorf("parsing pkcs11 uri: %w", err) } // Since we'll be signing, we need to set askForPinIsNeeded to true // because we need access to the private key. sk, err := pkcs11key.GetKeyWithURIConfig(pkcs11UriConfig, true) if err != nil { - return nil, errors.Wrap(err, "opening pkcs11 token key") + return nil, fmt.Errorf("opening pkcs11 token key: %w", err) } sv, err := sk.SignerVerifier() if err != nil { - return nil, errors.Wrap(err, "initializing pkcs11 token signer verifier") + return nil, fmt.Errorf("initializing pkcs11 token signer verifier: %w", err) } return sv, nil @@ -200,19 +189,19 @@ func PublicKeyFromKeyRefWithHashAlgo(ctx context.Context, keyRef string, hashAlg pkcs11UriConfig := pkcs11key.NewPkcs11UriConfig() err := pkcs11UriConfig.Parse(keyRef) if err != nil { - return nil, errors.Wrap(err, "parsing pkcs11 uri") + return nil, fmt.Errorf("parsing pkcs11 uri): %w", err) } // Since we'll be verifying a signature, we do not need to set askForPinIsNeeded to true // because we only need access to the public key. 
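For reference, the whole public-key path through these pkcs11key helpers, assembled end to end; the PKCS#11 URI below is hypothetical and the pkcs11key import path is assumed from the package name. (Cosmetic nit in the hunk above: the "parsing pkcs11 uri)" message in PublicKeyFromKeyRefWithHashAlgo carries a stray closing parenthesis.)

package main

import (
	"log"

	"github.com/sigstore/cosign/pkg/cosign/pkcs11key"
)

func main() {
	cfg := pkcs11key.NewPkcs11UriConfig()
	if err := cfg.Parse("pkcs11:token=MyToken;object=signing-key"); err != nil {
		log.Fatalf("parsing pkcs11 uri: %v", err)
	}
	sk, err := pkcs11key.GetKeyWithURIConfig(cfg, false) // false: public key only, no PIN needed
	if err != nil {
		log.Fatalf("opening pkcs11 token key: %v", err)
	}
	v, err := sk.Verifier()
	if err != nil {
		log.Fatalf("initializing pkcs11 token verifier: %v", err)
	}
	_ = v // use v.VerifySignature(...) against the token's public key
}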
sk, err := pkcs11key.GetKeyWithURIConfig(pkcs11UriConfig, false) if err != nil { - return nil, errors.Wrap(err, "opening pkcs11 token key") + return nil, fmt.Errorf("opening pkcs11 token key: %w", err) } v, err := sk.Verifier() if err != nil { - return nil, errors.Wrap(err, "initializing pkcs11 token verifier") + return nil, fmt.Errorf("initializing pkcs11 token verifier: %w", err) } return v, nil @@ -255,25 +244,3 @@ func CertSubject(c *x509.Certificate) string { } return "" } - -func CertIssuerExtension(cert *x509.Certificate) string { - for _, ext := range cert.Extensions { - if ext.Id.String() == CertExtensionOIDCIssuer { - return string(ext.Value) - } - } - return "" -} - -func CertExtensions(cert *x509.Certificate) map[string]string { - extensions := map[string]string{} - for _, ext := range cert.Extensions { - readableName, ok := CertExtensionMap[ext.Id.String()] - if ok { - extensions[readableName] = string(ext.Value) - } else { - extensions[ext.Id.String()] = string(ext.Value) - } - } - return extensions -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/api/ca.go b/vendor/github.com/sigstore/fulcio/pkg/api/ca.go deleted file mode 100644 index 7fac5f459e..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/api/ca.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package api - -import ( - "context" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/coreos/go-oidc/v3/oidc" - certauth "github.com/sigstore/fulcio/pkg/ca" - "github.com/sigstore/fulcio/pkg/challenges" - "github.com/sigstore/fulcio/pkg/config" - "github.com/sigstore/fulcio/pkg/ctl" - "github.com/sigstore/fulcio/pkg/log" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -type Key struct { - // +required - Content []byte `json:"content"` - Algorithm string `json:"algorithm,omitempty"` -} - -type CertificateRequest struct { - // +required - PublicKey Key `json:"publicKey"` - - // +required - SignedEmailAddress []byte `json:"signedEmailAddress"` -} - -const ( - signingCertPath = "/api/v1/signingCert" - rootCertPath = "/api/v1/rootCert" -) - -// NewHandler creates a new http.Handler for serving the Fulcio API. 
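The free helpers deleted from keys.go just above (CertIssuerExtension and the map-returning CertExtensions) are superseded by the CertExtensions struct that verify.go now uses, with one typed getter per Fulcio OID. A sketch of reading them off a certificate (getter names exactly as they appear in this diff, mixed Get/GetCert prefixes included):

package main

import (
	"crypto/x509"
	"fmt"

	"github.com/sigstore/cosign/pkg/cosign"
)

// printFulcioExtensions shows the typed accessors that replace the old map.
func printFulcioExtensions(cert *x509.Certificate) {
	ce := cosign.CertExtensions{Cert: cert}
	fmt.Println("issuer: ", ce.GetIssuer())
	fmt.Println("trigger:", ce.GetCertExtensionGithubWorkflowTrigger())
	fmt.Println("sha:    ", ce.GetExtensionGithubWorkflowSha())
	fmt.Println("repo:   ", ce.GetCertExtensionGithubWorkflowRepository())
}

func main() {}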
-func NewHandler() http.Handler { - handler := http.NewServeMux() - handler.HandleFunc(signingCertPath, signingCert) - handler.HandleFunc(rootCertPath, rootCert) - return handler -} - -func extractIssuer(token string) (string, error) { - parts := strings.Split(token, ".") - if len(parts) < 2 { - return "", fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts)) - } - raw, err := base64.RawURLEncoding.DecodeString(parts[1]) - if err != nil { - return "", fmt.Errorf("oidc: malformed jwt payload: %w", err) - } - var payload struct { - Issuer string `json:"iss"` - } - - if err := json.Unmarshal(raw, &payload); err != nil { - return "", fmt.Errorf("oidc: failed to unmarshal claims: %w", err) - } - return payload.Issuer, nil -} - -// We do this to bypass needing actual OIDC tokens for unit testing. -var authorize = actualAuthorize - -func actualAuthorize(req *http.Request) (*oidc.IDToken, error) { - // Strip off the "Bearer" prefix. - token := strings.Replace(req.Header.Get("Authorization"), "Bearer ", "", 1) - - issuer, err := extractIssuer(token) - if err != nil { - return nil, err - } - - verifier, ok := config.FromContext(req.Context()).GetVerifier(issuer) - if !ok { - return nil, fmt.Errorf("unsupported issuer: %s", issuer) - } - return verifier.Verify(req.Context(), token) -} - -func signingCert(w http.ResponseWriter, req *http.Request) { - if req.Method != http.MethodPost { - err := fmt.Errorf("signing cert handler must receive POSTs, got %s", req.Method) - handleFulcioAPIError(w, req, http.StatusMethodNotAllowed, err, err.Error()) - return - } - if gotContentType, wantContentType := req.Header.Get("Content-Type"), "application/json"; gotContentType != wantContentType { - err := fmt.Errorf("signing cert handler must receive %q, got %q", wantContentType, gotContentType) - handleFulcioAPIError(w, req, http.StatusUnsupportedMediaType, err, err.Error()) - return - } - - ctx := req.Context() - logger := log.ContextLogger(ctx) - - principal, err := authorize(req) - if err != nil { - handleFulcioAPIError(w, req, http.StatusUnauthorized, err, invalidCredentials) - return - } - - // Parse the request body. 
- cr := CertificateRequest{} - if err := json.NewDecoder(req.Body).Decode(&cr); err != nil { - handleFulcioAPIError(w, req, http.StatusBadRequest, err, invalidCertificateRequest) - return - } - - publicKeyBytes := cr.PublicKey.Content - // try to unmarshal as DER - publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes) - if err != nil { - // try to unmarshal as PEM - logger.Debugf("error parsing public key as DER, trying pem: %v", err.Error()) - publicKey, err = cryptoutils.UnmarshalPEMToPublicKey(publicKeyBytes) - if err != nil { - handleFulcioAPIError(w, req, http.StatusBadRequest, err, invalidPublicKey) - return - } - } - - subject, err := ExtractSubject(ctx, principal, publicKey, cr.SignedEmailAddress) - if err != nil { - handleFulcioAPIError(w, req, http.StatusBadRequest, err, invalidSignature) - return - } - - ca := GetCA(ctx) - - var csc *certauth.CodeSigningCertificate - var sctBytes []byte - // TODO: prefer embedding SCT if possible - if _, ok := ca.(certauth.EmbeddedSCTCA); !ok { - // currently configured CA doesn't support pre-certificate flow required to embed SCT in final certificate - csc, err = ca.CreateCertificate(ctx, subject) - if err != nil { - // if the error was due to invalid input in the request, return HTTP 400 - if _, ok := err.(certauth.ValidationError); ok { - handleFulcioAPIError(w, req, http.StatusBadRequest, err, err.Error()) - return - } - // otherwise return a 500 error to reflect that it is a transient server issue that the client can't resolve - handleFulcioAPIError(w, req, http.StatusInternalServerError, err, genericCAError) - return - } - - // TODO: initialize CTL client once - // Submit to CTL - logger.Info("Submitting CTL inclusion for OIDC grant: ", subject.Value) - ctURL := GetCTLogURL(ctx) - if ctURL != "" { - c := ctl.New(ctURL) - sct, err := c.AddChain(csc) - if err != nil { - handleFulcioAPIError(w, req, http.StatusInternalServerError, err, fmt.Sprintf(failedToEnterCertInCTL, ctURL)) - return - } - sctBytes, err = json.Marshal(sct) - if err != nil { - handleFulcioAPIError(w, req, http.StatusInternalServerError, err, failedToMarshalSCT) - return - } - logger.Info("CTL Submission Signature Received: ", sct.Signature) - logger.Info("CTL Submission ID Received: ", sct.ID) - } else { - logger.Info("Skipping CT log upload.") - } - } - - metricNewEntries.Inc() - - var ret strings.Builder - finalPEM, err := csc.CertPEM() - if err != nil { - handleFulcioAPIError(w, req, http.StatusInternalServerError, err, failedToMarshalCert) - return - } - fmt.Fprintf(&ret, "%s\n", finalPEM) - finalChainPEM, err := csc.ChainPEM() - if err != nil { - handleFulcioAPIError(w, req, http.StatusInternalServerError, err, failedToMarshalCert) - return - } - if len(finalChainPEM) > 0 { - fmt.Fprintf(&ret, "%s\n", finalChainPEM) - } - - // Set the SCT and Content-Type headers, and then respond with a 201 Created. - w.Header().Add("SCT", base64.StdEncoding.EncodeToString(sctBytes)) - w.Header().Add("Content-Type", "application/pem-certificate-chain") - w.WriteHeader(http.StatusCreated) - // Write the PEM encoded certificate chain to the response body. 
- if _, err := w.Write([]byte(strings.TrimSpace(ret.String()))); err != nil { - logger.Error("Error writing response: ", err) - } -} - -func rootCert(w http.ResponseWriter, req *http.Request) { - ctx := req.Context() - logger := log.ContextLogger(ctx) - - ca := GetCA(ctx) - root, err := ca.Root(ctx) - if err != nil { - logger.Error("Error retrieving root cert: ", err) - } - w.Header().Add("Content-Type", "application/pem-certificate-chain") - if _, err := w.Write(root); err != nil { - logger.Error("Error writing response: ", err) - } -} - -func ExtractSubject(ctx context.Context, tok *oidc.IDToken, publicKey crypto.PublicKey, challenge []byte) (*challenges.ChallengeResult, error) { - iss, ok := config.FromContext(ctx).GetIssuer(tok.Issuer) - if !ok { - return nil, fmt.Errorf("configuration can not be loaded for issuer %v", tok.Issuer) - } - switch iss.Type { - case config.IssuerTypeEmail: - return challenges.Email(ctx, tok, publicKey, challenge) - case config.IssuerTypeSpiffe: - return challenges.Spiffe(ctx, tok, publicKey, challenge) - case config.IssuerTypeGithubWorkflow: - return challenges.GithubWorkflow(ctx, tok, publicKey, challenge) - case config.IssuerTypeKubernetes: - return challenges.Kubernetes(ctx, tok, publicKey, challenge) - default: - return nil, fmt.Errorf("unsupported issuer: %s", iss.Type) - } -} - -type caKey struct{} - -// WithCA associates the provided certificate authority with the provided context. -func WithCA(ctx context.Context, ca certauth.CertificateAuthority) context.Context { - return context.WithValue(ctx, caKey{}, ca) -} - -// GetCA accesses the certificate authority associated with the provided context. -func GetCA(ctx context.Context) certauth.CertificateAuthority { - untyped := ctx.Value(caKey{}) - if untyped == nil { - return nil - } - return untyped.(certauth.CertificateAuthority) -} - -type ctKey struct{} - -// WithCTLogURL associates the provided certificate transparency log URL with -// the provided context. -func WithCTLogURL(ctx context.Context, ct string) context.Context { - return context.WithValue(ctx, ctKey{}, ct) -} - -// GetCTLogURL accesses the certificate transparency log URL associated with -// the provided context. -func GetCTLogURL(ctx context.Context) string { - untyped := ctx.Value(ctKey{}) - if untyped == nil { - return "" - } - return untyped.(string) -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/api/client.go b/vendor/github.com/sigstore/fulcio/pkg/api/client.go index f93e795ed1..5c8fa1e600 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/api/client.go +++ b/vendor/github.com/sigstore/fulcio/pkg/api/client.go @@ -21,6 +21,7 @@ import ( "encoding/json" "encoding/pem" "errors" + "fmt" "io" "net/http" "net/url" @@ -38,11 +39,33 @@ type RootResponse struct { ChainPEM []byte } +type Key struct { + // +required + Content []byte `json:"content"` + Algorithm string `json:"algorithm,omitempty"` +} + +type CertificateRequest struct { + // +optional + PublicKey Key `json:"publicKey"` + + // +optional + SignedEmailAddress []byte `json:"signedEmailAddress"` + + // +optional + CertificateSigningRequest []byte `json:"certificateSigningRequest"` +} + +const ( + signingCertPath = "/api/v1/signingCert" + rootCertPath = "/api/v1/rootCert" +) + // SigstorePublicServerURL is the URL of Sigstore's public Fulcio service. const SigstorePublicServerURL = "https://fulcio.sigstore.dev" -// Client is the interface for accessing the Fulcio API. -type Client interface { +// LegacyClient is the interface for accessing the Fulcio API. 
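Renaming Client to LegacyClient below is source-breaking for importers, though usage is unchanged: NewClient still returns the same concrete client. A sketch against the new name (the token and request contents are placeholders):

package main

import (
	"log"
	"net/url"

	"github.com/sigstore/fulcio/pkg/api"
)

func main() {
	u, err := url.Parse(api.SigstorePublicServerURL)
	if err != nil {
		log.Fatal(err)
	}
	var fc api.LegacyClient = api.NewClient(u)
	cr := api.CertificateRequest{
		PublicKey:          api.Key{Content: []byte("<PEM or DER public key>")},
		SignedEmailAddress: []byte("<signature over the OIDC subject>"),
	}
	if _, err := fc.SigningCert(cr, "<oidc-bearer-token>"); err != nil {
		log.Fatal(err)
	}
}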
+type LegacyClient interface { // SigningCert sends the provided CertificateRequest to the /api/v1/signingCert // endpoint of a Fulcio API, authenticated with the provided bearer token. SigningCert(cr CertificateRequest, token string) (*CertificateResponse, error) @@ -54,7 +77,7 @@ type Client interface { type ClientOption func(*clientOptions) // NewClient creates a new Fulcio API client talking to the provided URL. -func NewClient(url *url.URL, opts ...ClientOption) Client { +func NewClient(url *url.URL, opts ...ClientOption) LegacyClient { o := makeOptions(opts...) return &client{ @@ -71,7 +94,7 @@ type client struct { client *http.Client } -var _ Client = (*client)(nil) +var _ LegacyClient = (*client)(nil) // SigningCert implements Client func (c *client) SigningCert(cr CertificateRequest, token string) (*CertificateResponse, error) { @@ -81,12 +104,12 @@ func (c *client) SigningCert(cr CertificateRequest, token string) (*CertificateR b, err := json.Marshal(cr) if err != nil { - return nil, err + return nil, fmt.Errorf("marshal: %w", err) } req, err := http.NewRequest(http.MethodPost, endpoint.String(), bytes.NewBuffer(b)) if err != nil { - return nil, err + return nil, fmt.Errorf("request: %w", err) } // Set the authorization header to our OIDC bearer token. req.Header.Set("Authorization", "Bearer "+token) @@ -95,25 +118,25 @@ func (c *client) SigningCert(cr CertificateRequest, token string) (*CertificateR resp, err := c.client.Do(req) if err != nil { - return nil, err + return nil, fmt.Errorf("client: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return nil, err + return nil, fmt.Errorf("%s read: %w", endpoint.String(), err) } // The API should return a 201 Created on success. If we see anything else, // then turn the response body into an error. if resp.StatusCode != http.StatusCreated { - return nil, errors.New(string(body)) + return nil, fmt.Errorf("%s %s returned %s: %q", http.MethodPost, endpoint.String(), resp.Status, body) } // Extract the SCT from the response header. sct, err := base64.StdEncoding.DecodeString(resp.Header.Get("SCT")) if err != nil { - return nil, err + return nil, fmt.Errorf("decode: %w", err) } // Split the cert and the chain @@ -134,7 +157,11 @@ func (c *client) RootCert() (*RootResponse, error) { endpoint := *c.baseURL endpoint.Path = path.Join(endpoint.Path, rootCertPath) - resp, err := http.Get(endpoint.String()) + req, err := http.NewRequest(http.MethodGet, endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("request: %w", err) + } + resp, err := c.client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/sigstore/fulcio/pkg/api/error.go b/vendor/github.com/sigstore/fulcio/pkg/api/error.go deleted file mode 100644 index 2ae608de47..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/api/error.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package api - -import ( - "fmt" - "net/http" - - "github.com/sigstore/fulcio/pkg/log" -) - -const ( - invalidSignature = "The signature supplied in the request could not be verified" - invalidCertificateRequest = "The CertificateRequest was invalid" - invalidPublicKey = "The public key supplied in the request could not be parsed" - failedToEnterCertInCTL = "Error entering certificate in CTL @ '%v'" - failedToMarshalSCT = "Error marshaling signed certificate timestamp" - failedToMarshalCert = "Error marshaling code signing certificate" - //nolint - invalidCredentials = "There was an error processing the credentials for this request" - genericCAError = "error communicating with CA backend" -) - -func handleFulcioAPIError(w http.ResponseWriter, req *http.Request, code int, err error, message string, fields ...interface{}) { - if message == "" { - message = http.StatusText(code) - } - - log.RequestIDLogger(req).Errorw("exiting with error", append([]interface{}{"handler", req.URL.Path, "statusCode", code, "clientMessage", message, "error", err}, fields...)...) - http.Error(w, fmt.Sprintf(`{"code":%d,"message":%q}`, code, message), code) -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/ca/interface.go b/vendor/github.com/sigstore/fulcio/pkg/ca/interface.go deleted file mode 100644 index ff2ac51e90..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/ca/interface.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -package ca - -import ( - "bytes" - "context" - "crypto/x509" - "strings" - - "github.com/sigstore/fulcio/pkg/challenges" - "github.com/sigstore/sigstore/pkg/cryptoutils" -) - -type CodeSigningCertificate struct { - Subject *challenges.ChallengeResult - FinalCertificate *x509.Certificate - FinalChain []*x509.Certificate - finalPEM []byte - finalChainPEM []byte -} - -type CodeSigningPreCertificate struct { - Subject *challenges.ChallengeResult - PreCert *x509.Certificate -} - -func CreateCSCFromPEM(subject *challenges.ChallengeResult, cert string, chain []string) (*CodeSigningCertificate, error) { - c := &CodeSigningCertificate{ - Subject: subject, - } - - // convert to X509 and store both formats - finalCert, err := cryptoutils.UnmarshalCertificatesFromPEM([]byte(cert)) - if err != nil { - return nil, err - } - c.finalPEM = []byte(cert) - c.FinalCertificate = finalCert[0] - - // convert to X509 and store both formats - chainBytes := []byte(strings.Join(chain, "")) - if len(chainBytes) != 0 { - c.FinalChain, err = cryptoutils.UnmarshalCertificatesFromPEM(chainBytes) - if err != nil { - return nil, err - } - c.finalChainPEM = chainBytes - } - return c, nil -} - -func CreateCSCFromDER(subject *challenges.ChallengeResult, cert, chain []byte) (c *CodeSigningCertificate, err error) { - c = &CodeSigningCertificate{ - Subject: subject, - } - - // convert to X509 and store both formats - c.finalPEM = cryptoutils.PEMEncode(cryptoutils.CertificatePEMType, cert) - c.FinalCertificate, err = x509.ParseCertificate(cert) - if err != nil { - return nil, err - } - - // convert to X509 and store both formats - c.FinalChain, err = x509.ParseCertificates(chain) - if err != nil { - return nil, err - } - buf := bytes.Buffer{} - for i, chainCert := range c.FinalChain { - buf.Write(cryptoutils.PEMEncode(cryptoutils.CertificatePEMType, chainCert.Raw)) - if i != len(c.FinalChain) { - buf.WriteRune('\n') - } - } - c.finalChainPEM = buf.Bytes() - return c, nil -} - -func (c *CodeSigningCertificate) CertPEM() ([]byte, error) { - var err error - if c.finalPEM == nil { - c.finalPEM, err = cryptoutils.MarshalCertificateToPEM(c.FinalCertificate) - } - return c.finalPEM, err -} - -func (c *CodeSigningCertificate) ChainPEM() ([]byte, error) { - var err error - if c.finalChainPEM == nil && len(c.FinalChain) > 0 { - c.finalChainPEM, err = cryptoutils.MarshalCertificatesToPEM(c.FinalChain) - } - return c.finalChainPEM, err -} - -// CertificateAuthority only returns the SCT in detached format -type CertificateAuthority interface { - CreateCertificate(ctx context.Context, challenge *challenges.ChallengeResult) (*CodeSigningCertificate, error) - Root(ctx context.Context) ([]byte, error) -} - -type EmbeddedSCTCA interface { - CreatePrecertificate(ctx context.Context, challenge *challenges.ChallengeResult) (*CodeSigningPreCertificate, error) - IssueFinalCertificate(ctx context.Context, precert *CodeSigningPreCertificate) (*CodeSigningCertificate, error) -} - -// ValidationError indicates that there is an issue with the content in the HTTP Request that -// should result in an HTTP 400 Bad Request error being returned to the client -type ValidationError error diff --git a/vendor/github.com/sigstore/fulcio/pkg/challenges/challenges.go b/vendor/github.com/sigstore/fulcio/pkg/challenges/challenges.go deleted file mode 100644 index db71f04c1d..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/challenges/challenges.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2021 The Sigstore Authors. 
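One detail in the deleted CreateCSCFromDER above: the guard i != len(c.FinalChain) is always true for a range index (which runs 0 through len-1), so every chain certificate gets a trailing newline, the last included. If the intent was a separator only, the comparison would be against len-1, roughly:

package main

import "bytes"

// joinPEM sketches the presumed intent: a newline between PEM blocks,
// but none after the final one.
func joinPEM(blocks [][]byte) []byte {
	var buf bytes.Buffer
	for i, b := range blocks {
		buf.Write(b)
		if i != len(blocks)-1 { // the deleted code compared against len(blocks)
			buf.WriteByte('\n')
		}
	}
	return buf.Bytes()
}

func main() {}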
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package challenges - -import ( - "bytes" - "context" - "crypto" - "errors" - "fmt" - "net/url" - "strings" - - "github.com/sigstore/fulcio/pkg/config" - - "github.com/coreos/go-oidc/v3/oidc" - "github.com/sigstore/fulcio/pkg/oauthflow" - "github.com/sigstore/sigstore/pkg/signature" -) - -type ChallengeType int - -const ( - EmailValue ChallengeType = iota - SpiffeValue - GithubWorkflowValue - KubernetesValue -) - -type AdditionalInfo int - -// Additional information that can be added as a cert extension. -const ( - GithubWorkflowTrigger AdditionalInfo = iota - GithubWorkflowSha - GithubWorkflowName - GithubWorkflowRepository - GithubWorkflowRef -) - -type ChallengeResult struct { - Issuer string - TypeVal ChallengeType - PublicKey crypto.PublicKey - Value string - // Extra information from the token that can be added to extensions. - AdditionalInfo map[AdditionalInfo]string -} - -func CheckSignature(pub crypto.PublicKey, proof []byte, email string) error { - verifier, err := signature.LoadVerifier(pub, crypto.SHA256) - if err != nil { - return err - } - - return verifier.VerifySignature(bytes.NewReader(proof), strings.NewReader(email)) -} - -func Email(ctx context.Context, principal *oidc.IDToken, pubKey crypto.PublicKey, challenge []byte) (*ChallengeResult, error) { - emailAddress, emailVerified, err := oauthflow.EmailFromIDToken(principal) - if !emailVerified { - return nil, errors.New("email_verified claim was false") - } else if err != nil { - return nil, err - } - - // Check the proof - if err := CheckSignature(pubKey, challenge, emailAddress); err != nil { - return nil, err - } - - cfg, ok := config.FromContext(ctx).GetIssuer(principal.Issuer) - if !ok { - return nil, errors.New("invalid configuration for OIDC ID Token issuer") - } - - issuer, err := oauthflow.IssuerFromIDToken(principal, cfg.IssuerClaim) - if err != nil { - return nil, err - } - - // Now issue cert! - return &ChallengeResult{ - Issuer: issuer, - PublicKey: pubKey, - TypeVal: EmailValue, - Value: emailAddress, - }, nil -} - -func Spiffe(ctx context.Context, principal *oidc.IDToken, pubKey crypto.PublicKey, challenge []byte) (*ChallengeResult, error) { - - spiffeID := principal.Subject - - cfg, ok := config.FromContext(ctx).GetIssuer(principal.Issuer) - if !ok { - return nil, errors.New("invalid configuration for OIDC ID Token issuer") - } - - // The Spiffe ID must be a subdomain of the issuer (spiffe://foo.example.com -> example.com/...) - u, err := url.Parse(cfg.IssuerURL) - if err != nil { - return nil, err - } - - issuerHostname := u.Hostname() - if !isSpiffeIDAllowed(u.Hostname(), spiffeID) { - return nil, fmt.Errorf("%s is not allowed for %s", spiffeID, issuerHostname) - } - - // Check the proof - if err := CheckSignature(pubKey, challenge, spiffeID); err != nil { - return nil, err - } - - issuer, err := oauthflow.IssuerFromIDToken(principal, cfg.IssuerClaim) - if err != nil { - return nil, err - } - - // Now issue cert! 
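Every challenge flow in this deleted file funnels through CheckSignature: the client proves possession of the key to be certified by signing its claimed identity value, and the server verifies that proof under the submitted public key. A round-trip sketch of the removed flow (error handling elided):

package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"log"
	"strings"

	"github.com/sigstore/fulcio/pkg/challenges"
	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Client side: sign the identity value with the key that should
	// end up in the certificate.
	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	sv, _ := signature.LoadECDSASignerVerifier(priv, crypto.SHA256)
	email := "user@example.com"
	proof, _ := sv.SignMessage(strings.NewReader(email))

	// Server side: the proof must verify under the submitted public key.
	if err := challenges.CheckSignature(priv.Public(), proof, email); err != nil {
		log.Fatal(err)
	}
}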
- return &ChallengeResult{ - Issuer: issuer, - PublicKey: pubKey, - TypeVal: SpiffeValue, - Value: spiffeID, - }, nil -} - -func Kubernetes(ctx context.Context, principal *oidc.IDToken, pubKey crypto.PublicKey, challenge []byte) (*ChallengeResult, error) { - k8sURI, err := kubernetesToken(principal) - if err != nil { - return nil, err - } - - // Check the proof - if err := CheckSignature(pubKey, challenge, principal.Subject); err != nil { - return nil, err - } - - cfg, ok := config.FromContext(ctx).GetIssuer(principal.Issuer) - if !ok { - return nil, errors.New("invalid configuration for OIDC ID Token issuer") - } - - issuer, err := oauthflow.IssuerFromIDToken(principal, cfg.IssuerClaim) - if err != nil { - return nil, err - } - - // Now issue cert! - return &ChallengeResult{ - Issuer: issuer, - PublicKey: pubKey, - TypeVal: KubernetesValue, - Value: k8sURI, - }, nil -} - -func GithubWorkflow(ctx context.Context, principal *oidc.IDToken, pubKey crypto.PublicKey, challenge []byte) (*ChallengeResult, error) { - workflowRef, err := workflowFromIDToken(principal) - if err != nil { - return nil, err - } - additionalInfo, err := workflowInfoFromIDToken(principal) - if err != nil { - return nil, err - } - - // Check the proof - if err := CheckSignature(pubKey, challenge, principal.Subject); err != nil { - return nil, err - } - - cfg, ok := config.FromContext(ctx).GetIssuer(principal.Issuer) - if !ok { - return nil, errors.New("invalid configuration for OIDC ID Token issuer") - } - - issuer, err := oauthflow.IssuerFromIDToken(principal, cfg.IssuerClaim) - if err != nil { - return nil, err - } - - // Now issue cert! - return &ChallengeResult{ - Issuer: issuer, - PublicKey: pubKey, - TypeVal: GithubWorkflowValue, - Value: workflowRef, - AdditionalInfo: additionalInfo, - }, nil -} - -func kubernetesToken(token *oidc.IDToken) (string, error) { - // Extract custom claims - var claims struct { - // "kubernetes.io": { - // "namespace": "default", - // "pod": { - // "name": "oidc-test", - // "uid": "49ad3572-b3dd-43a6-8d77-5858d3660275" - // }, - // "serviceaccount": { - // "name": "default", - // "uid": "f5720c1d-e152-4356-a897-11b07aff165d" - // } - // } - Kubernetes struct { - Namespace string `json:"namespace"` - Pod struct { - Name string `json:"name"` - UID string `json:"uid"` - } `json:"pod"` - ServiceAccount struct { - Name string `json:"name"` - UID string `json:"uid"` - } `json:"serviceaccount"` - } `json:"kubernetes.io"` - } - if err := token.Claims(&claims); err != nil { - return "", err - } - - // We use this in URIs, so it has to be a URI. - return "https://kubernetes.io/namespaces/" + claims.Kubernetes.Namespace + "/serviceaccounts/" + claims.Kubernetes.ServiceAccount.Name, nil -} - -func workflowFromIDToken(token *oidc.IDToken) (string, error) { - // Extract custom claims - var claims struct { - JobWorkflowRef string `json:"job_workflow_ref"` - // The other fields that are present here seem to depend on the type - // of workflow trigger that initiated the action. - } - if err := token.Claims(&claims); err != nil { - return "", err - } - - // We use this in URIs, so it has to be a URI. 
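The kubernetesToken helper above relies on go-oidc's token.Claims to decode the nested "kubernetes.io" claim block into an anonymous struct. A stdlib sketch of the same decoding over a raw claims document (the values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative payload shaped like the kubernetes.io claims that
	// kubernetesToken expects.
	raw := []byte(`{"kubernetes.io":{"namespace":"default","serviceaccount":{"name":"default","uid":"f5720c1d"}}}`)
	var claims struct {
		Kubernetes struct {
			Namespace      string `json:"namespace"`
			ServiceAccount struct {
				Name string `json:"name"`
			} `json:"serviceaccount"`
		} `json:"kubernetes.io"`
	}
	if err := json.Unmarshal(raw, &claims); err != nil {
		panic(err)
	}
	// Rendered as a URI, exactly as the challenge value is built.
	fmt.Println("https://kubernetes.io/namespaces/" + claims.Kubernetes.Namespace +
		"/serviceaccounts/" + claims.Kubernetes.ServiceAccount.Name)
}
```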
- return "https://github.com/" + claims.JobWorkflowRef, nil -} - -func workflowInfoFromIDToken(token *oidc.IDToken) (map[AdditionalInfo]string, error) { - // Extract custom claims - var claims struct { - Sha string `json:"sha"` - Trigger string `json:"event_name"` - Repository string `json:"repository"` - Workflow string `json:"workflow"` - Ref string `json:"ref"` - // The other fields that are present here seem to depend on the type - // of workflow trigger that initiated the action. - } - if err := token.Claims(&claims); err != nil { - return nil, err - } - - // We use this in URIs, so it has to be a URI. - return map[AdditionalInfo]string{ - GithubWorkflowSha: claims.Sha, - GithubWorkflowTrigger: claims.Trigger, - GithubWorkflowName: claims.Workflow, - GithubWorkflowRepository: claims.Repository, - GithubWorkflowRef: claims.Ref}, nil -} - -func isSpiffeIDAllowed(host, spiffeID string) bool { - u, err := url.Parse(spiffeID) - if err != nil { - return false - } - if u.Scheme != "spiffe" { - return false - } - if u.Hostname() == host { - return true - } - return strings.Contains(u.Hostname(), "."+host) -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/config/config.go b/vendor/github.com/sigstore/fulcio/pkg/config/config.go deleted file mode 100644 index 937fee6c35..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/config/config.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package config - -import ( - "context" - "crypto/x509" - "encoding/json" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" - - "github.com/coreos/go-oidc/v3/oidc" - lru "github.com/hashicorp/golang-lru" - "github.com/sigstore/fulcio/pkg/log" -) - -type FulcioConfig struct { - OIDCIssuers map[string]OIDCIssuer `json:"OIDCIssuers,omitempty"` - - // A meta issuer has a templated URL of the form: - // https://oidc.eks.*.amazonaws.com/id/* - // Where * can match a single hostname or URI path parts - // (in particular, no '.' or '/' are permitted, among - // other special characters) Some examples we want to match: - // * https://oidc.eks.us-west-2.amazonaws.com/id/B02C93B6A2D30341AD01E1B6D48164CB - // * https://container.googleapis.com/v1/projects/mattmoor-credit/locations/us-west1-b/clusters/tenant-cluster - MetaIssuers map[string]OIDCIssuer `json:"MetaIssuers,omitempty"` - - // verifiers is a fixed mapping from our OIDCIssuers to their OIDC verifiers. - verifiers map[string]*oidc.IDTokenVerifier - // lru is an LRU cache of recently used verifiers for our meta issuers. - lru *lru.TwoQueueCache -} - -type OIDCIssuer struct { - IssuerURL string `json:"IssuerURL,omitempty"` - ClientID string `json:"ClientID"` - Type IssuerType `json:"Type"` - IssuerClaim string `json:"IssuerClaim,omitempty"` -} - -func metaRegex(issuer string) (*regexp.Regexp, error) { - // Quote all of the "meta" characters like `.` to avoid - // those literal characters in the URL matching any character. 
- // This will ALSO quote `*`, so we replace the quoted version. - quoted := regexp.QuoteMeta(issuer) - - // Replace the quoted `*` with a regular expression that - // will match alpha-numeric parts with common additional - // "special" characters. - replaced := strings.ReplaceAll(quoted, regexp.QuoteMeta("*"), "[-_a-zA-Z0-9]+") - - // Compile into a regular expression. - return regexp.Compile(replaced) -} - -// GetIssuer looks up the issuer configuration for an `issuerURL` -// coming from an incoming OIDC token. If no matching configuration -// is found, then it returns `false`. -func (fc *FulcioConfig) GetIssuer(issuerURL string) (OIDCIssuer, bool) { - iss, ok := fc.OIDCIssuers[issuerURL] - if ok { - return iss, ok - } - - for meta, iss := range fc.MetaIssuers { - re, err := metaRegex(meta) - if err != nil { - continue // Shouldn't happen, we check parsing the config - } - if re.MatchString(issuerURL) { - // If it matches, then return a concrete OIDCIssuer - // configuration for this issuer URL. - return OIDCIssuer{ - IssuerURL: issuerURL, - ClientID: iss.ClientID, - Type: iss.Type, - IssuerClaim: iss.IssuerClaim, - }, true - } - } - - return OIDCIssuer{}, false -} - -// GetVerifier fetches a token verifier for the given `issuerURL` -// coming from an incoming OIDC token. If no matching configuration -// is found, then it returns `false`. -func (fc *FulcioConfig) GetVerifier(issuerURL string) (*oidc.IDTokenVerifier, bool) { - // Look up our fixed issuer verifiers - v, ok := fc.verifiers[issuerURL] - if ok { - return v, true - } - - // Look in the LRU cache for a verifier - untyped, ok := fc.lru.Get(issuerURL) - if ok { - return untyped.(*oidc.IDTokenVerifier), true - } - // If this issuer hasn't been recently used, then create a new verifier - // and add it to the LRU cache. 
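The meta-issuer templating above works by quoting the whole URL and then re-opening the quoted `*` as a restricted character class, so only single hostname or path segments can match. A runnable sketch of that expansion:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Same construction as metaRegex: quote everything, then replace the
	// quoted '*' with a character class that cannot cross '.' or '/'.
	issuer := "https://oidc.eks.*.amazonaws.com/id/*"
	quoted := regexp.QuoteMeta(issuer)
	pattern := strings.ReplaceAll(quoted, regexp.QuoteMeta("*"), "[-_a-zA-Z0-9]+")
	re := regexp.MustCompile(pattern)

	fmt.Println(re.MatchString("https://oidc.eks.us-west-2.amazonaws.com/id/B02C93B6A2D30341AD01E1B6D48164CB")) // true
	fmt.Println(re.MatchString("https://oidc.eks.us-west-2.evil.com/id/x"))                                     // false
}
```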
- - iss, ok := fc.GetIssuer(issuerURL) - if !ok { - return nil, false - } - - provider, err := oidc.NewProvider(context.Background(), issuerURL) - if err != nil { - log.Logger.Warnf("Failed to create provider for issuer URL %q: %v", issuerURL, err) - return nil, false - } - verifier := provider.Verifier(&oidc.Config{ClientID: iss.ClientID}) - fc.lru.Add(issuerURL, verifier) - return verifier, true -} - -func (fc *FulcioConfig) prepare() error { - fc.verifiers = make(map[string]*oidc.IDTokenVerifier, len(fc.OIDCIssuers)) - for _, iss := range fc.OIDCIssuers { - provider, err := oidc.NewProvider(context.Background(), iss.IssuerURL) - if err != nil { - return err - } - fc.verifiers[iss.IssuerURL] = provider.Verifier(&oidc.Config{ClientID: iss.ClientID}) - } - - cache, err := lru.New2Q(100 /* size */) - if err != nil { - return err - } - fc.lru = cache - return nil -} - -type IssuerType string - -const ( - IssuerTypeEmail = "email" - IssuerTypeGithubWorkflow = "github-workflow" - IssuerTypeKubernetes = "kubernetes" - IssuerTypeSpiffe = "spiffe" -) - -func parseConfig(b []byte) (cfg *FulcioConfig, err error) { - cfg = &FulcioConfig{} - if err := json.Unmarshal(b, cfg); err != nil { - return nil, err - } - return cfg, nil -} - -var DefaultConfig = &FulcioConfig{ - OIDCIssuers: map[string]OIDCIssuer{ - "https://oauth2.sigstore.dev/auth": { - IssuerURL: "https://oauth2.sigstore.dev/auth", - ClientID: "sigstore", - IssuerClaim: "$.federated_claims.connector_id", - Type: IssuerTypeEmail, - }, - "https://accounts.google.com": { - IssuerURL: "https://accounts.google.com", - ClientID: "sigstore", - Type: IssuerTypeEmail, - }, - "https://token.actions.githubusercontent.com": { - IssuerURL: "https://token.actions.githubusercontent.com", - ClientID: "sigstore", - Type: IssuerTypeGithubWorkflow, - }, - }, -} - -var originalTransport = http.DefaultTransport - -type configKey struct{} - -func With(ctx context.Context, cfg *FulcioConfig) context.Context { - ctx = context.WithValue(ctx, configKey{}, cfg) - return ctx -} - -func FromContext(ctx context.Context) *FulcioConfig { - untyped := ctx.Value(configKey{}) - if untyped == nil { - return nil - } - return untyped.(*FulcioConfig) -} - -// Load a config from disk, or use defaults -func Load(configPath string) (*FulcioConfig, error) { - if _, err := os.Stat(configPath); os.IsNotExist(err) { - log.Logger.Infof("No config at %s, using defaults: %v", configPath, DefaultConfig) - config := DefaultConfig - if err := config.prepare(); err != nil { - return nil, err - } - return config, nil - } - b, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - return Read(b) -} - -// Read parses the bytes of a config -func Read(b []byte) (*FulcioConfig, error) { - config, err := parseConfig(b) - if err != nil { - return nil, err - } - - if _, ok := config.GetIssuer("https://kubernetes.default.svc"); ok { - // Add the Kubernetes cluster's CA to the system CA pool, and to - // the default transport. 
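parseConfig/Read above unmarshal a JSON document keyed by issuer URL, in the same shape as DefaultConfig. A minimal sketch of such a config and its decoding (the issuer URL and client ID are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A config document of the shape FulcioConfig expects.
	raw := []byte(`{
	  "OIDCIssuers": {
	    "https://accounts.google.com": {
	      "IssuerURL": "https://accounts.google.com",
	      "ClientID": "sigstore",
	      "Type": "email"
	    }
	  }
	}`)
	var cfg struct {
		OIDCIssuers map[string]struct {
			IssuerURL string `json:"IssuerURL"`
			ClientID  string `json:"ClientID"`
			Type      string `json:"Type"`
		} `json:"OIDCIssuers"`
	}
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.OIDCIssuers["https://accounts.google.com"].Type) // email
}
```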
- rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - const k8sCA = "/var/run/fulcio/ca.crt" - certs, err := ioutil.ReadFile(k8sCA) - if err != nil { - return nil, err - } - if ok := rootCAs.AppendCertsFromPEM(certs); !ok { - return nil, err - } - - t := originalTransport.(*http.Transport).Clone() - t.TLSClientConfig.RootCAs = rootCAs - http.DefaultTransport = t - } else { - // If we parse a config that doesn't include a cluster issuer - // signed with the cluster's CA, then restore the original transport - // (in case we overwrote it) - http.DefaultTransport = originalTransport - } - - if err := config.prepare(); err != nil { - return nil, err - } - return config, nil -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/ctl/ctl.go b/vendor/github.com/sigstore/fulcio/pkg/ctl/ctl.go deleted file mode 100644 index 044259c923..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/ctl/ctl.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package ctl - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/sigstore/fulcio/pkg/ca" -) - -const addChainPath = "ct/v1/add-chain" - -type Client struct { - c *http.Client - url string -} - -func New(url string) *Client { - c := &http.Client{Timeout: 30 * time.Second} - return &Client{ - c: c, - url: url, - } -} - -type certChain struct { - Chain []string `json:"chain"` -} - -type CertChainResponse struct { - SctVersion int `json:"sct_version"` - ID string `json:"id"` - Timestamp int64 `json:"timestamp"` - Extensions string `json:"extensions"` - Signature string `json:"signature"` -} - -type ErrorResponse struct { - StatusCode int `json:"statusCode"` - ErrorCode string `json:"errorCode"` - Message string `json:"message"` -} - -func (err *ErrorResponse) Error() string { - if err.ErrorCode == "" { - return fmt.Sprintf("%d CT API error: %s", err.StatusCode, err.Message) - } - return fmt.Sprintf("%d (%s) CT API error: %s", err.StatusCode, err.ErrorCode, err.Message) -} - -func (c *Client) AddChain(csc *ca.CodeSigningCertificate) (*CertChainResponse, error) { - chainjson := &certChain{Chain: []string{ - base64.StdEncoding.EncodeToString(csc.FinalCertificate.Raw), - }} - - for _, c := range csc.FinalChain { - chainjson.Chain = append(chainjson.Chain, base64.StdEncoding.EncodeToString(c.Raw)) - } - jsonStr, err := json.Marshal(chainjson) - if err != nil { - return nil, err - } - - // Send to add-chain on CT log - url := fmt.Sprintf("%s/%s", c.url, addChainPath) - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/json") - resp, err := c.c.Do(req) - if err != nil { - return nil, err - } - - switch resp.StatusCode { - case 200: - var ctlResp CertChainResponse - if err := json.NewDecoder(resp.Body).Decode(&ctlResp); err != nil { - return nil, err - } - return &ctlResp, nil - case 400, 
401, 403, 500: - var errRes ErrorResponse - if err := json.NewDecoder(resp.Body).Decode(&errRes); err != nil { - return nil, err - } - - if errRes.StatusCode == 0 { - errRes.StatusCode = resp.StatusCode - } - return nil, &errRes - default: - return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode) - } -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/log/log.go b/vendor/github.com/sigstore/fulcio/pkg/log/log.go deleted file mode 100644 index befdfa9de7..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/log/log.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package log - -import ( - "context" - "log" - "net/http" - - "github.com/go-chi/chi/middleware" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// Logger set the default logger to development mode -var Logger *zap.SugaredLogger - -func init() { - ConfigureLogger("dev") -} - -func ConfigureLogger(logType string) { - var cfg zap.Config - if logType == "prod" { - cfg = zap.NewProductionConfig() - cfg.EncoderConfig.LevelKey = "severity" - cfg.EncoderConfig.MessageKey = "message" - } else { - cfg = zap.NewDevelopmentConfig() - cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder - } - logger, err := cfg.Build() - if err != nil { - log.Fatalln("createLogger", err) - } - Logger = logger.Sugar() -} - -var CliLogger = createCliLogger() - -func createCliLogger() *zap.SugaredLogger { - cfg := zap.NewDevelopmentConfig() - cfg.EncoderConfig.TimeKey = "" - cfg.EncoderConfig.LevelKey = "" - cfg.DisableCaller = true - logger, err := cfg.Build() - if err != nil { - log.Fatalln("createLogger", err) - } - - return logger.Sugar() -} - -func WithRequestID(ctx context.Context, id string) context.Context { - return context.WithValue(ctx, middleware.RequestIDKey, id) -} - -func RequestIDLogger(r *http.Request) *zap.SugaredLogger { - return ContextLogger(r.Context()) -} - -func ContextLogger(ctx context.Context) *zap.SugaredLogger { - proposedLogger := Logger - if ctx != nil { - if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok { - proposedLogger = proposedLogger.With(zap.String("requestID", ctxRequestID)) - } - } - - return proposedLogger -} diff --git a/vendor/github.com/sigstore/fulcio/pkg/oauthflow/oidc.go b/vendor/github.com/sigstore/fulcio/pkg/oauthflow/oidc.go deleted file mode 100644 index 693ef0e0d1..0000000000 --- a/vendor/github.com/sigstore/fulcio/pkg/oauthflow/oidc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
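ConfigureLogger's "prod" branch above renames zap's level and message keys to Stackdriver-style names on top of the default production config. A standalone sketch of that profile:

```go
package main

import (
	"log"

	"go.uber.org/zap"
)

func main() {
	// Same production profile as ConfigureLogger("prod"): JSON output with
	// "severity" and "message" keys instead of zap's defaults.
	cfg := zap.NewProductionConfig()
	cfg.EncoderConfig.LevelKey = "severity"
	cfg.EncoderConfig.MessageKey = "message"
	logger, err := cfg.Build()
	if err != nil {
		log.Fatalln("createLogger", err)
	}
	sugared := logger.Sugar()
	// A request-scoped field, as RequestIDLogger attaches via zap.String.
	sugared.Infow("request handled", "requestID", "abc123")
}
```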
-// See the License for the specific language governing permissions and -// limitations under the License. -// - -package oauthflow - -import ( - "fmt" - - "github.com/PaesslerAG/jsonpath" - "github.com/coreos/go-oidc/v3/oidc" -) - -func EmailFromIDToken(token *oidc.IDToken) (string, bool, error) { - // Extract custom claims - var claims struct { - Email string `json:"email"` - Verified bool `json:"email_verified"` - } - if err := token.Claims(&claims); err != nil { - return "", false, err - } - - return claims.Email, claims.Verified, nil -} - -func IssuerFromIDToken(token *oidc.IDToken, claimJSONPath string) (string, error) { - if claimJSONPath == "" { - return token.Issuer, nil - } - v := interface{}(nil) - if err := token.Claims(&v); err != nil { - return "", err - } - result, err := jsonpath.Get(claimJSONPath, v) - if err != nil { - return "", err - } - return fmt.Sprintf("%v", result), nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go index f676e7e964..9d2804916d 100644 --- a/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go @@ -33,10 +33,10 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error o := makeOptions(opts...) rt := httptransport.New(url.Host, client.DefaultBasePath, []string{url.Scheme}) - rt.Consumers["application/yaml"] = YamlConsumer() + rt.Consumers["application/json"] = runtime.JSONConsumer() rt.Consumers["application/x-pem-file"] = runtime.TextConsumer() rt.Consumers["application/pem-certificate-chain"] = runtime.TextConsumer() - rt.Producers["application/yaml"] = YamlProducer() + rt.Producers["application/json"] = runtime.JSONProducer() rt.Producers["application/timestamp-query"] = runtime.ByteStreamProducer() rt.Consumers["application/timestamp-reply"] = runtime.ByteStreamConsumer() diff --git a/vendor/github.com/sigstore/rekor/pkg/client/yaml.go b/vendor/github.com/sigstore/rekor/pkg/client/yaml.go deleted file mode 100644 index e738cf9f44..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/client/yaml.go +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
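IssuerFromIDToken above resolves the configured IssuerClaim JSONPath (for example the `$.federated_claims.connector_id` default used for the Dex issuer) against the raw token claims. The same lookup over plain JSON, as a sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/PaesslerAG/jsonpath"
)

func main() {
	// Claims shaped like a Dex-federated token; the connector value is
	// illustrative.
	raw := []byte(`{"federated_claims":{"connector_id":"https://github.com/login/oauth"}}`)
	var v interface{}
	if err := json.Unmarshal(raw, &v); err != nil {
		panic(err)
	}
	issuer, err := jsonpath.Get("$.federated_claims.connector_id", v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", issuer) // https://github.com/login/oauth
}
```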
- -package client - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/ghodss/yaml" - "github.com/go-openapi/runtime" -) - -func YamlConsumer() runtime.Consumer { - return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error { - bytes, err := ioutil.ReadAll(r) - if err != nil { - return err - } - return yaml.Unmarshal(bytes, v) - }) -} - -func YamlProducer() runtime.Producer { - return runtime.ProducerFunc(func(w io.Writer, v interface{}) error { - b, err := yaml.Marshal(v) - if err != nil { - return err - } - _, err = io.Copy(w, bytes.NewReader(b)) - return err - }) -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go index a6a9bf19fe..05c6abb928 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go @@ -70,8 +70,8 @@ func (a *Client) CreateLogEntry(params *CreateLogEntryParams, opts ...ClientOpti ID: "createLogEntry", Method: "POST", PathPattern: "/api/v1/log/entries", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &CreateLogEntryReader{formats: a.formats}, @@ -107,8 +107,8 @@ func (a *Client) GetLogEntryByIndex(params *GetLogEntryByIndexParams, opts ...Cl ID: "getLogEntryByIndex", Method: "GET", PathPattern: "/api/v1/log/entries", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetLogEntryByIndexReader{formats: a.formats}, @@ -146,8 +146,8 @@ func (a *Client) GetLogEntryByUUID(params *GetLogEntryByUUIDParams, opts ...Clie ID: "getLogEntryByUUID", Method: "GET", PathPattern: "/api/v1/log/entries/{entryUUID}", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetLogEntryByUUIDReader{formats: a.formats}, @@ -183,8 +183,8 @@ func (a *Client) SearchLogQuery(params *SearchLogQueryParams, opts ...ClientOpti ID: "searchLogQuery", Method: "POST", PathPattern: "/api/v1/log/entries/retrieve", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &SearchLogQueryReader{formats: a.formats}, diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go index bbe0f579ce..f80b04afbb 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go @@ -61,8 +61,8 @@ func (a *Client) SearchIndex(params *SearchIndexParams, opts ...ClientOption) (* 
ID: "searchIndex", Method: "POST", PathPattern: "/api/v1/index/retrieve", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &SearchIndexReader{formats: a.formats}, diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go index 43c4c5d326..d878ea00d3 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_parameters.go @@ -74,6 +74,13 @@ func NewGetPublicKeyParamsWithHTTPClient(client *http.Client) *GetPublicKeyParam Typically these are written to a http.Request. */ type GetPublicKeyParams struct { + + /* TreeID. + + The tree ID of the tree you wish to get a public key for + */ + TreeID *string + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -127,6 +134,17 @@ func (o *GetPublicKeyParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } +// WithTreeID adds the treeID to the get public key params +func (o *GetPublicKeyParams) WithTreeID(treeID *string) *GetPublicKeyParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get public key params +func (o *GetPublicKeyParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + // WriteToRequest writes these params to a swagger request func (o *GetPublicKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -135,6 +153,23 @@ func (o *GetPublicKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt. } var res []error + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go index dd09cae309..0f780ad9b2 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go @@ -64,7 +64,7 @@ func (a *Client) GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) Method: "GET", PathPattern: "/api/v1/log/publicKey", ProducesMediaTypes: []string{"application/x-pem-file"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetPublicKeyReader{formats: a.formats}, diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go index d2b1cbc904..7cfeaec77b 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/rekor_client.go @@ -30,7 +30,6 @@ import ( "github.com/sigstore/rekor/pkg/generated/client/index" "github.com/sigstore/rekor/pkg/generated/client/pubkey" serverops "github.com/sigstore/rekor/pkg/generated/client/server" - "github.com/sigstore/rekor/pkg/generated/client/timestamp" "github.com/sigstore/rekor/pkg/generated/client/tlog" ) @@ -80,7 +79,6 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) *Rekor { cli.Index = index.New(transport, formats) cli.Pubkey = pubkey.New(transport, formats) cli.Server = serverops.New(transport, formats) - cli.Timestamp = timestamp.New(transport, formats) cli.Tlog = tlog.New(transport, formats) return cli } @@ -134,8 +132,6 @@ type Rekor struct { Server serverops.ClientService - Timestamp timestamp.ClientService - Tlog tlog.ClientService Transport runtime.ClientTransport @@ -148,6 +144,5 @@ func (c *Rekor) SetTransport(transport runtime.ClientTransport) { c.Index.SetTransport(transport) c.Pubkey.SetTransport(transport) c.Server.SetTransport(transport) - c.Timestamp.SetTransport(transport) c.Tlog.SetTransport(transport) } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/server/server_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/server/server_client.go index 2bd9b8864a..f48e55062f 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/server/server_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/server/server_client.go @@ -61,8 +61,8 @@ func (a *Client) GetRekorVersion(params *GetRekorVersionParams, opts ...ClientOp ID: "getRekorVersion", Method: "GET", PathPattern: "/api/v1/version", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetRekorVersionReader{formats: a.formats}, diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_parameters.go deleted file mode 100644 index 5fee86598d..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_parameters.go +++ /dev/null @@ -1,142 +0,0 @@ -// Code generated by 
go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package timestamp - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetTimestampCertChainParams creates a new GetTimestampCertChainParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetTimestampCertChainParams() *GetTimestampCertChainParams { - return &GetTimestampCertChainParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetTimestampCertChainParamsWithTimeout creates a new GetTimestampCertChainParams object -// with the ability to set a timeout on a request. -func NewGetTimestampCertChainParamsWithTimeout(timeout time.Duration) *GetTimestampCertChainParams { - return &GetTimestampCertChainParams{ - timeout: timeout, - } -} - -// NewGetTimestampCertChainParamsWithContext creates a new GetTimestampCertChainParams object -// with the ability to set a context for a request. -func NewGetTimestampCertChainParamsWithContext(ctx context.Context) *GetTimestampCertChainParams { - return &GetTimestampCertChainParams{ - Context: ctx, - } -} - -// NewGetTimestampCertChainParamsWithHTTPClient creates a new GetTimestampCertChainParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetTimestampCertChainParamsWithHTTPClient(client *http.Client) *GetTimestampCertChainParams { - return &GetTimestampCertChainParams{ - HTTPClient: client, - } -} - -/* GetTimestampCertChainParams contains all the parameters to send to the API endpoint - for the get timestamp cert chain operation. - - Typically these are written to a http.Request. -*/ -type GetTimestampCertChainParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get timestamp cert chain params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetTimestampCertChainParams) WithDefaults() *GetTimestampCertChainParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get timestamp cert chain params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetTimestampCertChainParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) WithTimeout(timeout time.Duration) *GetTimestampCertChainParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) WithContext(ctx context.Context) *GetTimestampCertChainParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) WithHTTPClient(client *http.Client) *GetTimestampCertChainParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get timestamp cert chain params -func (o *GetTimestampCertChainParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetTimestampCertChainParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_responses.go deleted file mode 100644 index 34bbf52844..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_cert_chain_responses.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package timestamp - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/sigstore/rekor/pkg/generated/models" -) - -// GetTimestampCertChainReader is a Reader for the GetTimestampCertChain structure. -type GetTimestampCertChainReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetTimestampCertChainReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetTimestampCertChainOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetTimestampCertChainNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - result := NewGetTimestampCertChainDefault(response.Code()) - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - if response.Code()/100 == 2 { - return result, nil - } - return nil, result - } -} - -// NewGetTimestampCertChainOK creates a GetTimestampCertChainOK with default headers values -func NewGetTimestampCertChainOK() *GetTimestampCertChainOK { - return &GetTimestampCertChainOK{} -} - -/* GetTimestampCertChainOK describes a response with status code 200, with default header values. - -The PEM encoded cert chain -*/ -type GetTimestampCertChainOK struct { - Payload string -} - -func (o *GetTimestampCertChainOK) Error() string { - return fmt.Sprintf("[GET /api/v1/timestamp/certchain][%d] getTimestampCertChainOK %+v", 200, o.Payload) -} -func (o *GetTimestampCertChainOK) GetPayload() string { - return o.Payload -} - -func (o *GetTimestampCertChainOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetTimestampCertChainNotFound creates a GetTimestampCertChainNotFound with default headers values -func NewGetTimestampCertChainNotFound() *GetTimestampCertChainNotFound { - return &GetTimestampCertChainNotFound{} -} - -/* GetTimestampCertChainNotFound describes a response with status code 404, with default header values. - -The content requested could not be found -*/ -type GetTimestampCertChainNotFound struct { -} - -func (o *GetTimestampCertChainNotFound) Error() string { - return fmt.Sprintf("[GET /api/v1/timestamp/certchain][%d] getTimestampCertChainNotFound ", 404) -} - -func (o *GetTimestampCertChainNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetTimestampCertChainDefault creates a GetTimestampCertChainDefault with default headers values -func NewGetTimestampCertChainDefault(code int) *GetTimestampCertChainDefault { - return &GetTimestampCertChainDefault{ - _statusCode: code, - } -} - -/* GetTimestampCertChainDefault describes a response with status code -1, with default header values. 
- -There was an internal error in the server while processing the request -*/ -type GetTimestampCertChainDefault struct { - _statusCode int - - Payload *models.Error -} - -// Code gets the status code for the get timestamp cert chain default response -func (o *GetTimestampCertChainDefault) Code() int { - return o._statusCode -} - -func (o *GetTimestampCertChainDefault) Error() string { - return fmt.Sprintf("[GET /api/v1/timestamp/certchain][%d] getTimestampCertChain default %+v", o._statusCode, o.Payload) -} -func (o *GetTimestampCertChainDefault) GetPayload() *models.Error { - return o.Payload -} - -func (o *GetTimestampCertChainDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Error) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_parameters.go deleted file mode 100644 index 896564766e..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_parameters.go +++ /dev/null @@ -1,165 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package timestamp - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "io" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetTimestampResponseParams creates a new GetTimestampResponseParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetTimestampResponseParams() *GetTimestampResponseParams { - return &GetTimestampResponseParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetTimestampResponseParamsWithTimeout creates a new GetTimestampResponseParams object -// with the ability to set a timeout on a request. -func NewGetTimestampResponseParamsWithTimeout(timeout time.Duration) *GetTimestampResponseParams { - return &GetTimestampResponseParams{ - timeout: timeout, - } -} - -// NewGetTimestampResponseParamsWithContext creates a new GetTimestampResponseParams object -// with the ability to set a context for a request. 
-func NewGetTimestampResponseParamsWithContext(ctx context.Context) *GetTimestampResponseParams { - return &GetTimestampResponseParams{ - Context: ctx, - } -} - -// NewGetTimestampResponseParamsWithHTTPClient creates a new GetTimestampResponseParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetTimestampResponseParamsWithHTTPClient(client *http.Client) *GetTimestampResponseParams { - return &GetTimestampResponseParams{ - HTTPClient: client, - } -} - -/* GetTimestampResponseParams contains all the parameters to send to the API endpoint - for the get timestamp response operation. - - Typically these are written to a http.Request. -*/ -type GetTimestampResponseParams struct { - - // Request. - // - // Format: binary - Request io.ReadCloser - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get timestamp response params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetTimestampResponseParams) WithDefaults() *GetTimestampResponseParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get timestamp response params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetTimestampResponseParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get timestamp response params -func (o *GetTimestampResponseParams) WithTimeout(timeout time.Duration) *GetTimestampResponseParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get timestamp response params -func (o *GetTimestampResponseParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get timestamp response params -func (o *GetTimestampResponseParams) WithContext(ctx context.Context) *GetTimestampResponseParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get timestamp response params -func (o *GetTimestampResponseParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get timestamp response params -func (o *GetTimestampResponseParams) WithHTTPClient(client *http.Client) *GetTimestampResponseParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get timestamp response params -func (o *GetTimestampResponseParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithRequest adds the request to the get timestamp response params -func (o *GetTimestampResponseParams) WithRequest(request io.ReadCloser) *GetTimestampResponseParams { - o.SetRequest(request) - return o -} - -// SetRequest adds the request to the get timestamp response params -func (o *GetTimestampResponseParams) SetRequest(request io.ReadCloser) { - o.Request = request -} - -// WriteToRequest writes these params to a swagger request -func (o *GetTimestampResponseParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Request != nil { - if err := r.SetBodyParam(o.Request); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_responses.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_responses.go deleted file mode 100644 index 894570fa8f..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/get_timestamp_response_responses.go +++ /dev/null @@ -1,244 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package timestamp - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - - "github.com/sigstore/rekor/pkg/generated/models" -) - -// GetTimestampResponseReader is a Reader for the GetTimestampResponse structure. -type GetTimestampResponseReader struct { - formats strfmt.Registry - writer io.Writer -} - -// ReadResponse reads a server response into the received o. -func (o *GetTimestampResponseReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 201: - result := NewGetTimestampResponseCreated(o.writer) - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetTimestampResponseBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 501: - result := NewGetTimestampResponseNotImplemented() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - result := NewGetTimestampResponseDefault(response.Code()) - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - if response.Code()/100 == 2 { - return result, nil - } - return nil, result - } -} - -// NewGetTimestampResponseCreated creates a GetTimestampResponseCreated with default headers values -func NewGetTimestampResponseCreated(writer io.Writer) *GetTimestampResponseCreated { - return &GetTimestampResponseCreated{ - - Payload: writer, - } -} - -/* GetTimestampResponseCreated describes a response with status code 201, with default header values. 
- -Returns a timestamp response and the location of the log entry in the transparency log -*/ -type GetTimestampResponseCreated struct { - - /* UUID of the log entry made for the timestamp response - */ - ETag string - - /* Log index of the log entry made for the timestamp response - */ - Index int64 - - /* URI location of the log entry made for the timestamp response - - Format: uri - */ - Location strfmt.URI - - Payload io.Writer -} - -func (o *GetTimestampResponseCreated) Error() string { - return fmt.Sprintf("[POST /api/v1/timestamp][%d] getTimestampResponseCreated %+v", 201, o.Payload) -} -func (o *GetTimestampResponseCreated) GetPayload() io.Writer { - return o.Payload -} - -func (o *GetTimestampResponseCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // hydrates response header ETag - hdrETag := response.GetHeader("ETag") - - if hdrETag != "" { - o.ETag = hdrETag - } - - // hydrates response header Index - hdrIndex := response.GetHeader("Index") - - if hdrIndex != "" { - valindex, err := swag.ConvertInt64(hdrIndex) - if err != nil { - return errors.InvalidType("Index", "header", "int64", hdrIndex) - } - o.Index = valindex - } - - // hydrates response header Location - hdrLocation := response.GetHeader("Location") - - if hdrLocation != "" { - vallocation, err := formats.Parse("uri", hdrLocation) - if err != nil { - return errors.InvalidType("Location", "header", "strfmt.URI", hdrLocation) - } - o.Location = *(vallocation.(*strfmt.URI)) - } - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetTimestampResponseBadRequest creates a GetTimestampResponseBadRequest with default headers values -func NewGetTimestampResponseBadRequest() *GetTimestampResponseBadRequest { - return &GetTimestampResponseBadRequest{} -} - -/* GetTimestampResponseBadRequest describes a response with status code 400, with default header values. - -The content supplied to the server was invalid -*/ -type GetTimestampResponseBadRequest struct { - Payload *models.Error -} - -func (o *GetTimestampResponseBadRequest) Error() string { - return fmt.Sprintf("[POST /api/v1/timestamp][%d] getTimestampResponseBadRequest %+v", 400, o.Payload) -} -func (o *GetTimestampResponseBadRequest) GetPayload() *models.Error { - return o.Payload -} - -func (o *GetTimestampResponseBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Error) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetTimestampResponseNotImplemented creates a GetTimestampResponseNotImplemented with default headers values -func NewGetTimestampResponseNotImplemented() *GetTimestampResponseNotImplemented { - return &GetTimestampResponseNotImplemented{} -} - -/* GetTimestampResponseNotImplemented describes a response with status code 501, with default header values. 
- -The content requested is not implemented -*/ -type GetTimestampResponseNotImplemented struct { -} - -func (o *GetTimestampResponseNotImplemented) Error() string { - return fmt.Sprintf("[POST /api/v1/timestamp][%d] getTimestampResponseNotImplemented ", 501) -} - -func (o *GetTimestampResponseNotImplemented) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetTimestampResponseDefault creates a GetTimestampResponseDefault with default headers values -func NewGetTimestampResponseDefault(code int) *GetTimestampResponseDefault { - return &GetTimestampResponseDefault{ - _statusCode: code, - } -} - -/* GetTimestampResponseDefault describes a response with status code -1, with default header values. - -There was an internal error in the server while processing the request -*/ -type GetTimestampResponseDefault struct { - _statusCode int - - Payload *models.Error -} - -// Code gets the status code for the get timestamp response default response -func (o *GetTimestampResponseDefault) Code() int { - return o._statusCode -} - -func (o *GetTimestampResponseDefault) Error() string { - return fmt.Sprintf("[POST /api/v1/timestamp][%d] getTimestampResponse default %+v", o._statusCode, o.Payload) -} -func (o *GetTimestampResponseDefault) GetPayload() *models.Error { - return o.Payload -} - -func (o *GetTimestampResponseDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Error) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/timestamp_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/timestamp_client.go deleted file mode 100644 index 6e098768e2..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/timestamp/timestamp_client.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package timestamp - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new timestamp API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for timestamp API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetTimestampCertChain(params *GetTimestampCertChainParams, opts ...ClientOption) (*GetTimestampCertChainOK, error) - - GetTimestampResponse(params *GetTimestampResponseParams, writer io.Writer, opts ...ClientOption) (*GetTimestampResponseCreated, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* - GetTimestampCertChain retrieves the certificate chain for timestamping that can be used to validate trusted timestamps - - Returns the certificate chain for timestamping that can be used to validate trusted timestamps -*/ -func (a *Client) GetTimestampCertChain(params *GetTimestampCertChainParams, opts ...ClientOption) (*GetTimestampCertChainOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetTimestampCertChainParams() - } - op := &runtime.ClientOperation{ - ID: "getTimestampCertChain", - Method: "GET", - PathPattern: "/api/v1/timestamp/certchain", - ProducesMediaTypes: []string{"application/pem-certificate-chain"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetTimestampCertChainReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetTimestampCertChainOK) - if ok { - return success, nil - } - // unexpected success response - unexpectedSuccess := result.(*GetTimestampCertChainDefault) - return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) -} - -/* - GetTimestampResponse generates a new timestamp response and creates a new log entry for the timestamp in the transparency log -*/ -func (a *Client) GetTimestampResponse(params *GetTimestampResponseParams, writer io.Writer, opts ...ClientOption) (*GetTimestampResponseCreated, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetTimestampResponseParams() - } - op := &runtime.ClientOperation{ - ID: "getTimestampResponse", - Method: "POST", - PathPattern: "/api/v1/timestamp", - ProducesMediaTypes: []string{"application/timestamp-reply"}, - ConsumesMediaTypes: []string{"application/timestamp-query"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetTimestampResponseReader{formats: a.formats, writer: writer}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetTimestampResponseCreated) - if ok { - return success, nil - } - // unexpected success response - unexpectedSuccess := result.(*GetTimestampResponseDefault) - return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport 
runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go index 403dd3826f..8d504b6cfc 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_parameters.go @@ -91,6 +91,12 @@ type GetLogProofParams struct { */ LastSize int64 + /* TreeID. + + The tree ID of the tree that you wish to prove consistency for + */ + TreeID *string + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -177,6 +183,17 @@ func (o *GetLogProofParams) SetLastSize(lastSize int64) { o.LastSize = lastSize } +// WithTreeID adds the treeID to the get log proof params +func (o *GetLogProofParams) WithTreeID(treeID *string) *GetLogProofParams { + o.SetTreeID(treeID) + return o +} + +// SetTreeID adds the treeId to the get log proof params +func (o *GetLogProofParams) SetTreeID(treeID *string) { + o.TreeID = treeID +} + // WriteToRequest writes these params to a swagger request func (o *GetLogProofParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -212,6 +229,23 @@ func (o *GetLogProofParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.R } } + if o.TreeID != nil { + + // query param treeID + var qrTreeID string + + if o.TreeID != nil { + qrTreeID = *o.TreeID + } + qTreeID := qrTreeID + if qTreeID != "" { + + if err := r.SetQueryParam("treeID", qTreeID); err != nil { + return err + } + } + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go index 2fbb024d8d..7fd8ffa195 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go @@ -65,8 +65,8 @@ func (a *Client) GetLogInfo(params *GetLogInfoParams, opts ...ClientOption) (*Ge ID: "getLogInfo", Method: "GET", PathPattern: "/api/v1/log", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetLogInfoReader{formats: a.formats}, @@ -104,8 +104,8 @@ func (a *Client) GetLogProof(params *GetLogProofParams, opts ...ClientOption) (* ID: "getLogProof", Method: "GET", PathPattern: "/api/v1/log/proof", - ProducesMediaTypes: []string{"application/json;q=1", "application/yaml"}, - ConsumesMediaTypes: []string{"application/json", "application/yaml"}, + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, Schemes: []string{"http"}, Params: params, Reader: &GetLogProofReader{formats: a.formats}, diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go index a50f5fbc98..999a486975 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go @@ -188,10 +188,6 @@ type AlpineV001SchemaPackage struct { // Values of the .PKGINFO key / value pairs 
// Read Only: true Pkginfo map[string]string `json:"pkginfo,omitempty"` - - // Specifies the location of the package; if this is specified, a hash value must also be provided - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this alpine v001 schema package @@ -202,10 +198,6 @@ func (m *AlpineV001SchemaPackage) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -231,18 +223,6 @@ func (m *AlpineV001SchemaPackage) validateHash(formats strfmt.Registry) error { return nil } -func (m *AlpineV001SchemaPackage) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("package"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this alpine v001 schema package based on the context it is used func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -382,8 +362,13 @@ func (m *AlpineV001SchemaPackageHash) validateValue(formats strfmt.Registry) err return nil } -// ContextValidate validates this alpine v001 schema package hash based on context it is used +// ContextValidate validate this alpine v001 schema package hash based on the context it is used func (m *AlpineV001SchemaPackageHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } return nil } @@ -411,19 +396,16 @@ func (m *AlpineV001SchemaPackageHash) UnmarshalBinary(b []byte) error { type AlpineV001SchemaPublicKey struct { // Specifies the content of the public key inline within the document + // Required: true // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // Specifies the location of the public key - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + Content *strfmt.Base64 `json:"content"` } // Validate validates this alpine v001 schema public key func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateURL(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } @@ -433,12 +415,9 @@ func (m *AlpineV001SchemaPublicKey) Validate(formats strfmt.Registry) error { return nil } -func (m *AlpineV001SchemaPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } +func (m *AlpineV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { return err } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go new file mode 100644 index 0000000000..8de4083baf --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+ "github.com/go-openapi/validate"
+)
+
+// Cose COSE object
+//
+// swagger:model cose
+type Cose struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec CoseSchema `json:"spec"`
+}
+
+// Kind gets the kind of this subtype
+func (m *Cose) Kind() string {
+ return "cose"
+}
+
+// SetKind sets the kind of this subtype
+func (m *Cose) SetKind(val string) {
+}
+
+// UnmarshalJSON unmarshals this object with a polymorphic type from a JSON structure
+func (m *Cose) UnmarshalJSON(raw []byte) error {
+ var data struct {
+
+ // api version
+ // Required: true
+ // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$
+ APIVersion *string `json:"apiVersion"`
+
+ // spec
+ // Required: true
+ Spec CoseSchema `json:"spec"`
+ }
+ buf := bytes.NewBuffer(raw)
+ dec := json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&data); err != nil {
+ return err
+ }
+
+ var base struct {
+ /* Just the base type fields. Used for unmarshalling polymorphic types.*/
+
+ Kind string `json:"kind"`
+ }
+ buf = bytes.NewBuffer(raw)
+ dec = json.NewDecoder(buf)
+ dec.UseNumber()
+
+ if err := dec.Decode(&base); err != nil {
+ return err
+ }
+
+ var result Cose
+
+ if base.Kind != result.Kind() {
+ /* Not the type we're looking for.
*/ + return errors.New(422, "invalid kind value: %q", base.Kind) + } + + result.APIVersion = data.APIVersion + result.Spec = data.Spec + + *m = result + + return nil +} + +// MarshalJSON marshals this object with a polymorphic type to a JSON structure +func (m Cose) MarshalJSON() ([]byte, error) { + var b1, b2, b3 []byte + var err error + b1, err = json.Marshal(struct { + + // api version + // Required: true + // Pattern: ^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + APIVersion *string `json:"apiVersion"` + + // spec + // Required: true + Spec CoseSchema `json:"spec"` + }{ + + APIVersion: m.APIVersion, + + Spec: m.Spec, + }) + if err != nil { + return nil, err + } + b2, err = json.Marshal(struct { + Kind string `json:"kind"` + }{ + + Kind: m.Kind(), + }) + if err != nil { + return nil, err + } + + return swag.ConcatJSON(b1, b2, b3), nil +} + +// Validate validates this cose +func (m *Cose) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAPIVersion(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSpec(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Cose) validateAPIVersion(formats strfmt.Registry) error { + + if err := validate.Required("apiVersion", "body", m.APIVersion); err != nil { + return err + } + + if err := validate.Pattern("apiVersion", "body", *m.APIVersion, `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`); err != nil { + return err + } + + return nil +} + +func (m *Cose) validateSpec(formats strfmt.Registry) error { + + if m.Spec == nil { + return errors.Required("spec", "body", nil) + } + + return nil +} + +// ContextValidate validate this cose based on the context it is used +func (m *Cose) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *Cose) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Cose) UnmarshalBinary(b []byte) error { + var res Cose + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go new file mode 100644 index 0000000000..1d4f0dca13 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_schema.go @@ -0,0 +1,29 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CoseSchema COSE Schema +// +// COSE for Rekord objects +// +// swagger:model coseSchema +type CoseSchema interface{} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go new file mode 100644 index 0000000000..caadb44d4c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -0,0 +1,508 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// CoseV001Schema cose v0.0.1 Schema +// +// Schema for cose object +// +// swagger:model coseV001Schema +type CoseV001Schema struct { + + // data + // Required: true + Data *CoseV001SchemaData `json:"data"` + + // The COSE Sign1 Message + // Format: byte + Message strfmt.Base64 `json:"message,omitempty"` + + // The public key that can verify the signature + // Required: true + // Format: byte + PublicKey *strfmt.Base64 `json:"publicKey"` +} + +// Validate validates this cose v001 schema +func (m *CoseV001Schema) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateData(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePublicKey(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CoseV001Schema) validateData(formats strfmt.Registry) error { + + if err := validate.Required("data", "body", m.Data); err != nil { + return err + } + + if m.Data != nil { + if err := m.Data.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data") + } + return err + } + } + + return nil +} + +func (m *CoseV001Schema) validatePublicKey(formats strfmt.Registry) error { + + if err := validate.Required("publicKey", "body", m.PublicKey); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema based on the context it is used +func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateData(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { + + if m.Data != nil { + if err := m.Data.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001Schema) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001Schema) UnmarshalBinary(b []byte) error { + var res CoseV001Schema + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaData Information about the content associated with the entry +// +// swagger:model CoseV001SchemaData +type CoseV001SchemaData struct { + + // Specifies the additional authenticated data required to verify the signature + // Format: byte + Aad strfmt.Base64 `json:"aad,omitempty"` + + // envelope hash + EnvelopeHash *CoseV001SchemaDataEnvelopeHash `json:"envelopeHash,omitempty"` + + // payload hash + PayloadHash *CoseV001SchemaDataPayloadHash `json:"payloadHash,omitempty"` +} + +// Validate validates this cose v001 schema data +func (m *CoseV001SchemaData) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEnvelopeHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001SchemaData) validateEnvelopeHash(formats strfmt.Registry) error { + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data" + "." + "envelopeHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data" + "." 
+ "envelopeHash") + } + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data" + "." + "payloadHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data" + "." + "payloadHash") + } + return err + } + } + + return nil +} + +// ContextValidate validate this cose v001 schema data based on the context it is used +func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateEnvelopeHash(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { + + if m.EnvelopeHash != nil { + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data" + "." + "envelopeHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data" + "." + "envelopeHash") + } + return err + } + } + + return nil +} + +func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("data" + "." + "payloadHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("data" + "." + "payloadHash") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaData) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaData) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaData + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataEnvelopeHash Specifies the hash algorithm and value for the COSE envelope +// +// swagger:model CoseV001SchemaDataEnvelopeHash +type CoseV001SchemaDataEnvelopeHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: [sha256] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data envelope hash +func (m *CoseV001SchemaDataEnvelopeHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum = append(coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataEnvelopeHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataEnvelopeHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataEnvelopeHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"envelopeHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataEnvelopeHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"envelopeHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data envelope hash based on the context it is used +func (m *CoseV001SchemaDataEnvelopeHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataEnvelopeHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataEnvelopeHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// CoseV001SchemaDataPayloadHash Specifies the hash algorithm and value for the content +// +// swagger:model CoseV001SchemaDataPayloadHash +type CoseV001SchemaDataPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: [sha256] + Algorithm *string `json:"algorithm"` + + // The hash value for the content + // Required: true + Value *string `json:"value"` +} + +// Validate validates this cose v001 schema data payload hash +func (m *CoseV001SchemaDataPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum = append(coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // CoseV001SchemaDataPayloadHashAlgorithmSha256 captures enum value "sha256" + CoseV001SchemaDataPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, coseV001SchemaDataPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("data"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *CoseV001SchemaDataPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("data"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this cose v001 schema data payload hash based on the context it is used +func (m *CoseV001SchemaDataPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CoseV001SchemaDataPayloadHash) UnmarshalBinary(b []byte) error { + var res CoseV001SchemaDataPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go index 50fd24827e..fb9edfb389 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -462,12 +462,12 @@ func (m *HashedrekordV001SchemaSignature) UnmarshalBinary(b []byte) error { return nil } -// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature +// HashedrekordV001SchemaSignaturePublicKey The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information // // swagger:model HashedrekordV001SchemaSignaturePublicKey type HashedrekordV001SchemaSignaturePublicKey struct { - // Specifies the content of the public key inline within the document + // Specifies the content of the public key or code signing certificate inline within the document // Format: byte Content strfmt.Base64 `json:"content,omitempty"` } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go index c65d84403c..1512dad32f 100644 --- 
a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go @@ -432,10 +432,6 @@ type HelmV001SchemaChartProvenance struct { // signature Signature *HelmV001SchemaChartProvenanceSignature `json:"signature,omitempty"` - - // Specifies the location of the provenance file - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this helm v001 schema chart provenance @@ -446,10 +442,6 @@ func (m *HelmV001SchemaChartProvenance) Validate(formats strfmt.Registry) error res = append(res, err) } - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -475,18 +467,6 @@ func (m *HelmV001SchemaChartProvenance) validateSignature(formats strfmt.Registr return nil } -func (m *HelmV001SchemaChartProvenance) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("chart"+"."+"provenance"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this helm v001 schema chart provenance based on the context it is used func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -617,19 +597,16 @@ func (m *HelmV001SchemaChartProvenanceSignature) UnmarshalBinary(b []byte) error type HelmV001SchemaPublicKey struct { // Specifies the content of the public key inline within the document + // Required: true // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // Specifies the location of the public key - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + Content *strfmt.Base64 `json:"content"` } // Validate validates this helm v001 schema public key func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateURL(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } @@ -639,12 +616,9 @@ func (m *HelmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { return nil } -func (m *HelmV001SchemaPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } +func (m *HelmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { return err } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go new file mode 100644 index 0000000000..c555eb2da6 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/inactive_shard_log_info.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// +// Copyright 2021 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// InactiveShardLogInfo inactive shard log info +// +// swagger:model InactiveShardLogInfo +type InactiveShardLogInfo struct { + + // The current hash value stored at the root of the merkle tree + // Required: true + // Pattern: ^[0-9a-fA-F]{64}$ + RootHash *string `json:"rootHash"` + + // The current signed tree head + // Required: true + SignedTreeHead *string `json:"signedTreeHead"` + + // The current treeID + // Required: true + // Pattern: ^[0-9]+$ + TreeID *string `json:"treeID"` + + // The current number of nodes in the merkle tree + // Required: true + // Minimum: 1 + TreeSize *int64 `json:"treeSize"` +} + +// Validate validates this inactive shard log info +func (m *InactiveShardLogInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRootHash(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSignedTreeHead(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTreeSize(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *InactiveShardLogInfo) validateRootHash(formats strfmt.Registry) error { + + if err := validate.Required("rootHash", "body", m.RootHash); err != nil { + return err + } + + if err := validate.Pattern("rootHash", "body", *m.RootHash, `^[0-9a-fA-F]{64}$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateSignedTreeHead(formats strfmt.Registry) error { + + if err := validate.Required("signedTreeHead", "body", m.SignedTreeHead); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeID(formats strfmt.Registry) error { + + if err := validate.Required("treeID", "body", m.TreeID); err != nil { + return err + } + + if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { + return err + } + + return nil +} + +func (m *InactiveShardLogInfo) validateTreeSize(formats strfmt.Registry) error { + + if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { + return err + } + + if err := validate.MinimumInt("treeSize", "body", *m.TreeSize, 1, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this inactive shard log info based on context it is used +func (m *InactiveShardLogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *InactiveShardLogInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *InactiveShardLogInfo) UnmarshalBinary(b []byte) error { + var res InactiveShardLogInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go index 58fc5fd5dd..e43e699f79 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go @@ -153,6 +153,9 @@ type IntotoV001SchemaContent struct { // hash Hash *IntotoV001SchemaContentHash `json:"hash,omitempty"` + + // payload hash + PayloadHash *IntotoV001SchemaContentPayloadHash `json:"payloadHash,omitempty"` } // Validate validates this intoto v001 schema content @@ -163,6 +166,10 @@ func (m *IntotoV001SchemaContent) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validatePayloadHash(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -188,6 +195,25 @@ func (m *IntotoV001SchemaContent) validateHash(formats strfmt.Registry) error { return nil } +func (m *IntotoV001SchemaContent) validatePayloadHash(formats strfmt.Registry) error { + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + + if m.PayloadHash != nil { + if err := m.PayloadHash.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("content" + "." + "payloadHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("content" + "." 
+ "payloadHash") + } + return err + } + } + + return nil +} + // ContextValidate validate this intoto v001 schema content based on the context it is used func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -196,6 +222,10 @@ func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats s res = append(res, err) } + if err := m.contextValidatePayloadHash(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -218,6 +248,22 @@ func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, forma return nil } +func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { + + if m.PayloadHash != nil { + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("content" + "." + "payloadHash") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("content" + "." + "payloadHash") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *IntotoV001SchemaContent) MarshalBinary() ([]byte, error) { if m == nil { @@ -345,3 +391,113 @@ func (m *IntotoV001SchemaContentHash) UnmarshalBinary(b []byte) error { *m = res return nil } + +// IntotoV001SchemaContentPayloadHash Specifies the hash algorithm and value covering the payload within the DSSE envelope +// +// swagger:model IntotoV001SchemaContentPayloadHash +type IntotoV001SchemaContentPayloadHash struct { + + // The hashing function used to compute the hash value + // Required: true + // Enum: [sha256] + Algorithm *string `json:"algorithm"` + + // The hash value for the envelope's payload + // Required: true + Value *string `json:"value"` +} + +// Validate validates this intoto v001 schema content payload hash +func (m *IntotoV001SchemaContentPayloadHash) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAlgorithm(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +var intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["sha256"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum = append(intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, v) + } +} + +const ( + + // IntotoV001SchemaContentPayloadHashAlgorithmSha256 captures enum value "sha256" + IntotoV001SchemaContentPayloadHashAlgorithmSha256 string = "sha256" +) + +// prop value enum +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithmEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, intotoV001SchemaContentPayloadHashTypeAlgorithmPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateAlgorithm(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"algorithm", "body", m.Algorithm); err != nil { + return err + } + + // value enum + if err := m.validateAlgorithmEnum("content"+"."+"payloadHash"+"."+"algorithm", "body", *m.Algorithm); err != nil { + return err + } + + return nil +} + +func (m *IntotoV001SchemaContentPayloadHash) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("content"+"."+"payloadHash"+"."+"value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this intoto v001 schema content payload hash based on the context it is used +func (m *IntotoV001SchemaContentPayloadHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IntotoV001SchemaContentPayloadHash) UnmarshalBinary(b []byte) error { + var res IntotoV001SchemaContentPayloadHash + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go index e016e96c49..24f6d3b2bc 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go @@ -182,10 +182,6 @@ type JarV001SchemaArchive struct { // hash Hash *JarV001SchemaArchiveHash `json:"hash,omitempty"` - - // Specifies the location of the archive; if this is specified, a hash value must also be provided - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this jar v001 schema archive @@ -196,10 +192,6 @@ func (m *JarV001SchemaArchive) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -225,18 +217,6 @@ func (m *JarV001SchemaArchive) validateHash(formats strfmt.Registry) error { return nil } -func (m *JarV001SchemaArchive) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("archive"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this jar v001 schema archive based on the context it is used func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go index b576bf015c..33178bc563 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go @@ -23,6 +23,7 @@ package models import ( "context" + "strconv" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" @@ -35,6 +36,9 @@ import ( // swagger:model LogInfo type LogInfo struct { + // inactive shards + InactiveShards []*InactiveShardLogInfo `json:"inactiveShards"` + // The current hash value stored at the root of the merkle tree // Required: true // Pattern: ^[0-9a-fA-F]{64}$ @@ -44,6 +48,11 @@ type LogInfo struct { // Required: true SignedTreeHead *string `json:"signedTreeHead"` + // The current treeID + // Required: true + // Pattern: ^[0-9]+$ + TreeID *string `json:"treeID"` + // The current number of nodes in the merkle tree // Required: true // Minimum: 1 @@ -54,6 +63,10 @@ type LogInfo struct { func (m *LogInfo) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateInactiveShards(formats); err != nil { + res = append(res, err) + } + if err := m.validateRootHash(formats); err != nil { res = append(res, err) } @@ -62,6 +75,10 @@ func (m *LogInfo) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateTreeID(formats); err != nil { + res = append(res, err) + } + if err := m.validateTreeSize(formats); err != nil { res = append(res, err) } @@ -72,6 +89,32 @@ func (m *LogInfo) Validate(formats strfmt.Registry) error { return nil } +func (m *LogInfo) validateInactiveShards(formats strfmt.Registry) error { + if swag.IsZero(m.InactiveShards) { // not required + return nil + } + + for i := 0; i < len(m.InactiveShards); i++ { + if swag.IsZero(m.InactiveShards[i]) { // not required + continue + } + + if m.InactiveShards[i] != nil { + if err := m.InactiveShards[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + func (m *LogInfo) validateRootHash(formats strfmt.Registry) error { if err := validate.Required("rootHash", "body", m.RootHash); err != nil { @@ -94,6 +137,19 @@ func (m *LogInfo) validateSignedTreeHead(formats strfmt.Registry) error { return nil } +func (m *LogInfo) validateTreeID(formats strfmt.Registry) error { + + if err := validate.Required("treeID", "body", m.TreeID); err != nil { + return err + } + + if err := validate.Pattern("treeID", "body", *m.TreeID, `^[0-9]+$`); err != nil { + return err + } + + return nil +} + func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { if err := validate.Required("treeSize", "body", m.TreeSize); err != nil { @@ -107,8 +163,37 @@ func (m *LogInfo) validateTreeSize(formats strfmt.Registry) error { return nil } -// ContextValidate validates this log info based on context it is used +// ContextValidate validate this log info based on the context it is used func (m *LogInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateInactiveShards(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.InactiveShards); i++ { + + if m.InactiveShards[i] != nil { + if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("inactiveShards" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + return nil } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go index a9c3605863..6ebbf1016b 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/proposed_entry.go @@ -121,6 +121,12 @@ func unmarshalProposedEntry(data []byte, consumer runtime.Consumer) (ProposedEnt return nil, err } return &result, nil + case "cose": + var result Cose + if err := consumer.Consume(buf2, &result); err != nil { + return nil, err + } + return &result, nil case "hashedrekord": var result Hashedrekord if err := consumer.Consume(buf2, &result); err != nil { diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go index 6e371e1eab..ddc6ec2907 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go @@ -184,10 +184,6 @@ type RekordV001SchemaData struct { // hash Hash *RekordV001SchemaDataHash `json:"hash,omitempty"` - - // Specifies the location of the content - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this rekord v001 schema data @@ -198,10 +194,6 @@ func (m *RekordV001SchemaData) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -227,18 +219,6 @@ func (m *RekordV001SchemaData) validateHash(formats strfmt.Registry) error { return nil } -func (m *RekordV001SchemaData) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("data"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this rekord v001 schema data based on the context it is used func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -369,8 +349,13 @@ func (m *RekordV001SchemaDataHash) validateValue(formats strfmt.Registry) error return nil } -// ContextValidate validates this rekord v001 schema data hash based on context it is used +// ContextValidate validate this rekord v001 schema data hash based on the context it is used func (m *RekordV001SchemaDataHash) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } return nil } @@ -398,34 +383,33 @@ func (m *RekordV001SchemaDataHash) UnmarshalBinary(b []byte) error { type RekordV001SchemaSignature struct { // Specifies the content of the signature inline within the document + // Required: true // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` + Content *strfmt.Base64 `json:"content"` // Specifies the format of the signature + // Required: true // Enum: [pgp minisign x509 ssh] - Format string `json:"format,omitempty"` + Format *string `json:"format"` // public key - PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey,omitempty"` - - // Specifies the location of the signature - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + // Required: true + PublicKey *RekordV001SchemaSignaturePublicKey `json:"publicKey"` } // Validate validates this rekord v001 schema signature func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateFormat(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } - if err := m.validatePublicKey(formats); err != nil { + if err := m.validateFormat(formats); err != nil { res = append(res, err) } - if err := m.validateURL(formats); err != nil { + if err := m.validatePublicKey(formats); err != nil { res = append(res, err) } @@ -435,6 +419,15 @@ func (m *RekordV001SchemaSignature) Validate(formats strfmt.Registry) error { return nil } +func (m *RekordV001SchemaSignature) validateContent(formats strfmt.Registry) error { + + if err := validate.Required("signature"+"."+"content", "body", m.Content); err != nil { + return err + } + + return nil +} + var rekordV001SchemaSignatureTypeFormatPropEnum []interface{} func init() { @@ -471,12 +464,13 @@ func (m *RekordV001SchemaSignature) validateFormatEnum(path, location string, va } func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) error { - if swag.IsZero(m.Format) { // not required - return nil + + if err := validate.Required("signature"+"."+"format", "body", m.Format); err != nil { + return err } // value enum - if err := m.validateFormatEnum("signature"+"."+"format", "body", m.Format); err != nil { + if err := m.validateFormatEnum("signature"+"."+"format", "body", *m.Format); err != nil { return err } @@ -484,8 +478,9 @@ func (m *RekordV001SchemaSignature) validateFormat(formats strfmt.Registry) erro } func (m *RekordV001SchemaSignature) 
validatePublicKey(formats strfmt.Registry) error { - if swag.IsZero(m.PublicKey) { // not required - return nil + + if err := validate.Required("signature"+"."+"publicKey", "body", m.PublicKey); err != nil { + return err } if m.PublicKey != nil { @@ -502,18 +497,6 @@ func (m *RekordV001SchemaSignature) validatePublicKey(formats strfmt.Registry) e return nil } -func (m *RekordV001SchemaSignature) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("signature"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this rekord v001 schema signature based on the context it is used func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -568,19 +551,16 @@ func (m *RekordV001SchemaSignature) UnmarshalBinary(b []byte) error { type RekordV001SchemaSignaturePublicKey struct { // Specifies the content of the public key inline within the document + // Required: true // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // Specifies the location of the public key - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + Content *strfmt.Base64 `json:"content"` } // Validate validates this rekord v001 schema signature public key func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateURL(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } @@ -590,12 +570,9 @@ func (m *RekordV001SchemaSignaturePublicKey) Validate(formats strfmt.Registry) e return nil } -func (m *RekordV001SchemaSignaturePublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } +func (m *RekordV001SchemaSignaturePublicKey) validateContent(formats strfmt.Registry) error { - if err := validate.FormatOf("signature"+"."+"publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + if err := validate.Required("signature"+"."+"publicKey"+"."+"content", "body", m.Content); err != nil { return err } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go index ee22fad93a..6f3cb07c9a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go @@ -188,10 +188,6 @@ type RpmV001SchemaPackage struct { // Values of the RPM headers // Read Only: true Headers map[string]string `json:"headers,omitempty"` - - // Specifies the location of the package; if this is specified, a hash value must also be provided - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this rpm v001 schema package @@ -202,10 +198,6 @@ func (m *RpmV001SchemaPackage) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -231,18 +223,6 @@ func (m *RpmV001SchemaPackage) validateHash(formats strfmt.Registry) error { return nil } -func (m *RpmV001SchemaPackage) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("package"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - - return nil -} - // ContextValidate validate this rpm v001 schema package based on the context it is used func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error @@ -411,19 +391,16 @@ func (m *RpmV001SchemaPackageHash) UnmarshalBinary(b []byte) error { type RpmV001SchemaPublicKey struct { // Specifies the content of the public key inline within the document + // Required: true // Format: byte - Content strfmt.Base64 `json:"content,omitempty"` - - // Specifies the location of the public key - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + Content *strfmt.Base64 `json:"content"` } // Validate validates this rpm v001 schema public key func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateURL(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } @@ -433,12 +410,9 @@ func (m *RpmV001SchemaPublicKey) Validate(formats strfmt.Registry) error { return nil } -func (m *RpmV001SchemaPublicKey) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } +func (m *RpmV001SchemaPublicKey) validateContent(formats strfmt.Registry) error { - if err := validate.FormatOf("publicKey"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { + if err := validate.Required("publicKey"+"."+"content", "body", m.Content); err != nil { return err } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go index 48cc889bbc..ada461b008 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go @@ -44,6 +44,10 @@ type SearchIndex struct { // Pattern: ^(sha256:)?[0-9a-fA-F]{64}$|^(sha1:)?[0-9a-fA-F]{40}$ Hash string `json:"hash,omitempty"` + // operator + // Enum: [and or] + Operator string `json:"operator,omitempty"` + // public key PublicKey *SearchIndexPublicKey `json:"publicKey,omitempty"` } @@ -60,6 +64,10 @@ func (m *SearchIndex) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateOperator(formats); err != nil { + res = append(res, err) + } + if err := m.validatePublicKey(formats); err != nil { res = append(res, err) } @@ -94,6 +102,48 @@ func (m *SearchIndex) validateHash(formats strfmt.Registry) error { return nil } +var searchIndexTypeOperatorPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["and","or"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + searchIndexTypeOperatorPropEnum = append(searchIndexTypeOperatorPropEnum, v) + } +} + +const ( + + // SearchIndexOperatorAnd captures enum value "and" + SearchIndexOperatorAnd string = "and" + + // SearchIndexOperatorOr captures enum value "or" + SearchIndexOperatorOr string = "or" +) + +// prop value enum +func (m *SearchIndex) validateOperatorEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, searchIndexTypeOperatorPropEnum, 
true); err != nil { + return err + } + return nil +} + +func (m *SearchIndex) validateOperator(formats strfmt.Registry) error { + if swag.IsZero(m.Operator) { // not required + return nil + } + + // value enum + if err := m.validateOperatorEnum("operator", "body", m.Operator); err != nil { + return err + } + + return nil +} + func (m *SearchIndex) validatePublicKey(formats strfmt.Registry) error { if swag.IsZero(m.PublicKey) { // not required return nil diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go index 7ce2098f7f..37beafab76 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go @@ -181,7 +181,7 @@ func (m *SearchLogQuery) validateEntryUUIDs(formats strfmt.Registry) error { for i := 0; i < len(m.EntryUUIDs); i++ { - if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^[0-9a-fA-F]{64}$`); err != nil { + if err := validate.Pattern("entryUUIDs"+"."+strconv.Itoa(i), "body", m.EntryUUIDs[i], `^([0-9a-fA-F]{64}|[0-9a-fA-F]{80})$`); err != nil { return err } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go index 440d6532fa..d015607ff0 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go @@ -194,37 +194,12 @@ func (m *TUFV001Schema) UnmarshalBinary(b []byte) error { // swagger:model TUFV001SchemaMetadata type TUFV001SchemaMetadata struct { - // Specifies the archive inline within the document + // Specifies the metadata inline within the document Content interface{} `json:"content,omitempty"` - - // Specifies the location of the archive - // Format: uri - URL strfmt.URI `json:"url,omitempty"` } // Validate validates this TUF v001 schema metadata func (m *TUFV001SchemaMetadata) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateURL(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *TUFV001SchemaMetadata) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } - - if err := validate.FormatOf("metadata"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err - } - return nil } @@ -256,19 +231,16 @@ func (m *TUFV001SchemaMetadata) UnmarshalBinary(b []byte) error { // swagger:model TUFV001SchemaRoot type TUFV001SchemaRoot struct { - // Specifies the archive inline within the document - Content interface{} `json:"content,omitempty"` - - // Specifies the location of the archive - // Format: uri - URL strfmt.URI `json:"url,omitempty"` + // Specifies the metadata inline within the document + // Required: true + Content interface{} `json:"content"` } // Validate validates this TUF v001 schema root func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateURL(formats); err != nil { + if err := m.validateContent(formats); err != nil { res = append(res, err) } @@ -278,13 +250,10 @@ func (m *TUFV001SchemaRoot) Validate(formats strfmt.Registry) error { return nil } -func (m *TUFV001SchemaRoot) validateURL(formats strfmt.Registry) error { - if swag.IsZero(m.URL) { // not required - return nil - } +func (m *TUFV001SchemaRoot) validateContent(formats strfmt.Registry) error { - if err := validate.FormatOf("root"+"."+"url", "body", "uri", m.URL.String(), formats); err != nil { - return err + if m.Content == nil { + return errors.Required("root"+"."+"content", "body", nil) } return nil diff --git a/vendor/github.com/sigstore/rekor/pkg/log/log.go b/vendor/github.com/sigstore/rekor/pkg/log/log.go index 62ad8bbfc6..7473347a97 100644 --- a/vendor/github.com/sigstore/rekor/pkg/log/log.go +++ b/vendor/github.com/sigstore/rekor/pkg/log/log.go @@ -18,7 +18,6 @@ package log import ( "context" "log" - "net/http" "github.com/go-chi/chi/middleware" "go.uber.org/zap" @@ -69,10 +68,10 @@ func WithRequestID(ctx context.Context, id string) context.Context { return context.WithValue(ctx, middleware.RequestIDKey, id) } -func RequestIDLogger(r *http.Request) *zap.SugaredLogger { +func ContextLogger(ctx context.Context) *zap.SugaredLogger { proposedLogger := Logger - if r != nil { - if ctxRequestID, ok := r.Context().Value(middleware.RequestIDKey).(string); ok { + if ctx != nil { + if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok { proposedLogger = proposedLogger.With(zap.String("requestID", ctxRequestID)) } } diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/minisign/minisign.go b/vendor/github.com/sigstore/rekor/pkg/pki/minisign/minisign.go index 666bf16c6b..945db41ca0 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/minisign/minisign.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/minisign/minisign.go @@ -177,3 +177,8 @@ func (k PublicKey) CanonicalValue() ([]byte, error) { func (k PublicKey) EmailAddresses() []string { return nil } + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/pgp/pgp.go b/vendor/github.com/sigstore/rekor/pkg/pki/pgp/pgp.go index 8a56abeddd..52f13cc02c 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/pgp/pgp.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/pgp/pgp.go @@ -296,3 +296,8 @@ func (k PublicKey) EmailAddresses() []string { } return names } + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + return k.EmailAddresses() +} diff 
--git a/vendor/github.com/sigstore/rekor/pkg/pki/pkcs7/pkcs7.go b/vendor/github.com/sigstore/rekor/pkg/pki/pkcs7/pkcs7.go index 10f29a21e0..b2939f7660 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/pkcs7/pkcs7.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/pkcs7/pkcs7.go @@ -209,3 +209,8 @@ func (k PublicKey) EmailAddresses() []string { return names } + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + return k.EmailAddresses() +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/pki.go b/vendor/github.com/sigstore/rekor/pkg/pki/pki.go index d1618034d8..d6a2d2135a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/pki.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/pki.go @@ -24,7 +24,10 @@ import ( // PublicKey Generic object representing a public key (regardless of format & algorithm) type PublicKey interface { CanonicalValue() ([]byte, error) + // Deprecated: EmailAddresses() will be deprecated in favor of Subjects() which will + // also return Subject URIs present in public keys. EmailAddresses() []string + Subjects() []string } // Signature Generic object representing a signature (regardless of format & algorithm) diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/ssh/sign.go b/vendor/github.com/sigstore/rekor/pkg/pki/ssh/sign.go index 3bf2be813a..8d148f79fe 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/ssh/sign.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/ssh/sign.go @@ -75,7 +75,7 @@ func sign(s ssh.AlgorithmSigner, m io.Reader) (*ssh.Signature, error) { // We can use the default value of "" for other key types though. algo := "" if s.PublicKey().Type() == ssh.KeyAlgoRSA { - algo = ssh.SigAlgoRSASHA2512 + algo = ssh.KeyAlgoRSASHA512 } sig, err := s.SignWithAlgorithm(rand.Reader, dataMessageWrapper, algo) if err != nil { diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/ssh/ssh.go b/vendor/github.com/sigstore/rekor/pkg/pki/ssh/ssh.go index 8012905aa0..79c10f6513 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/ssh/ssh.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/ssh/ssh.go @@ -102,3 +102,8 @@ func (k PublicKey) CanonicalValue() ([]byte, error) { func (k PublicKey) EmailAddresses() []string { return nil } + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/tuf/tuf.go b/vendor/github.com/sigstore/rekor/pkg/pki/tuf/tuf.go index 1b63049b21..f90c7c3b92 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/tuf/tuf.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/tuf/tuf.go @@ -122,10 +122,7 @@ func NewPublicKey(r io.Reader) (*PublicKey, error) { db := verify.NewDB() for id, k := range root.Keys { if err := db.AddKey(id, k); err != nil { - // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return nil, err - } + return nil, err } } for name, role := range root.Roles { @@ -169,3 +166,8 @@ func (k PublicKey) SpecVersion() (string, error) { func (k PublicKey) EmailAddresses() []string { return nil } + +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go index f12c0814f2..e82ec9f005 100644 --- a/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go +++ b/vendor/github.com/sigstore/rekor/pkg/pki/x509/x509.go @@ -69,7 +69,17 @@ func (s 
Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOptio p := key.key if p == nil { - p = key.cert.c.PublicKey + switch { + case key.cert != nil: + p = key.cert.c.PublicKey + case len(key.certs) > 0: + if err := verifyCertChain(key.certs); err != nil { + return err + } + p = key.certs[0].PublicKey + default: + return errors.New("no public key found") + } } verifier, err := sigsig.LoadVerifier(p, crypto.SHA256) @@ -81,8 +91,9 @@ func (s Signature) Verify(r io.Reader, k interface{}, opts ...sigsig.VerifyOptio // PublicKey Public Key that follows the x509 standard type PublicKey struct { - key interface{} - cert *cert + key interface{} + cert *cert + certs []*x509.Certificate } type cert struct { @@ -97,11 +108,21 @@ func NewPublicKey(r io.Reader) (*PublicKey, error) { return nil, err } - block, _ := pem.Decode(rawPub) + block, rest := pem.Decode(rawPub) if block == nil { return nil, errors.New("invalid public key: failure decoding PEM") } + // Handle certificate chain, concatenated PEM-encoded certificates + if len(rest) > 0 { + // Support up to 10 certificates in a chain, to avoid parsing extremely long chains + certs, err := cryptoutils.UnmarshalCertificatesFromPEMLimited(rawPub, 10) + if err != nil { + return nil, err + } + return &PublicKey{certs: certs}, nil + } + switch block.Type { case string(cryptoutils.PublicKeyPEMType): key, err := x509.ParsePKIXPublicKey(block.Bytes) @@ -131,6 +152,8 @@ func (k PublicKey) CanonicalValue() (encoded []byte, err error) { encoded, err = cryptoutils.MarshalPublicKeyToPEM(k.key) case k.cert != nil: encoded, err = cryptoutils.MarshalCertificateToPEM(k.cert.c) + case k.certs != nil: + encoded, err = cryptoutils.MarshalCertificatesToPEM(k.certs) default: err = fmt.Errorf("x509 public key has not been initialized") } @@ -142,15 +165,24 @@ func (k PublicKey) CryptoPubKey() crypto.PublicKey { if k.cert != nil { return k.cert.c.PublicKey } + if len(k.certs) > 0 { + return k.certs[0].PublicKey + } return k.key } // EmailAddresses implements the pki.PublicKey interface func (k PublicKey) EmailAddresses() []string { var names []string + var cert *x509.Certificate if k.cert != nil { - for _, name := range k.cert.c.EmailAddresses { - validate := validator.New() + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + validate := validator.New() + for _, name := range cert.EmailAddresses { errs := validate.Var(name, "required,email") if errs == nil { names = append(names, strings.ToLower(name)) @@ -160,47 +192,54 @@ func (k PublicKey) EmailAddresses() []string { return names } -func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) { - var pemBytes bytes.Buffer - for _, cert := range certChain { - if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { - return nil, err +// Subjects implements the pki.PublicKey interface +func (k PublicKey) Subjects() []string { + var names []string + var cert *x509.Certificate + if k.cert != nil { + cert = k.cert.c + } else if len(k.certs) > 0 { + cert = k.certs[0] + } + if cert != nil { + validate := validator.New() + for _, name := range cert.EmailAddresses { + if errs := validate.Var(name, "required,email"); errs == nil { + names = append(names, strings.ToLower(name)) + } } - } - return pemBytes.Bytes(), nil -} - -func ParseTimestampCertChain(pemBytes []byte) ([]*x509.Certificate, error) { - certChain := []*x509.Certificate{} - var block *pem.Block - block, pemBytes = pem.Decode(pemBytes) - for ; block != nil; block, 
pemBytes = pem.Decode(pemBytes) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err + for _, name := range cert.URIs { + if errs := validate.Var(name.String(), "required,uri"); errs == nil { + names = append(names, strings.ToLower(name.String())) } - certChain = append(certChain, cert) - } else { - return nil, errors.New("invalid block type") } } + return names +} + +func verifyCertChain(certChain []*x509.Certificate) error { if len(certChain) == 0 { - return nil, errors.New("no valid certificates in chain") + return errors.New("no certificate chain provided") + } + // No certificate chain to verify + if len(certChain) == 1 { + return nil } - // Verify cert chain for timestamping - roots := x509.NewCertPool() - intermediates := x509.NewCertPool() - for _, cert := range certChain[1:(len(certChain) - 1)] { - intermediates.AddCert(cert) + rootPool := x509.NewCertPool() + rootPool.AddCert(certChain[len(certChain)-1]) + subPool := x509.NewCertPool() + for _, c := range certChain[1 : len(certChain)-1] { + subPool.AddCert(c) } - roots.AddCert(certChain[len(certChain)-1]) if _, err := certChain[0].Verify(x509.VerifyOptions{ - Roots: roots, - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageTimeStamping}, - Intermediates: intermediates, + Roots: rootPool, + Intermediates: subPool, + // Allow any key usage + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + // Expired certificates can be uploaded and should be verifiable + CurrentTime: certChain[0].NotBefore, }); err != nil { - return nil, err + return err } - return certChain, nil + return nil } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/README.md b/vendor/github.com/sigstore/rekor/pkg/types/README.md index 272979f68d..f77f2408c8 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/README.md +++ b/vendor/github.com/sigstore/rekor/pkg/types/README.md @@ -20,5 +20,7 @@ Rekor supports pluggable types (aka different schemas) for entries stored in the - Versions: 0.0.1 - RPM Packages [schema](rpm/rpm_schema.json) - Versions: 0.0.1 +- COSE Envelopes [schema](cose/cose_schema.json) + - Versions: 0.0.1 -Refer to [Rekor docs](https://docs.sigstore.dev/rekor/plugable-types) for adding support for new types. +Refer to [Rekor docs](https://docs.sigstore.dev/rekor/pluggable-types) for adding support for new types. 
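The rewritten verifyCertChain in x509.go above treats the last certificate in the chain as the trust root, everything between the first and last certificate as intermediates, and pins the verification time to the leaf's NotBefore. As a minimal standalone sketch of that approach (not part of the diff; the package and function names here are hypothetical), the same logic can be expressed with only the standard library:

package chainverify

import (
	"crypto/x509"
	"errors"
)

// verifyChain mirrors the verifyCertChain logic introduced above: the last
// certificate is used as the root, any certificates between the first and
// last become intermediates, and the leaf is verified at its own NotBefore
// time so that expired-but-once-valid chains still verify.
func verifyChain(chain []*x509.Certificate) error {
	if len(chain) == 0 {
		return errors.New("no certificate chain provided")
	}
	if len(chain) == 1 {
		// a single certificate carries no chain to verify
		return nil
	}
	roots := x509.NewCertPool()
	roots.AddCert(chain[len(chain)-1])
	intermediates := x509.NewCertPool()
	for _, c := range chain[1 : len(chain)-1] {
		intermediates.AddCert(c)
	}
	_, err := chain[0].Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageAny},
		CurrentTime:   chain[0].NotBefore,
	})
	return err
}

Pinning CurrentTime to the leaf's NotBefore is what lets previously uploaded, now-expired certificates keep verifying, while ExtKeyUsageAny drops the timestamping-only key-usage restriction that the deleted ParseTimestampCertChain enforced.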
diff --git a/vendor/github.com/sigstore/rekor/pkg/types/entries.go b/vendor/github.com/sigstore/rekor/pkg/types/entries.go index dda638ba40..529ede0706 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/entries.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/entries.go @@ -35,10 +35,16 @@ type EntryImpl interface { IndexKeys() ([]string, error) // the keys that should be added to the external index for this entry Canonicalize(ctx context.Context) ([]byte, error) // marshal the canonical entry to be put into the tlog Unmarshal(e models.ProposedEntry) error // unmarshal the abstract entry into the specific struct for this versioned type - Attestation() []byte CreateFromArtifactProperties(context.Context, ArtifactProperties) (models.ProposedEntry, error) } +// EntryWithAttestationImpl specifies the behavior of a versioned type that also stores attestations +type EntryWithAttestationImpl interface { + EntryImpl + AttestationKey() string // returns the key used to look up the attestation from storage (should be sha256:digest) + AttestationKeyValue() (string, []byte) // returns the key to be used when storing the attestation as well as the attestation itself +} + // EntryFactory describes a factory function that can generate structs for a specific versioned type type EntryFactory func() EntryImpl @@ -120,12 +126,13 @@ func CanonicalizeEntry(ctx context.Context, entry EntryImpl) ([]byte, error) { // ArtifactProperties provide a consistent struct for passing values from // CLI flags to the type+version specific CreateProposeEntry() methods type ArtifactProperties struct { - ArtifactPath *url.URL - ArtifactHash string - ArtifactBytes []byte - SignaturePath *url.URL - SignatureBytes []byte - PublicKeyPath *url.URL - PublicKeyBytes []byte - PKIFormat string + AdditionalAuthenticatedData []byte + ArtifactPath *url.URL + ArtifactHash string + ArtifactBytes []byte + SignaturePath *url.URL + SignatureBytes []byte + PublicKeyPath *url.URL + PublicKeyBytes []byte + PKIFormat string } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go index 4ae6b52f83..66395c7a05 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/hashedrekord.go @@ -17,9 +17,9 @@ package hashedrekord import ( "context" + "errors" "fmt" - "github.com/pkg/errors" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/types" ) @@ -52,7 +52,7 @@ func (rt BaseRekordType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImp rekord, ok := pe.(*models.Hashedrekord) if !ok { - return nil, errors.New(fmt.Sprintf("%s, %s", "cannot unmarshal non-hashed Rekord types", pe.Kind())) + return nil, fmt.Errorf("cannot unmarshal non-hashed Rekord types: %s", pe.Kind()) } return rt.VersionedUnmarshal(rekord, *rekord.APIVersion) @@ -64,7 +64,7 @@ func (rt *BaseRekordType) CreateProposedEntry(ctx context.Context, version strin } ei, err := rt.VersionedUnmarshal(nil, version) if err != nil { - return nil, errors.Wrap(err, "fetching hashed Rekord version implementation") + return nil, fmt.Errorf("fetching hashed Rekord version implementation: %w", err) } return ei.CreateFromArtifactProperties(ctx, props) diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go index 234431e5dd..aea92fcb01 100644 --- 
a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/entry.go @@ -18,9 +18,11 @@ package hashedrekord import ( "bytes" "context" + "crypto/ed25519" "crypto/sha256" "encoding/hex" "encoding/json" + "errors" "fmt" "io/ioutil" "path/filepath" @@ -29,7 +31,6 @@ import ( "github.com/asaskevich/govalidator" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" - "github.com/pkg/errors" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" @@ -73,7 +74,7 @@ func (v V001Entry) IndexKeys() ([]string, error) { if err != nil { return nil, err } - result = append(result, pub.EmailAddresses()...) + result = append(result, pub.Subjects()...) if v.HashedRekordObj.Data.Hash != nil { hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)) @@ -159,6 +160,11 @@ func (v *V001Entry) validate() (pki.Signature, pki.PublicKey, error) { return nil, nil, types.ValidationError(err) } + _, isEd25519 := keyObj.CryptoPubKey().(ed25519.PublicKey) + if isEd25519 { + return nil, nil, types.ValidationError(errors.New("ed25519 unsupported for hashedrekord")) + } + data := v.HashedRekordObj.Data if data == nil { return nil, nil, types.ValidationError(errors.New("missing data")) @@ -177,16 +183,12 @@ func (v *V001Entry) validate() (pki.Signature, pki.PublicKey, error) { return nil, nil, err } if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded)); err != nil { - return nil, nil, types.ValidationError(errors.Wrap(err, "verifying signature")) + return nil, nil, types.ValidationError(fmt.Errorf("verifying signature: %w", err)) } return sigObj, keyObj, nil } -func (v V001Entry) Attestation() []byte { - return nil -} - func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.Hashedrekord{} re := V001Entry{} diff --git a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json index b1ef65052b..8752ae60f2 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json +++ b/vendor/github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1/hashedrekord_v0_0_1_schema.json @@ -15,11 +15,11 @@ "format": "byte" }, "publicKey" : { - "description": "The public key that can verify the signature", + "description": "The public key that can verify the signature; this can also be an X509 code signing certificate that contains the raw public key information", "type": "object", "properties": { "content": { - "description": "Specifies the content of the public key inline within the document", + "description": "Specifies the content of the public key or code signing certificate inline within the document", "type": "string", "format": "byte" } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md new file mode 100644 index 0000000000..eba7a16e01 --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/README.md @@ -0,0 +1,13 @@ +**in-toto Type Data Documentation** + +This document provides a definition for each field that is not otherwise described in the [in-toto schema](https://github.com/sigstore/rekor/blob/main/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json). 
This document also notes any additional information about the values associated with each field such as the format in which the data is stored and any necessary transformations. + +**Attestation:** authenticated, machine-readable metadata about one or more software artifacts. [SLSA definition](https://github.com/slsa-framework/slsa/blob/main/controls/attestations.md) +- The Attestation value ought to be a Base64-encoded JSON object. +- The [in-toto Attestation specification](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement) provides detailed guidance on understanding and parsing this JSON object. + +**AttestationType:** Identifies the type of attestation being made, such as a provenance attestation or a vulnerability scan attestation. AttestationType's value, even when prefixed with http, is not necessarily a working URL. + +**How do you identify an object as an in-toto object?** + +The "Body" field will include an "IntotoObj" field. diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go index 5016ce9d11..f48daacbe4 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/intoto.go @@ -17,8 +17,9 @@ package intoto import ( "context" + "errors" + "fmt" - "github.com/pkg/errors" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/types" ) @@ -63,7 +64,7 @@ func (it *BaseIntotoType) CreateProposedEntry(ctx context.Context, version strin } ei, err := it.VersionedUnmarshal(nil, version) if err != nil { - return nil, errors.Wrap(err, "fetching Intoto version implementation") + return nil, fmt.Errorf("fetching Intoto version implementation: %w", err) } return ei.CreateFromArtifactProperties(ctx, props) } diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/entry.go b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/entry.go index 66cc065f3b..49e42ccea4 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/entry.go @@ -27,6 +27,7 @@ import ( "fmt" "io/ioutil" "path/filepath" + "strings" "github.com/in-toto/in-toto-golang/in_toto" "github.com/secure-systems-lab/go-securesystemslib/dsse" @@ -42,7 +43,7 @@ import ( "github.com/sigstore/rekor/pkg/types" "github.com/sigstore/rekor/pkg/types/intoto" "github.com/sigstore/sigstore/pkg/signature" - "github.com/sigstore/sigstore/pkg/signature/options" + dsse_verifier "github.com/sigstore/sigstore/pkg/signature/dsse" ) const ( @@ -72,23 +73,64 @@ func NewEntry() types.EntryImpl { func (v V001Entry) IndexKeys() ([]string, error) { var result []string - h := sha256.Sum256([]byte(v.env.Payload)) - payloadKey := "sha256:" + hex.EncodeToString(h[:]) - result = append(result, payloadKey) + // add digest over entire DSSE envelope + if v.IntotoObj.Content != nil && v.IntotoObj.Content.Hash != nil { + hashkey := strings.ToLower(fmt.Sprintf("%s:%s", swag.StringValue(v.IntotoObj.Content.Hash.Algorithm), swag.StringValue(v.IntotoObj.Content.Hash.Value))) + result = append(result, hashkey) + } else { + log.Logger.Error("could not find content digest to include in index keys") + } + + // add digest over public key + if v.keyObj != nil { + key, err := v.keyObj.CanonicalValue() + if err == nil { + keyHash := sha256.Sum256(key) + result = append(result, fmt.Sprintf("sha256:%s", strings.ToLower(hex.EncodeToString(keyHash[:])))) + + // add digest over any
subjects within signing certificate + result = append(result, v.keyObj.Subjects()...) + } else { + log.Logger.Errorf("could not canonicalize public key to include in index keys: %w", err) + } + } else { + log.Logger.Error("could not find public key to include in index keys") + } + + // add digest base64-decoded payload inside of DSSE envelope + if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { + payloadHash := strings.ToLower(fmt.Sprintf("%s:%s", swag.StringValue(v.IntotoObj.Content.PayloadHash.Algorithm), swag.StringValue(v.IntotoObj.Content.PayloadHash.Value))) + result = append(result, payloadHash) + } else { + log.Logger.Error("could not find payload digest to include in index keys") + } switch v.env.PayloadType { case in_toto.PayloadType: statement, err := parseStatement(v.env.Payload) if err != nil { - return result, err + log.Logger.Errorf("error parsing payload as intoto statement: %w", err) + break } for _, s := range statement.Subject { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } + // Not all in-toto statements will contain a SLSA provenance predicate. + // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate + // for other predicates. + if predicate, err := parseSlsaPredicate(v.env.Payload); err == nil { + if predicate.Predicate.Materials != nil { + for _, s := range predicate.Predicate.Materials { + for alg, ds := range s.Digest { + result = append(result, alg+":"+ds) + } + } + } + } default: - log.Logger.Infof("Unknown in_toto Statement Type: %s", v.env.PayloadType) + log.Logger.Infof("unknown in_toto statement type (%s), cannot extract additional index keys", v.env.PayloadType) } return result, nil } @@ -105,6 +147,18 @@ func parseStatement(p string) (*in_toto.Statement, error) { return &ps, nil } +func parseSlsaPredicate(p string) (*in_toto.ProvenanceStatement, error) { + predicate := in_toto.ProvenanceStatement{} + payload, err := base64.StdEncoding.DecodeString(p) + if err != nil { + return nil, err + } + if err := json.Unmarshal(payload, &predicate); err != nil { + return nil, err + } + return &predicate, nil +} + func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { it, ok := pe.(*models.Intoto) if !ok { @@ -139,14 +193,16 @@ func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) { } pkb := strfmt.Base64(pk) - h := sha256.Sum256([]byte(v.IntotoObj.Content.Envelope)) - canonicalEntry := models.IntotoV001Schema{ PublicKey: &pkb, Content: &models.IntotoV001SchemaContent{ Hash: &models.IntotoV001SchemaContentHash{ - Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), - Value: swag.String(hex.EncodeToString(h[:])), + Algorithm: v.IntotoObj.Content.Hash.Algorithm, + Value: v.IntotoObj.Content.Hash.Value, + }, + PayloadHash: &models.IntotoV001SchemaContentPayloadHash{ + Algorithm: v.IntotoObj.Content.PayloadHash.Algorithm, + Value: v.IntotoObj.Content.PayloadHash.Value, }, }, } @@ -171,66 +227,51 @@ func (v *V001Entry) validate() error { if err != nil { return err } - dsseVerifier, err := dsse.NewEnvelopeSigner(&verifier{ - v: vfr, - pub: pk, - }) - if err != nil { - return err - } + dsseVerifier := dsse_verifier.WrapVerifier(vfr) - if v.IntotoObj.Content.Envelope == "" { - return nil + if err := dsseVerifier.VerifySignature(strings.NewReader(v.IntotoObj.Content.Envelope), nil); err != nil { + return err } - if err := json.Unmarshal([]byte(v.IntotoObj.Content.Envelope), &v.env); err != nil { return err } - if _, err := dsseVerifier.Verify(&v.env); err != 
nil { + attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload) + if err != nil { return err } - return nil -} - -func (v *V001Entry) Attestation() []byte { - if len(v.env.Payload) > viper.GetInt("max_attestation_size") { - log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", len(v.env.Payload), viper.GetInt("max_attestation_size")) - return nil + // validation logic complete without errors, hydrate local object + attHash := sha256.Sum256(attBytes) + v.IntotoObj.Content.PayloadHash = &models.IntotoV001SchemaContentPayloadHash{ + Algorithm: swag.String(models.IntotoV001SchemaContentPayloadHashAlgorithmSha256), + Value: swag.String(hex.EncodeToString(attHash[:])), } - return []byte(v.env.Payload) -} -type verifier struct { - s signature.Signer - v signature.Verifier - pub crypto.PublicKey -} - -func (v *verifier) KeyID() (string, error) { - return "", nil -} - -func (v *verifier) Public() crypto.PublicKey { - return v.pub + h := sha256.Sum256([]byte(v.IntotoObj.Content.Envelope)) + v.IntotoObj.Content.Hash = &models.IntotoV001SchemaContentHash{ + Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), + Value: swag.String(hex.EncodeToString(h[:])), + } + return nil } -func (v *verifier) Sign(data []byte) (sig []byte, err error) { - if v.s == nil { - return nil, errors.New("nil signer") +// AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage +func (v *V001Entry) AttestationKey() string { + if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { + return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value) } - sig, err = v.s.SignMessage(bytes.NewReader(data), options.WithCryptoSignerOpts(crypto.SHA256)) - if err != nil { - return nil, err - } - return sig, nil + return "" } -func (v *verifier) Verify(data, sig []byte) error { - if v.v == nil { - return errors.New("nil verifier") +// AttestationKeyValue returns both the key and value to be persisted into attestation storage +func (v *V001Entry) AttestationKeyValue() (string, []byte) { + storageSize := base64.StdEncoding.DecodedLen(len(v.env.Payload)) + if storageSize > viper.GetInt("max_attestation_size") { + log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, viper.GetInt("max_attestation_size")) + return "", nil } - return v.v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data)) + attBytes, _ := base64.StdEncoding.DecodeString(v.env.Payload) + return v.AttestationKey(), attBytes } func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { @@ -270,6 +311,11 @@ func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.A PublicKey: &kb, }, } + h := sha256.Sum256([]byte(re.IntotoObj.Content.Envelope)) + re.IntotoObj.Content.Hash = &models.IntotoV001SchemaContentHash{ + Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), + Value: swag.String(hex.EncodeToString(h[:])), + } returnVal.Spec = re.IntotoObj returnVal.APIVersion = swag.String(re.APIVersion()) diff --git a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json index a8e8c054aa..39117a6614 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json +++ 
b/vendor/github.com/sigstore/rekor/pkg/types/intoto/v0.0.1/intoto_v0_0_1_schema.json @@ -34,6 +34,26 @@ "value" ], "readOnly": true + }, + "payloadHash": { + "description": "Specifies the hash algorithm and value covering the payload within the DSSE envelope", + "type": "object", + "properties": { + "algorithm": { + "description": "The hashing function used to compute the hash value", + "type": "string", + "enum": [ "sha256" ] + }, + "value": { + "description": "The hash value for the envelope's payload", + "type": "string" + } + }, + "required": [ + "algorithm", + "value" + ], + "readOnly": true } } }, diff --git a/vendor/github.com/sigstore/rekor/pkg/types/test_util.go b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go index 5b91104f0a..423c3a2306 100644 --- a/vendor/github.com/sigstore/rekor/pkg/types/test_util.go +++ b/vendor/github.com/sigstore/rekor/pkg/types/test_util.go @@ -19,6 +19,8 @@ package types import ( "context" + "github.com/go-openapi/strfmt" + "github.com/sigstore/rekor/pkg/generated/models" ) @@ -48,10 +50,32 @@ func (u BaseUnmarshalTester) Validate() error { return nil } -func (u BaseUnmarshalTester) Attestation() []byte { - return nil +func (u BaseUnmarshalTester) AttestationKey() string { + return "" +} + +func (u BaseUnmarshalTester) AttestationKeyValue() (string, []byte) { + return "", nil } func (u BaseUnmarshalTester) CreateFromArtifactProperties(_ context.Context, _ ArtifactProperties) (models.ProposedEntry, error) { return nil, nil } + +type BaseProposedEntryTester struct{} + +func (b BaseProposedEntryTester) Kind() string { + return "nil" +} + +func (b BaseProposedEntryTester) SetKind(v string) { + +} + +func (b BaseProposedEntryTester) Validate(r strfmt.Registry) error { + return nil +} + +func (b BaseProposedEntryTester) ContextValidate(ctx context.Context, r strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go index 47ca308b81..efa7b8bb1f 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/checkpoint.go @@ -18,11 +18,10 @@ package util import ( "bytes" "encoding/base64" + "errors" "fmt" "strconv" "strings" - - "github.com/pkg/errors" ) // heavily borrowed from https://github.com/google/trillian-examples/blob/master/formats/log/checkpoint.go @@ -131,11 +130,11 @@ func CheckpointValidator(strToValidate string) bool { func (r *SignedCheckpoint) UnmarshalText(data []byte) error { s := SignedNote{} if err := s.UnmarshalText([]byte(data)); err != nil { - return errors.Wrap(err, "unmarshalling signed note") + return fmt.Errorf("unmarshalling signed note: %w", err) } c := Checkpoint{} if err := c.UnmarshalCheckpoint([]byte(s.Note)); err != nil { - return errors.Wrap(err, "unmarshalling checkpoint") + return fmt.Errorf("unmarshalling checkpoint: %w", err) } *r = SignedCheckpoint{Checkpoint: c, SignedNote: s} return nil diff --git a/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go b/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go index 82d0904b65..06c49ca23a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/pubkey.go @@ -18,8 +18,8 @@ package util import ( "context" "crypto/ecdsa" + "errors" - "github.com/pkg/errors" "github.com/sigstore/rekor/pkg/generated/client" "github.com/sigstore/rekor/pkg/generated/client/pubkey" "github.com/sigstore/sigstore/pkg/cryptoutils" diff --git 
a/vendor/github.com/sigstore/rekor/pkg/util/rfc3161.go b/vendor/github.com/sigstore/rekor/pkg/util/rfc3161.go deleted file mode 100644 index 8d7bbad3de..0000000000 --- a/vendor/github.com/sigstore/rekor/pkg/util/rfc3161.go +++ /dev/null @@ -1,259 +0,0 @@ -// -// Copyright 2021 The Sigstore Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "bytes" - "context" - "crypto" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "fmt" - "math/big" - "time" - - "github.com/sassoftware/relic/lib/pkcs7" - "github.com/sassoftware/relic/lib/pkcs9" - "github.com/sassoftware/relic/lib/x509tools" - "github.com/sigstore/sigstore/pkg/signature" - "github.com/sigstore/sigstore/pkg/signature/options" -) - -type GeneralName struct { - Name asn1.RawValue `asn1:"optional,tag:4"` -} - -type IssuerNameAndSerial struct { - IssuerName GeneralName - SerialNumber *big.Int -} - -type EssCertIDv2 struct { - HashAlgorithm pkix.AlgorithmIdentifier `asn1:"optional"` // SHA256 - CertHash []byte - IssuerNameAndSerial IssuerNameAndSerial `asn1:"optional"` -} - -type SigningCertificateV2 struct { - Certs []EssCertIDv2 -} - -func createSigningCertificate(certificate *x509.Certificate) ([]byte, error) { - h := sha256.Sum256(certificate.Raw) // TODO: Get from certificate, defaults to 256 - signingCert := SigningCertificateV2{ - Certs: []EssCertIDv2{{ - CertHash: h[:], - IssuerNameAndSerial: IssuerNameAndSerial{ - IssuerName: GeneralName{Name: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: certificate.RawIssuer}}, - SerialNumber: certificate.SerialNumber, - }, - }}, - } - signingCertBytes, err := asn1.Marshal(signingCert) - if err != nil { - return nil, err - } - return signingCertBytes, nil -} - -func marshalCertificates(certs []*x509.Certificate) pkcs7.RawCertificates { - c := make(pkcs7.RawCertificates, len(certs)) - for i, cert := range certs { - c[i] = asn1.RawValue{FullBytes: cert.Raw} - } - return c -} - -func getPKIXPublicKeyAlgorithm(cert x509.Certificate) (*pkix.AlgorithmIdentifier, error) { - identifier := pkix.AlgorithmIdentifier{ - Parameters: asn1.NullRawValue, - } - switch alg := cert.PublicKeyAlgorithm; alg { - case x509.RSA: - identifier.Algorithm = x509tools.OidPublicKeyRSA - case x509.ECDSA: - identifier.Algorithm = x509tools.OidPublicKeyECDSA - case x509.Ed25519: - identifier.Algorithm = asn1.ObjectIdentifier{1, 3, 101, 112} - default: - return nil, fmt.Errorf("unknown public key algorithm") - } - - return &identifier, nil -} - -type TimestampRequestOptions struct { - // The policy that the client expects the TSA to use for creating the timestamp token. - // If no policy is specified the TSA uses its default policy. - TSAPolicyOid asn1.ObjectIdentifier - - // The nonce to specify in the request. - Nonce *big.Int - - // Hash function to use when constructing the timestamp request. Defaults to SHA-256. 
- Hash crypto.Hash -} - -func TimestampRequestFromDigest(digest []byte, opts TimestampRequestOptions) (*pkcs9.TimeStampReq, error) { - alg, _ := x509tools.PkixDigestAlgorithm(opts.Hash) - msg := pkcs9.TimeStampReq{ - Version: 1, - MessageImprint: pkcs9.MessageImprint{ - HashAlgorithm: alg, - HashedMessage: digest, - }, - CertReq: true, - } - if opts.Nonce != nil { - msg.Nonce = opts.Nonce - } - if opts.TSAPolicyOid != nil { - msg.ReqPolicy = opts.TSAPolicyOid - } - - return &msg, nil -} - -func ParseTimestampRequest(data []byte) (*pkcs9.TimeStampReq, error) { - msg := new(pkcs9.TimeStampReq) - if rest, err := asn1.Unmarshal(data, msg); err != nil { - return nil, fmt.Errorf("error umarshalling request") - } else if len(rest) != 0 { - return nil, fmt.Errorf("error umarshalling request, trailing bytes") - } - return msg, nil -} - -func GetSigningTime(psd *pkcs7.ContentInfoSignedData) (time.Time, error) { - // See sassoftware pkcs9 package for this code extracting TSTInfo - infobytes, err := psd.Content.ContentInfo.Bytes() - if err != nil { - return time.Time{}, fmt.Errorf("unpack TSTInfo: %w", err) - } else if infobytes[0] == 0x04 { - // unwrap dummy OCTET STRING - _, err = asn1.Unmarshal(infobytes, &infobytes) - if err != nil { - return time.Time{}, fmt.Errorf("unpack TSTInfo: %w", err) - } - } - info := new(pkcs9.TSTInfo) - if _, err := asn1.Unmarshal(infobytes, info); err != nil { - return time.Time{}, fmt.Errorf("unpack TSTInfo: %w", err) - } - - return pkcs7.ParseTime(info.GenTime) -} - -func CreateRfc3161Response(ctx context.Context, req pkcs9.TimeStampReq, certChain []*x509.Certificate, signer signature.Signer) (*pkcs9.TimeStampResp, error) { - // Populate TSTInfo. - genTimeBytes, err := asn1.MarshalWithParams(time.Now(), "generalized") - if err != nil { - return nil, err - } - policy := asn1.ObjectIdentifier{1, 2, 3, 4, 1} - if req.ReqPolicy.String() != "" { - policy = req.ReqPolicy - } - - info := pkcs9.TSTInfo{ - Version: req.Version, - MessageImprint: req.MessageImprint, - // directoryName is tag 4 https://datatracker.ietf.org/doc/html/rfc3280#section-4.2.1.7 - TSA: pkcs9.GeneralName{Value: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: certChain[0].RawSubject}}, - // TODO: Ensure that every (SerialNumber, TSA name) identifies a unique token. - SerialNumber: x509tools.MakeSerial(), - GenTime: asn1.RawValue{FullBytes: genTimeBytes}, - Nonce: req.Nonce, - Policy: policy, - Extensions: req.Extensions, - } - - encoded, err := asn1.Marshal(info) - if err != nil { - return nil, err - } - contentInfo, err := pkcs7.NewContentInfo(pkcs9.OidTSTInfo, encoded) - if err != nil { - return nil, err - } - - // TODO: Does this need to match the hash algorithm in the request? - alg, _ := x509tools.PkixDigestAlgorithm(crypto.SHA256) - contentInfoBytes, _ := contentInfo.Bytes() - h := sha256.Sum256(contentInfoBytes) - - // Create SignerInfo and signature. - signingCert, err := createSigningCertificate(certChain[0]) - if err != nil { - return nil, err - } - attributes := new(pkcs7.AttributeList) - if err := attributes.Add(pkcs7.OidAttributeContentType, contentInfo.ContentType); err != nil { - return nil, err - } - if err := attributes.Add(pkcs7.OidAttributeMessageDigest, h[:]); err != nil { - return nil, err - } - if err := attributes.Add(asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 16, 2, 47}, signingCert); err != nil { - return nil, err - } - - // The signature is over the entire authenticated attributes, not just the TstInfo. 
- attrBytes, err := attributes.Bytes() - if err != nil { - return nil, err - } - // Get signature. - signature, err := signer.SignMessage(bytes.NewReader(attrBytes), options.WithContext(ctx)) - if err != nil { - return nil, err - } - - sigAlg, err := getPKIXPublicKeyAlgorithm(*certChain[0]) - if err != nil { - return nil, err - } - - response := pkcs9.TimeStampResp{ - Status: pkcs9.PKIStatusInfo{ - Status: 0, - }, - TimeStampToken: pkcs7.ContentInfoSignedData{ - ContentType: asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}, // id-signedData - Content: pkcs7.SignedData{ - Version: 1, - DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{alg}, - ContentInfo: contentInfo, - Certificates: marshalCertificates(certChain), - CRLs: nil, - SignerInfos: []pkcs7.SignerInfo{{ - Version: 1, - IssuerAndSerialNumber: pkcs7.IssuerAndSerial{ - IssuerName: asn1.RawValue{FullBytes: certChain[0].RawIssuer}, - SerialNumber: certChain[0].SerialNumber, - }, - DigestAlgorithm: alg, - DigestEncryptionAlgorithm: *sigAlg, - AuthenticatedAttributes: *attributes, - EncryptedDigest: signature, - }}, - }, - }, - } - return &response, nil -} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/sha.go b/vendor/github.com/sigstore/rekor/pkg/util/sha.go new file mode 100644 index 0000000000..4ea65d270c --- /dev/null +++ b/vendor/github.com/sigstore/rekor/pkg/util/sha.go @@ -0,0 +1,33 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "fmt" + "strings" +) + +// PrefixSHA sets the prefix of a sha hash to match how it is stored based on the length. 
+func PrefixSHA(sha string) string { + var prefix string + if !strings.HasPrefix(sha, "sha256:") && !strings.HasPrefix(sha, "sha1:") { + if len(sha) == 40 { + prefix = "sha1:" + } else { + prefix = "sha256:" + } + } + return fmt.Sprintf("%v%v", prefix, sha) +} diff --git a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go index 0cfd62fc39..344af188d8 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/signed_note.go @@ -25,10 +25,10 @@ import ( "crypto/x509" "encoding/base64" "encoding/binary" + "errors" "fmt" "strings" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" "golang.org/x/mod/sumdb/note" @@ -46,16 +46,16 @@ type SignedNote struct { func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signature.SignOption) (*note.Signature, error) { sig, err := signer.SignMessage(bytes.NewReader([]byte(s.Note)), opts) if err != nil { - return nil, errors.Wrap(err, "signing note") + return nil, fmt.Errorf("signing note: %w", err) } pk, err := signer.PublicKey() if err != nil { - return nil, errors.Wrap(err, "retrieving public key") + return nil, fmt.Errorf("retrieving public key: %w", err) } pubKeyBytes, err := x509.MarshalPKIXPublicKey(pk) if err != nil { - return nil, errors.Wrap(err, "marshalling public key") + return nil, fmt.Errorf("marshalling public key: %w", err) } pkSha := sha256.Sum256(pubKeyBytes) @@ -158,12 +158,12 @@ func (s *SignedNote) UnmarshalText(data []byte) error { for b.Scan() { var name, signature string if _, err := fmt.Fscanf(strings.NewReader(b.Text()), "\u2014 %s %s\n", &name, &signature); err != nil { - return errors.Wrap(err, "parsing signature") + return fmt.Errorf("parsing signature: %w", err) } sigBytes, err := base64.StdEncoding.DecodeString(signature) if err != nil { - return errors.Wrap(err, "decoding signature") + return fmt.Errorf("decoding signature: %w", err) } if len(sigBytes) < 5 { return errors.New("signature is too small") diff --git a/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go b/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go index 801ba704fc..d2f44c2887 100644 --- a/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go +++ b/vendor/github.com/sigstore/rekor/pkg/util/timestamp_note.go @@ -18,13 +18,12 @@ package util import ( "bytes" "encoding/base64" + "errors" "fmt" "net/url" "strconv" "strings" "time" - - "github.com/pkg/errors" ) // Signed note based timestamp responses @@ -161,11 +160,11 @@ func TimestampNoteValidator(strToValidate string) bool { func (r *SignedTimestampNote) UnmarshalText(data []byte) error { s := SignedNote{} if err := s.UnmarshalText([]byte(data)); err != nil { - return errors.Wrap(err, "unmarshalling signed note") + return fmt.Errorf("unmarshalling signed note: %w", err) } t := TimestampNote{} if err := t.UnmarshalText([]byte(s.Note)); err != nil { - return errors.Wrap(err, "unmarshalling timestamp note") + return fmt.Errorf("unmarshalling timestamp note: %w", err) } *r = SignedTimestampNote{TimestampNote: t, SignedNote: s} return nil diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go index 84dc4233fc..21c2685509 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/certificate.go @@ -76,6 
+76,35 @@ func UnmarshalCertificatesFromPEM(pemBytes []byte) ([]*x509.Certificate, error) return result, nil } +// UnmarshalCertificatesFromPEMLimited extracts one or more X509 certificates from the provided +// byte slice, which is assumed to be in PEM-encoded format. Fails after a specified +// number of iterations. A reasonable limit is 10 iterations. +func UnmarshalCertificatesFromPEMLimited(pemBytes []byte, iterations int) ([]*x509.Certificate, error) { + result := []*x509.Certificate{} + remaining := pemBytes + + count := 0 + for len(remaining) > 0 { + if count == iterations { + return nil, errors.New("too many certificates specified in PEM block") + } + var certDer *pem.Block + certDer, remaining = pem.Decode(remaining) + + if certDer == nil { + return nil, errors.New("error during PEM decoding") + } + + cert, err := x509.ParseCertificate(certDer.Bytes) + if err != nil { + return nil, err + } + result = append(result, cert) + count++ + } + return result, nil +} + // LoadCertificatesFromPEM extracts one or more X509 certificates from the provided // io.Reader. func LoadCertificatesFromPEM(pem io.Reader) ([]*x509.Certificate, error) { diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go index 72fe1aa3a7..31011f34cf 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/password.go @@ -27,10 +27,8 @@ import ( // PassFunc is a type of function that takes a boolean (representing whether confirmation is desired) and returns the password as read, along with an error if one occurred type PassFunc func(bool) ([]byte, error) -var ( - // Read is for fuzzing - Read = readPasswordFn -) +// Read is for fuzzing +var Read = readPasswordFn // readPasswordFn reads the password from the following sources, in order of preference: // diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go index d97bf36bf6..b1a0dad05e 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/privatekey.go @@ -31,7 +31,11 @@ import ( const ( // PrivateKeyPEMType is the string "PRIVATE KEY" to be used during PEM encoding and decoding - PrivateKeyPEMType PEMType = "PRIVATE KEY" + PrivateKeyPEMType PEMType = "PRIVATE KEY" + // ECPrivateKeyPEMType is the string "EC PRIVATE KEY" used to parse SEC 1 EC private keys + ECPrivateKeyPEMType PEMType = "EC PRIVATE KEY" + // PKCS1PrivateKeyPEMType is the string "RSA PRIVATE KEY" used to parse PKCS#1-encoded private keys + PKCS1PrivateKeyPEMType PEMType = "RSA PRIVATE KEY" encryptedCosignPrivateKeyPEMType PEMType = "ENCRYPTED COSIGN PRIVATE KEY" // EncryptedSigstorePrivateKeyPEMType is the string "ENCRYPTED SIGSTORE PRIVATE KEY" to be used during PEM encoding and decoding EncryptedSigstorePrivateKeyPEMType PEMType = "ENCRYPTED SIGSTORE PRIVATE KEY" @@ -106,6 +110,10 @@ func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, switch derBlock.Type { case string(PrivateKeyPEMType): return x509.ParsePKCS8PrivateKey(derBlock.Bytes) + case string(PKCS1PrivateKeyPEMType): + return x509.ParsePKCS1PrivateKey(derBlock.Bytes) + case string(ECPrivateKeyPEMType): + return x509.ParseECPrivateKey(derBlock.Bytes) case string(EncryptedSigstorePrivateKeyPEMType), string(encryptedCosignPrivateKeyPEMType): derBytes := derBlock.Bytes if pf != nil { 
@@ -123,7 +131,7 @@ func UnmarshalPEMToPrivateKey(pemBytes []byte, pf PassFunc) (crypto.PrivateKey, return x509.ParsePKCS8PrivateKey(derBytes) } - return nil, fmt.Errorf("unknown PEM file type: %v", derBlock.Type) + return nil, fmt.Errorf("unknown private key PEM file type: %v", derBlock.Type) } // MarshalPrivateKeyToDER converts a crypto.PrivateKey into a PKCS8 ASN.1 DER byte slice @@ -134,7 +142,7 @@ func MarshalPrivateKeyToDER(priv crypto.PrivateKey) ([]byte, error) { return x509.MarshalPKCS8PrivateKey(priv) } -// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PEM-encoded byte slice +// MarshalPrivateKeyToPEM converts a crypto.PrivateKey into a PKCS#8 PEM-encoded byte slice func MarshalPrivateKeyToPEM(priv crypto.PrivateKey) ([]byte, error) { derBytes, err := MarshalPrivateKeyToDER(priv) if err != nil { diff --git a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go index fd0a634324..d2b94d4d93 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go +++ b/vendor/github.com/sigstore/sigstore/pkg/cryptoutils/publickey.go @@ -37,6 +37,8 @@ import ( const ( // PublicKeyPEMType is the string "PUBLIC KEY" to be used during PEM encoding and decoding PublicKeyPEMType PEMType = "PUBLIC KEY" + // PKCS1PublicKeyPEMType is the string "RSA PUBLIC KEY" used to parse PKCS#1-encoded public keys + PKCS1PublicKeyPEMType PEMType = "RSA PUBLIC KEY" ) // subjectPublicKeyInfo is used to construct a subject key ID. @@ -52,7 +54,15 @@ func UnmarshalPEMToPublicKey(pemBytes []byte) (crypto.PublicKey, error) { if derBytes == nil { return nil, errors.New("PEM decoding failed") } - return x509.ParsePKIXPublicKey(derBytes.Bytes) + switch derBytes.Type { + case string(PublicKeyPEMType): + return x509.ParsePKIXPublicKey(derBytes.Bytes) + case string(PKCS1PublicKeyPEMType): + return x509.ParsePKCS1PublicKey(derBytes.Bytes) + default: + return nil, fmt.Errorf("unknown Public key PEM file type: %v. Are you passing the correct public key?", + derBytes.Type) + } } // MarshalPublicKeyToDER converts a crypto.PublicKey into a PKIX, ASN.1 DER byte slice diff --git a/vendor/github.com/sigstore/sigstore/pkg/fulcioroots/fulcioroots.go b/vendor/github.com/sigstore/sigstore/pkg/fulcioroots/fulcioroots.go new file mode 100644 index 0000000000..4aae36f7bc --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/fulcioroots/fulcioroots.go @@ -0,0 +1,100 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fulcioroots + +import ( + "bytes" + "context" + "crypto/x509" + "errors" + "fmt" + "sync" + + "github.com/sigstore/sigstore/pkg/cryptoutils" + "github.com/sigstore/sigstore/pkg/tuf" +) + +var ( + rootsOnce sync.Once + roots *x509.CertPool + intermediates *x509.CertPool + singletonRootErr error +) + +// This is the root in the fulcio project. +var fulcioTargetStr = `fulcio.crt.pem` + +// This is the v1 migrated root. 
+var fulcioV1TargetStr = `fulcio_v1.crt.pem` + +// This is the untrusted v1 intermediate CA certificate, used for chain building. +var fulcioV1IntermediateTargetStr = `fulcio_intermediate_v1.crt.pem` + +// Get returns the Fulcio root certificate. +func Get() (*x509.CertPool, error) { + rootsOnce.Do(func() { + roots, intermediates, singletonRootErr = initRoots() + if singletonRootErr != nil { + return + } + }) + return roots, singletonRootErr +} + +// GetIntermediates returns the Fulcio intermediate certificates. +func GetIntermediates() (*x509.CertPool, error) { + rootsOnce.Do(func() { + roots, intermediates, singletonRootErr = initRoots() + if singletonRootErr != nil { + return + } + }) + return intermediates, singletonRootErr +} + +func initRoots() (*x509.CertPool, *x509.CertPool, error) { + tufClient, err := tuf.NewFromEnv(context.Background()) + if err != nil { + return nil, nil, fmt.Errorf("initializing tuf: %w", err) + } + // Retrieve from the embedded or cached TUF root. If expired, a network + // call is made to update the root. + targets, err := tufClient.GetTargetsByMeta(tuf.Fulcio, []string{fulcioTargetStr, fulcioV1TargetStr, fulcioV1IntermediateTargetStr}) + if err != nil { + return nil, nil, fmt.Errorf("error getting targets: %w", err) + } + if len(targets) == 0 { + return nil, nil, errors.New("none of the Fulcio roots have been found") + } + rootPool := x509.NewCertPool() + intermediatePool := x509.NewCertPool() + for _, t := range targets { + certs, err := cryptoutils.UnmarshalCertificatesFromPEM(t.Target) + if err != nil { + return nil, nil, fmt.Errorf("error unmarshalling certificates: %w", err) + } + for _, cert := range certs { + // root certificates are self-signed + if bytes.Equal(cert.RawSubject, cert.RawIssuer) { + rootPool.AddCert(cert) + } else { + intermediatePool.AddCert(cert) + } + } + } + + return rootPool, intermediatePool, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go new file mode 100644 index 0000000000..e51b521c10 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oauth + +const ( + // InteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive OAuth token flow. + InteractiveSuccessHTML = ` +Sigstore Auth +Sigstore Auth Successful +You may now close this page.
+ + +` +) diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go index 0bebad82e8..8ce570da10 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/device.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" "time" "github.com/coreos/go-oidc/v3/oidc" @@ -31,9 +32,11 @@ import ( const ( // SigstoreDeviceURL specifies the Device Code endpoint for the public good Sigstore service /* #nosec */ + // Deprecated: this constant (while correct) should not be used SigstoreDeviceURL = "https://oauth2.sigstore.dev/auth/device/code" // SigstoreTokenURL specifies the Token endpoint for the public good Sigstore service /* #nosec */ + // Deprecated: this constant (while correct) should not be used SigstoreTokenURL = "https://oauth2.sigstore.dev/auth/device/token" ) @@ -56,41 +59,66 @@ type DeviceFlowTokenGetter struct { MessagePrinter func(string) Sleeper func(time.Duration) Issuer string - CodeURL string - TokenURL string + codeURL string } // NewDeviceFlowTokenGetter creates a new DeviceFlowTokenGetter that retrieves an OIDC Identity Token using a Device Code Grant -func NewDeviceFlowTokenGetter(issuer, codeURL, tokenURL string) *DeviceFlowTokenGetter { +// Deprecated: NewDeviceFlowTokenGetter is deprecated; use NewDeviceFlowTokenGetterForIssuer() instead +func NewDeviceFlowTokenGetter(issuer, codeURL, _ string) *DeviceFlowTokenGetter { return &DeviceFlowTokenGetter{ MessagePrinter: func(s string) { fmt.Println(s) }, Sleeper: time.Sleep, Issuer: issuer, - CodeURL: codeURL, - TokenURL: tokenURL, + codeURL: codeURL, } } -func (d *DeviceFlowTokenGetter) deviceFlow(clientID, redirectURL string) (string, error) { +// NewDeviceFlowTokenGetterForIssuer creates a new DeviceFlowTokenGetter that retrieves an OIDC Identity Token using a Device Code Grant +func NewDeviceFlowTokenGetterForIssuer(issuer string) *DeviceFlowTokenGetter { + return &DeviceFlowTokenGetter{ + MessagePrinter: func(s string) { fmt.Println(s) }, + Sleeper: time.Sleep, + Issuer: issuer, + } +} + +func (d *DeviceFlowTokenGetter) deviceFlow(p *oidc.Provider, clientID, redirectURL string) (string, error) { + // require that OIDC provider support PKCE to provide sufficient security for the CLI + pkce, err := NewPKCE(p) + if err != nil { + return "", err + } + data := url.Values{ - "client_id": []string{clientID}, - "scope": []string{"openid email"}, + "client_id": []string{clientID}, + "scope": []string{"openid email"}, + "code_challenge_method": []string{pkce.Method}, + "code_challenge": []string{pkce.Challenge}, } if redirectURL != "" { // If a redirect uri is provided then use it data["redirect_uri"] = []string{redirectURL} } + codeURL, err := d.CodeURL() + if err != nil { + return "", err + } /* #nosec */ - resp, err := http.PostForm(d.CodeURL, data) + resp, err := http.PostForm(codeURL, data) if err != nil { return "", err } + defer resp.Body.Close() b, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err } + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s: %s", resp.Status, b) + } + parsed := deviceResp{} if err := json.Unmarshal(b, &parsed); err != nil { return "", err @@ -105,16 +133,19 @@ func (d *DeviceFlowTokenGetter) deviceFlow(clientID, redirectURL string) (string for { // Some providers use a secret here, we don't need for sigstore oauth one so leave it off. 
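+		// Poll the token endpoint with the device_code (and PKCE code_verifier)
+		// until the user completes the authorization in their browser.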
data := url.Values{ - "grant_type": []string{"urn:ietf:params:oauth:grant-type:device_code"}, - "device_code": []string{parsed.DeviceCode}, - "scope": []string{"openid", "email"}, + "grant_type": []string{"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": []string{parsed.DeviceCode}, + "scope": []string{"openid", "email"}, + "code_verifier": []string{pkce.Value}, } /* #nosec */ - resp, err := http.PostForm(d.TokenURL, data) + resp, err := http.PostForm(p.Endpoint().TokenURL, data) if err != nil { return "", err } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) if err != nil { return "", err @@ -144,7 +175,7 @@ func (d *DeviceFlowTokenGetter) deviceFlow(clientID, redirectURL string) (string // GetIDToken gets an OIDC ID Token from the specified provider using the device code grant flow func (d *DeviceFlowTokenGetter) GetIDToken(p *oidc.Provider, cfg oauth2.Config) (*OIDCIDToken, error) { - idToken, err := d.deviceFlow(cfg.ClientID, cfg.RedirectURL) + idToken, err := d.deviceFlow(p, cfg.ClientID, cfg.RedirectURL) if err != nil { return nil, err } @@ -164,3 +195,49 @@ func (d *DeviceFlowTokenGetter) GetIDToken(p *oidc.Provider, cfg oauth2.Config) Subject: subj, }, nil } + +// CodeURL fetches the device authorization endpoint URL from the provider's well-known configuration endpoint +func (d *DeviceFlowTokenGetter) CodeURL() (string, error) { + if d.codeURL != "" { + return d.codeURL, nil + } + + wellKnown := strings.TrimSuffix(d.Issuer, "/") + "/.well-known/openid-configuration" + /* #nosec */ + httpClient := &http.Client{ + Timeout: 3 * time.Second, + } + resp, err := httpClient.Get(wellKnown) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("unable to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("%s: %s", resp.Status, body) + } + + providerConfig := struct { + Issuer string `json:"issuer"` + DeviceEndpoint string `json:"device_authorization_endpoint"` + }{} + if err = json.Unmarshal(body, &providerConfig); err != nil { + return "", fmt.Errorf("oidc: failed to decode provider discovery object: %w", err) + } + + if d.Issuer != providerConfig.Issuer { + return "", fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", d.Issuer, providerConfig.Issuer) + } + + if providerConfig.DeviceEndpoint == "" { + return "", fmt.Errorf("oidc: device authorization endpoint not returned by provider") + } + + d.codeURL = providerConfig.DeviceEndpoint + return d.codeURL, nil +} diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go index 21be956358..c5251c3809 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go @@ -19,24 +19,14 @@ import ( "context" "encoding/json" "errors" - "fmt" - "os" "github.com/coreos/go-oidc/v3/oidc" + soauth "github.com/sigstore/sigstore/pkg/oauth" "golang.org/x/oauth2" "gopkg.in/square/go-jose.v2" ) const ( - htmlPage = ` -Sigstore Auth - -

Sigstore Auth Successful

-

You may now close this page.

-
-
-`
-
 	// PublicInstanceGithubAuthSubURL Default connector ids used by `oauth2.sigstore.dev` for Github
 	PublicInstanceGithubAuthSubURL = "https://github.com/login/oauth"
 	// PublicInstanceGoogleAuthSubURL Default connector ids used by `oauth2.sigstore.dev` for Google
@@ -65,14 +55,12 @@ func ConnectorIDOpt(prov string) oauth2.AuthCodeOption {
 // DefaultIDTokenGetter is the default implementation.
 // The HTML page and message printed to the terminal can be customized.
 var DefaultIDTokenGetter = &InteractiveIDTokenGetter{
-	MessagePrinter: func(url string) { fmt.Fprintf(os.Stderr, "Your browser will now be opened to:\n%s\n", url) },
-	HTMLPage:       htmlPage,
+	HTMLPage: soauth.InteractiveSuccessHTML,
 }
 
 // PublicInstanceGithubIDTokenGetter is a `oauth2.sigstore.dev` flow selecting github as an Idp
 // Flow is based on `DefaultIDTokenGetter` fields
 var PublicInstanceGithubIDTokenGetter = &InteractiveIDTokenGetter{
-	MessagePrinter:     DefaultIDTokenGetter.MessagePrinter,
 	HTMLPage:           DefaultIDTokenGetter.HTMLPage,
 	ExtraAuthURLParams: []oauth2.AuthCodeOption{ConnectorIDOpt(PublicInstanceGithubAuthSubURL)},
 }
@@ -80,7 +68,6 @@
 // PublicInstanceGoogleIDTokenGetter is a `oauth2.sigstore.dev` flow selecting google as an Idp
 // Flow is based on `DefaultIDTokenGetter` fields
 var PublicInstanceGoogleIDTokenGetter = &InteractiveIDTokenGetter{
-	MessagePrinter:     DefaultIDTokenGetter.MessagePrinter,
 	HTMLPage:           DefaultIDTokenGetter.HTMLPage,
 	ExtraAuthURLParams: []oauth2.AuthCodeOption{ConnectorIDOpt(PublicInstanceGoogleAuthSubURL)},
 }
@@ -88,14 +75,18 @@
 // PublicInstanceMicrosoftIDTokenGetter is a `oauth2.sigstore.dev` flow selecting microsoft as an Idp
 // Flow is based on `DefaultIDTokenGetter` fields
 var PublicInstanceMicrosoftIDTokenGetter = &InteractiveIDTokenGetter{
-	MessagePrinter:     DefaultIDTokenGetter.MessagePrinter,
 	HTMLPage:           DefaultIDTokenGetter.HTMLPage,
 	ExtraAuthURLParams: []oauth2.AuthCodeOption{ConnectorIDOpt(PublicInstanceMicrosoftAuthSubURL)},
 }
 
 // OIDConnect requests an OIDC Identity Token from the specified issuer using the specified client credentials and TokenGetter
 // NOTE: If the redirectURL is empty a listener on localhost:0 is configured with '/auth/callback' as default path.
-func OIDConnect(issuer string, id string, secret string, redirectURL string, tg TokenGetter) (*OIDCIDToken, error) {
+func OIDConnect(issuer, id, secret, redirectURL string, tg TokenGetter) (*OIDCIDToken, error) {
+	// Check if it's a StaticTokenGetter, since NewProvider below would make
+	// unnecessary network calls whose results are then ignored.
+ if sg, ok := tg.(*StaticTokenGetter); ok { + return sg.GetIDToken(nil, oauth2.Config{}) + } provider, err := oidc.NewProvider(context.Background(), issuer) if err != nil { return nil, err diff --git a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go index dc6890263f..96db48b567 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go +++ b/vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go @@ -17,7 +17,9 @@ package oauthflow import ( "context" + "errors" "fmt" + "io" "net" "net/http" "net/url" @@ -25,7 +27,6 @@ import ( "time" "github.com/coreos/go-oidc/v3/oidc" - "github.com/pkg/errors" "github.com/segmentio/ksuid" "github.com/skratchdot/open-golang/open" "golang.org/x/oauth2" @@ -37,9 +38,10 @@ var browserOpener = open.Run // InteractiveIDTokenGetter is a type to get ID tokens for oauth flows type InteractiveIDTokenGetter struct { - MessagePrinter func(url string) HTMLPage string ExtraAuthURLParams []oauth2.AuthCodeOption + Input io.Reader + Output io.Writer } // GetIDToken gets an OIDC ID Token from the specified provider using an interactive browser session @@ -53,7 +55,7 @@ func (i *InteractiveIDTokenGetter) GetIDToken(p *oidc.Provider, cfg oauth2.Confi // starts listener using the redirect_uri, otherwise starts on ephemeral port redirectServer, redirectURL, err := startRedirectListener(stateToken, i.HTMLPage, cfg.RedirectURL, doneCh, errCh) if err != nil { - return nil, errors.Wrap(err, "starting redirect listener") + return nil, fmt.Errorf("starting redirect listener: %w", err) } defer func() { go func() { @@ -77,14 +79,14 @@ func (i *InteractiveIDTokenGetter) GetIDToken(p *oidc.Provider, cfg oauth2.Confi var code string if err := browserOpener(authCodeURL); err != nil { // Swap to the out of band flow if we can't open the browser - fmt.Fprintf(os.Stderr, "error opening browser: %v\n", err) - code = doOobFlow(&cfg, stateToken, opts) + fmt.Fprintf(i.GetOutput(), "error opening browser: %v\n", err) + code = i.doOobFlow(&cfg, stateToken, opts) } else { - fmt.Fprintf(os.Stderr, "Your browser will now be opened to:\n%s\n", authCodeURL) + fmt.Fprintf(i.GetOutput(), "Your browser will now be opened to:\n%s\n", authCodeURL) code, err = getCode(doneCh, errCh) if err != nil { - fmt.Fprintf(os.Stderr, "error getting code from local server: %v\n", err) - code = doOobFlow(&cfg, stateToken, opts) + fmt.Fprintf(i.GetOutput(), "error getting code from local server: %v\n", err) + code = i.doOobFlow(&cfg, stateToken, opts) } } token, err := cfg.Exchange(context.Background(), code, append(pkce.TokenURLOpts(), oidc.Nonce(nonce))...) @@ -125,18 +127,36 @@ func (i *InteractiveIDTokenGetter) GetIDToken(p *oidc.Provider, cfg oauth2.Confi return &returnToken, nil } -func doOobFlow(cfg *oauth2.Config, stateToken string, opts []oauth2.AuthCodeOption) string { +func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken string, opts []oauth2.AuthCodeOption) string { if cfg.RedirectURL == "" { cfg.RedirectURL = oobRedirectURI } authURL := cfg.AuthCodeURL(stateToken, opts...) 
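+	// Out-of-band fallback: print the authorization URL and read the
+	// verification code back from the configured input.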
-	fmt.Fprintln(os.Stderr, "Go to the following link in a browser:\n\n\t", authURL)
-	fmt.Fprintf(os.Stderr, "Enter verification code: ")
+	fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL)
+	fmt.Fprintf(i.GetOutput(), "Enter verification code: ")
 	var code string
-	fmt.Scanln(&code)
+	fmt.Fscanln(i.GetInput(), &code)
 	return code
 }
 
+// GetInput returns the input reader for the token getter. If one is not set,
+// it defaults to stdin.
+func (i *InteractiveIDTokenGetter) GetInput() io.Reader {
+	if i.Input == nil {
+		return os.Stdin
+	}
+	return i.Input
+}
+
+// GetOutput returns the output writer for the token getter. If one is not set,
+// it defaults to stderr.
+func (i *InteractiveIDTokenGetter) GetOutput() io.Writer {
+	if i.Output == nil {
+		return os.Stderr
+	}
+	return i.Output
+}
+
 func startRedirectListener(state, htmlPage, redirectURL string, doneCh chan string, errCh chan error) (*http.Server, *url.URL, error) {
 	var listener net.Listener
 	var urlListener *url.URL
@@ -148,10 +168,14 @@ func startRedirectListener(state, htmlPage, redirectURL string, doneCh chan stri
 			return nil, nil, err
 		}
 
-		port := listener.Addr().(*net.TCPAddr).Port
+		addr, ok := listener.Addr().(*net.TCPAddr)
+		if !ok {
+			return nil, nil, fmt.Errorf("listener addr is not TCPAddr")
+		}
+
 		urlListener = &url.URL{
 			Scheme: "http",
-			Host:   fmt.Sprintf("localhost:%d", port),
+			Host:   fmt.Sprintf("localhost:%d", addr.Port),
 			Path:   "/auth/callback",
 		}
 	} else {
@@ -170,6 +194,9 @@
 	s := &http.Server{
 		Addr:    urlListener.Host,
 		Handler: m,
+
+		// an arbitrary reasonable value to fix gosec lint error
+		ReadHeaderTimeout: 2 * time.Second,
 	}
 
 	m.HandleFunc(urlListener.Path, func(w http.ResponseWriter, r *http.Request) {
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go
index a814e396ce..dc18ea3128 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/adapters.go
@@ -37,7 +37,7 @@ func (a *SignerAdapter) Sign(data []byte) ([]byte, error) {
 }
 
 // Verify disabled `go-securesystemslib/dsse.Verifier`
-func (a *SignerAdapter) Verify(data []byte, sig []byte) error {
+func (a *SignerAdapter) Verify(data, sig []byte) error {
 	return errors.New("Verify disabled")
 }
 
@@ -59,7 +59,7 @@ type VerifierAdapter struct {
 }
 
 // Verify implements `go-securesystemslib/dsse.Verifier`
-func (a *VerifierAdapter) Verify(data []byte, sig []byte) error {
+func (a *VerifierAdapter) Verify(data, sig []byte) error {
 	return a.SignatureVerifier.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data))
 }
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go
index cd6f0133af..cc1ebf0fa9 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/dsse.go
@@ -86,7 +86,7 @@ func (w *wrappedVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.P
 }
 
 // VerifySignature verifies the signature specified in a DSSE envelope
-func (w *wrappedVerifier) VerifySignature(s io.Reader, _ io.Reader, opts ...signature.VerifyOption) error {
+func (w *wrappedVerifier) VerifySignature(s, _ io.Reader, opts ...signature.VerifyOption) error {
 	sig, err := ioutil.ReadAll(s)
 	if err != nil {
 		return err
@@ -141,7 +141,7 @@ func (w
*wrappedSignerVerifier) PublicKey(opts ...signature.PublicKeyOption) (cr
 }
 
 // VerifySignature verifies the signature specified in a DSSE envelope
-func (w *wrappedSignerVerifier) VerifySignature(s io.Reader, r io.Reader, opts ...signature.VerifyOption) error {
+func (w *wrappedSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error {
 	return w.verifier.VerifySignature(s, r, opts...)
 }
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go
index 20ef0e7430..73252d92f2 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/dsse/multidsse.go
@@ -128,7 +128,7 @@ func (wL *wrappedMultiVerifier) PublicKey(opts ...signature.PublicKeyOption) (cr
 }
 
 // VerifySignature verifies the signature specified in a DSSE envelope
-func (wL *wrappedMultiVerifier) VerifySignature(s io.Reader, _ io.Reader, opts ...signature.VerifyOption) error {
+func (wL *wrappedMultiVerifier) VerifySignature(s, _ io.Reader, opts ...signature.VerifyOption) error {
 	sig, err := ioutil.ReadAll(s)
 	if err != nil {
 		return err
@@ -177,7 +177,7 @@ func (w *wrappedMultiSignerVerifier) PublicKey(opts ...signature.PublicKeyOption
 }
 
 // VerifySignature verifies the signature specified in a DSSE envelope
-func (w *wrappedMultiSignerVerifier) VerifySignature(s io.Reader, r io.Reader, opts ...signature.VerifyOption) error {
+func (w *wrappedMultiSignerVerifier) VerifySignature(s, r io.Reader, opts ...signature.VerifyOption) error {
 	return w.verifier.VerifySignature(s, r, opts...)
 }
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
index 83c8a5d9ed..dfbd5793d1 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ecdsa.go
@@ -20,17 +20,27 @@ import (
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/rand"
+	"errors"
+	"fmt"
 	"io"
 
-	"github.com/pkg/errors"
 	"github.com/sigstore/sigstore/pkg/signature/options"
 )
 
+// checked on LoadSigner, LoadVerifier and SignMessage
 var ecdsaSupportedHashFuncs = []crypto.Hash{
 	crypto.SHA256,
 	crypto.SHA512,
 	crypto.SHA384,
 	crypto.SHA224,
+}
+
+// checked on VerifySignature. Supports SHA1 verification.
+var ecdsaSupportedVerifyHashFuncs = []crypto.Hash{
+	crypto.SHA256,
+	crypto.SHA512,
+	crypto.SHA384,
+	crypto.SHA224,
 	crypto.SHA1,
 }
@@ -128,6 +138,10 @@ func LoadECDSAVerifier(pub *ecdsa.PublicKey, hashFunc crypto.Hash) (*ECDSAVerifi
 		return nil, errors.New("invalid ECDSA public key specified")
 	}
 
+	if !isSupportedAlg(hashFunc, ecdsaSupportedHashFuncs) {
+		return nil, errors.New("invalid hash function specified")
+	}
+
 	return &ECDSAVerifier{
 		publicKey: pub,
 		hashFunc:  hashFunc,
@@ -153,7 +167,7 @@ func (e ECDSAVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error)
 //
 // All other options are ignored if specified.
 func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
-	digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedHashFuncs, opts...)
+	digest, _, err := ComputeDigestForVerifying(message, e.hashFunc, ecdsaSupportedVerifyHashFuncs, opts...)
if err != nil { return err } @@ -164,12 +178,13 @@ func (e ECDSAVerifier) VerifySignature(signature, message io.Reader, opts ...Ver sigBytes, err := io.ReadAll(signature) if err != nil { - return errors.Wrap(err, "reading signature") + return fmt.Errorf("reading signature: %w", err) } if !ecdsa.VerifyASN1(e.publicKey, digest, sigBytes) { - return errors.New("failed to verify signature") + return errors.New("invalid signature when validating ASN.1 encoded signature") } + return nil } @@ -184,11 +199,11 @@ type ECDSASignerVerifier struct { func LoadECDSASignerVerifier(priv *ecdsa.PrivateKey, hf crypto.Hash) (*ECDSASignerVerifier, error) { signer, err := LoadECDSASigner(priv, hf) if err != nil { - return nil, errors.Wrap(err, "initializing signer") + return nil, fmt.Errorf("initializing signer: %w", err) } verifier, err := LoadECDSAVerifier(&priv.PublicKey, hf) if err != nil { - return nil, errors.Wrap(err, "initializing verifier") + return nil, fmt.Errorf("initializing verifier: %w", err) } return &ECDSASignerVerifier{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go index ae3ddf96bd..23a8638ff5 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/ed25519.go @@ -20,9 +20,9 @@ import ( "crypto" "crypto/ed25519" "crypto/rand" + "errors" + "fmt" "io" - - "github.com/pkg/errors" ) var ed25519SupportedHashFuncs = []crypto.Hash{ @@ -130,7 +130,7 @@ func (e *ED25519Verifier) VerifySignature(signature, message io.Reader, _ ...Ver sigBytes, err := io.ReadAll(signature) if err != nil { - return errors.Wrap(err, "reading signature") + return fmt.Errorf("reading signature: %w", err) } if !ed25519.Verify(e.publicKey, messageBytes, sigBytes) { @@ -150,12 +150,15 @@ type ED25519SignerVerifier struct { func LoadED25519SignerVerifier(priv ed25519.PrivateKey) (*ED25519SignerVerifier, error) { signer, err := LoadED25519Signer(priv) if err != nil { - return nil, errors.Wrap(err, "initializing signer") + return nil, fmt.Errorf("initializing signer: %w", err) + } + pub, ok := priv.Public().(ed25519.PublicKey) + if !ok { + return nil, fmt.Errorf("given key is not ed25519.PublicKey") } - pub := priv.Public().(ed25519.PublicKey) verifier, err := LoadED25519Verifier(pub) if err != nil { - return nil, errors.Wrap(err, "initializing verifier") + return nil, fmt.Errorf("initializing verifier: %w", err) } return &ED25519SignerVerifier{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go index 2b07a7f81f..ac8a576a17 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/client.go @@ -22,6 +22,7 @@ import ( "crypto/rsa" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" "net/http" @@ -29,18 +30,18 @@ import ( "regexp" "time" - "github.com/ReneKroon/ttlcache/v2" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/pkg/errors" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/kms" + "github.com/aws/aws-sdk-go-v2/service/kms/types" + "github.com/jellydator/ttlcache/v2" "github.com/sigstore/sigstore/pkg/signature" sigkms "github.com/sigstore/sigstore/pkg/signature/kms" ) func init() { - sigkms.AddProvider(ReferenceScheme, func(_ 
context.Context, keyResourceID string, _ crypto.Hash, _ ...signature.RPCOption) (sigkms.SignerVerifier, error) { - return LoadSignerVerifier(keyResourceID) + sigkms.AddProvider(ReferenceScheme, func(ctx context.Context, keyResourceID string, _ crypto.Hash, _ ...signature.RPCOption) (sigkms.SignerVerifier, error) { + return LoadSignerVerifier(ctx, keyResourceID) }) } @@ -51,7 +52,7 @@ const ( ) type awsClient struct { - client *kms.KMS + client *kms.Client endpoint string keyID string alias string @@ -91,7 +92,8 @@ func ValidReference(ref string) error { return errKMSReference } -func parseReference(resourceID string) (endpoint, keyID, alias string, err error) { +// ParseReference parses an awskms-scheme URI into its constituent parts. +func ParseReference(resourceID string) (endpoint, keyID, alias string, err error) { var v []string for _, re := range allREs { v = re.FindStringSubmatch(resourceID) @@ -103,59 +105,70 @@ func parseReference(resourceID string) (endpoint, keyID, alias string, err error return } } - err = errors.Errorf("invalid awskms format %q", resourceID) + err = fmt.Errorf("invalid awskms format %q", resourceID) return } -func newAWSClient(keyResourceID string) (a *awsClient, err error) { - a = &awsClient{} - a.endpoint, a.keyID, a.alias, err = parseReference(keyResourceID) +func newAWSClient(ctx context.Context, keyResourceID string, opts ...func(*config.LoadOptions) error) (*awsClient, error) { + if err := ValidReference(keyResourceID); err != nil { + return nil, err + } + a := &awsClient{} + var err error + a.endpoint, a.keyID, a.alias, err = ParseReference(keyResourceID) if err != nil { return nil, err } - err = a.setupClient() - if err != nil { + if err := a.setupClient(ctx, opts...); err != nil { return nil, err } a.keyCache = ttlcache.NewCache() a.keyCache.SetLoaderFunction(a.keyCacheLoaderFunction) a.keyCache.SkipTTLExtensionOnHit(true) - return + return a, nil } -func (a *awsClient) setupClient() (err error) { - var sess *session.Session - config := &aws.Config{} +func (a *awsClient) setupClient(ctx context.Context, opts ...func(*config.LoadOptions) error) (err error) { if a.endpoint != "" { - config.Endpoint = aws.String("https://" + a.endpoint) + opts = append(opts, config.WithEndpointResolverWithOptions( + aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + return aws.Endpoint{ + URL: "https://" + a.endpoint, + }, nil + }), + )) } if os.Getenv("AWS_TLS_INSECURE_SKIP_VERIFY") == "1" { - config.HTTPClient = &http.Client{ + opts = append(opts, config.WithHTTPClient(&http.Client{ Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}} // nolint: gosec + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint: gosec + }, + })) } - sess, err = session.NewSession(config) + + cfg, err := config.LoadDefaultConfig(ctx, opts...) 
if err != nil { - return errors.Wrap(err, "new aws session") + return fmt.Errorf("loading AWS config: %w", err) } - a.client = kms.New(sess) + + a.client = kms.NewFromConfig(cfg) return } type cmk struct { - KeyMetadata *kms.KeyMetadata + KeyMetadata *types.KeyMetadata PublicKey crypto.PublicKey } func (c *cmk) HashFunc() crypto.Hash { - switch *c.KeyMetadata.SigningAlgorithms[0] { - case kms.SigningAlgorithmSpecRsassaPssSha256, kms.SigningAlgorithmSpecRsassaPkcs1V15Sha256, kms.SigningAlgorithmSpecEcdsaSha256: + switch c.KeyMetadata.SigningAlgorithms[0] { + case types.SigningAlgorithmSpecRsassaPssSha256, types.SigningAlgorithmSpecRsassaPkcs1V15Sha256, types.SigningAlgorithmSpecEcdsaSha256: return crypto.SHA256 - case kms.SigningAlgorithmSpecRsassaPssSha384, kms.SigningAlgorithmSpecRsassaPkcs1V15Sha384, kms.SigningAlgorithmSpecEcdsaSha384: + case types.SigningAlgorithmSpecRsassaPssSha384, types.SigningAlgorithmSpecRsassaPkcs1V15Sha384, types.SigningAlgorithmSpecEcdsaSha384: return crypto.SHA384 - case kms.SigningAlgorithmSpecRsassaPssSha512, kms.SigningAlgorithmSpecRsassaPkcs1V15Sha512, kms.SigningAlgorithmSpecEcdsaSha512: + case types.SigningAlgorithmSpecRsassaPssSha512, types.SigningAlgorithmSpecRsassaPkcs1V15Sha512, types.SigningAlgorithmSpecEcdsaSha512: return crypto.SHA512 default: return 0 @@ -163,13 +176,25 @@ func (c *cmk) HashFunc() crypto.Hash { } func (c *cmk) Verifier() (signature.Verifier, error) { - switch *c.KeyMetadata.SigningAlgorithms[0] { - case kms.SigningAlgorithmSpecRsassaPssSha256, kms.SigningAlgorithmSpecRsassaPssSha384, kms.SigningAlgorithmSpecRsassaPssSha512: - return signature.LoadRSAPSSVerifier(c.PublicKey.(*rsa.PublicKey), c.HashFunc(), nil) - case kms.SigningAlgorithmSpecRsassaPkcs1V15Sha256, kms.SigningAlgorithmSpecRsassaPkcs1V15Sha384, kms.SigningAlgorithmSpecRsassaPkcs1V15Sha512: - return signature.LoadRSAPKCS1v15Verifier(c.PublicKey.(*rsa.PublicKey), c.HashFunc()) - case kms.SigningAlgorithmSpecEcdsaSha256, kms.SigningAlgorithmSpecEcdsaSha384, kms.SigningAlgorithmSpecEcdsaSha512: - return signature.LoadECDSAVerifier(c.PublicKey.(*ecdsa.PublicKey), c.HashFunc()) + switch c.KeyMetadata.SigningAlgorithms[0] { + case types.SigningAlgorithmSpecRsassaPssSha256, types.SigningAlgorithmSpecRsassaPssSha384, types.SigningAlgorithmSpecRsassaPssSha512: + pub, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not rsa") + } + return signature.LoadRSAPSSVerifier(pub, c.HashFunc(), nil) + case types.SigningAlgorithmSpecRsassaPkcs1V15Sha256, types.SigningAlgorithmSpecRsassaPkcs1V15Sha384, types.SigningAlgorithmSpecRsassaPkcs1V15Sha512: + pub, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not rsa") + } + return signature.LoadRSAPKCS1v15Verifier(pub, c.HashFunc()) + case types.SigningAlgorithmSpecEcdsaSha256, types.SigningAlgorithmSpecEcdsaSha384, types.SigningAlgorithmSpecEcdsaSha512: + pub, ok := c.PublicKey.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("public key is not ecdsa") + } + return signature.LoadECDSAVerifier(pub, c.HashFunc()) default: return nil, fmt.Errorf("signing algorithm unsupported") } @@ -214,8 +239,11 @@ func (a *awsClient) getCMK(ctx context.Context) (*cmk, error) { if err != nil { return nil, err } - - return c.(*cmk), nil + cmk, ok := c.(*cmk) + if !ok { + return nil, fmt.Errorf("could not parse cache value as cmk") + } + return cmk, nil } func (a *awsClient) createKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { @@ -230,28 +258,28 @@ func (a 
*awsClient) createKey(ctx context.Context, algorithm string) (crypto.Pub } // return error if not *kms.NotFoundException - var errNotFound *kms.NotFoundException + var errNotFound *types.NotFoundException if !errors.As(err, &errNotFound) { - return nil, errors.Wrap(err, "looking up key") + return nil, fmt.Errorf("looking up key: %w", err) } - usage := kms.KeyUsageTypeSignVerify + usage := types.KeyUsageTypeSignVerify description := "Created by Sigstore" - key, err := a.client.CreateKeyWithContext(ctx, &kms.CreateKeyInput{ - CustomerMasterKeySpec: &algorithm, - KeyUsage: &usage, + key, err := a.client.CreateKey(ctx, &kms.CreateKeyInput{ + CustomerMasterKeySpec: types.CustomerMasterKeySpec(algorithm), + KeyUsage: usage, Description: &description, }) if err != nil { - return nil, errors.Wrap(err, "creating key") + return nil, fmt.Errorf("creating key: %w", err) } - _, err = a.client.CreateAliasWithContext(ctx, &kms.CreateAliasInput{ + _, err = a.client.CreateAlias(ctx, &kms.CreateAliasInput{ AliasName: &a.alias, TargetKeyId: key.KeyMetadata.KeyId, }) if err != nil { - return nil, errors.Wrapf(err, "creating alias %q", a.alias) + return nil, fmt.Errorf("creating alias %q: %w", a.alias, err) } return a.public(ctx) @@ -269,21 +297,23 @@ func (a *awsClient) verify(ctx context.Context, sig, message io.Reader, opts ... return verifier.VerifySignature(sig, message, opts...) } -func (a *awsClient) verifyRemotely(ctx context.Context, sig []byte, digest []byte) error { +func (a *awsClient) verifyRemotely(ctx context.Context, sig, digest []byte) error { cmk, err := a.getCMK(ctx) if err != nil { return err } alg := cmk.KeyMetadata.SigningAlgorithms[0] - messageType := kms.MessageTypeDigest - _, err = a.client.VerifyWithContext(ctx, &kms.VerifyInput{ + messageType := types.MessageTypeDigest + if _, err := a.client.Verify(ctx, &kms.VerifyInput{ KeyId: &a.keyID, Message: digest, - MessageType: &messageType, + MessageType: messageType, Signature: sig, SigningAlgorithm: alg, - }) - return errors.Wrap(err, "unable to verify signature") + }); err != nil { + return fmt.Errorf("unable to verify signature: %w", err) + } + return nil } func (a *awsClient) public(ctx context.Context) (crypto.PublicKey, error) { @@ -291,7 +321,11 @@ func (a *awsClient) public(ctx context.Context) (crypto.PublicKey, error) { if err != nil { return nil, err } - return key.(*cmk).PublicKey, nil + cmk, ok := key.(*cmk) + if !ok { + return nil, fmt.Errorf("could not parse key as cmk") + } + return cmk.PublicKey, nil } func (a *awsClient) sign(ctx context.Context, digest []byte, _ crypto.Hash) ([]byte, error) { @@ -301,39 +335,39 @@ func (a *awsClient) sign(ctx context.Context, digest []byte, _ crypto.Hash) ([]b } alg := cmk.KeyMetadata.SigningAlgorithms[0] - messageType := kms.MessageTypeDigest - out, err := a.client.SignWithContext(ctx, &kms.SignInput{ + messageType := types.MessageTypeDigest + out, err := a.client.Sign(ctx, &kms.SignInput{ KeyId: &a.keyID, Message: digest, - MessageType: &messageType, + MessageType: messageType, SigningAlgorithm: alg, }) if err != nil { - return nil, errors.Wrap(err, "signing with kms") + return nil, fmt.Errorf("signing with kms: %w", err) } return out.Signature, nil } func (a *awsClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) { - out, err := a.client.GetPublicKeyWithContext(ctx, &kms.GetPublicKeyInput{ + out, err := a.client.GetPublicKey(ctx, &kms.GetPublicKeyInput{ KeyId: &a.keyID, }) if err != nil { - return nil, errors.Wrap(err, "getting public key") + return nil, 
fmt.Errorf("getting public key: %w", err) } key, err := x509.ParsePKIXPublicKey(out.PublicKey) if err != nil { - return nil, errors.Wrap(err, "parsing public key") + return nil, fmt.Errorf("parsing public key: %w", err) } return key, nil } -func (a *awsClient) fetchKeyMetadata(ctx context.Context) (*kms.KeyMetadata, error) { - out, err := a.client.DescribeKeyWithContext(ctx, &kms.DescribeKeyInput{ +func (a *awsClient) fetchKeyMetadata(ctx context.Context) (*types.KeyMetadata, error) { + out, err := a.client.DescribeKey(ctx, &kms.DescribeKeyInput{ KeyId: &a.keyID, }) if err != nil { - return nil, errors.Wrap(err, "getting key metadata") + return nil, fmt.Errorf("getting key metadata: %w", err) } return out.KeyMetadata, nil } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go index cad52798a4..abab7e6158 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/aws/signer.go @@ -18,21 +18,22 @@ package aws import ( "context" "crypto" + "fmt" "io" - "github.com/aws/aws-sdk-go/service/kms" - "github.com/pkg/errors" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/kms/types" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" ) -var awsSupportedAlgorithms = []string{ - kms.CustomerMasterKeySpecRsa2048, - kms.CustomerMasterKeySpecRsa3072, - kms.CustomerMasterKeySpecRsa4096, - kms.CustomerMasterKeySpecEccNistP256, - kms.CustomerMasterKeySpecEccNistP384, - kms.CustomerMasterKeySpecEccNistP521, +var awsSupportedAlgorithms = []types.CustomerMasterKeySpec{ + types.CustomerMasterKeySpecRsa2048, + types.CustomerMasterKeySpecRsa3072, + types.CustomerMasterKeySpecRsa4096, + types.CustomerMasterKeySpecEccNistP256, + types.CustomerMasterKeySpecEccNistP384, + types.CustomerMasterKeySpecEccNistP521, } var awsSupportedHashFuncs = []crypto.Hash{ @@ -49,11 +50,11 @@ type SignerVerifier struct { // LoadSignerVerifier generates signatures using the specified key object in AWS KMS and hash algorithm. // // It also can verify signatures locally using the public key. hashFunc must not be crypto.Hash(0). -func LoadSignerVerifier(referenceStr string) (*SignerVerifier, error) { +func LoadSignerVerifier(ctx context.Context, referenceStr string, opts ...func(*config.LoadOptions) error) (*SignerVerifier, error) { a := &SignerVerifier{} var err error - a.client, err = newAWSClient(referenceStr) + a.client, err = newAWSClient(ctx, referenceStr, opts...) 
if err != nil {
 		return nil, err
 	}
@@ -87,7 +88,7 @@ func (a *SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOp
 	var signerOpts crypto.SignerOpts
 	signerOpts, err = a.client.getHashFunc(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "getting fetching default hash function")
+		return nil, fmt.Errorf("fetching default hash function: %w", err)
 	}
 	for _, opt := range opts {
 		opt.ApplyCryptoSignerOpts(&signerOpts)
@@ -154,7 +155,7 @@ func (a *SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signatu
 	var signerOpts crypto.SignerOpts
 	signerOpts, err = a.client.getHashFunc(ctx)
 	if err != nil {
-		return errors.Wrap(err, "getting hash func")
+		return fmt.Errorf("getting hash func: %w", err)
 	}
 	for _, opt := range opts {
 		opt.ApplyCryptoSignerOpts(&signerOpts)
@@ -170,7 +171,7 @@ func (a *SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signatu
 
 	sigBytes, err := io.ReadAll(sig)
 	if err != nil {
-		return errors.Wrap(err, "reading signature")
+		return fmt.Errorf("reading signature: %w", err)
 	}
 	return a.client.verifyRemotely(ctx, sigBytes, digest)
 }
@@ -214,7 +215,7 @@ func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.Signer
 func (a *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) {
 	defaultHf, err := a.client.getHashFunc(ctx)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "getting fetching default hash function")
+		return nil, nil, fmt.Errorf("fetching default hash function: %w", err)
 	}
 
 	csw := &cryptoSignerWrapper{
@@ -229,10 +230,14 @@ func (a *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error))
 
 // SupportedAlgorithms returns the list of algorithms supported by the AWS KMS service
 func (*SignerVerifier) SupportedAlgorithms() []string {
-	return awsSupportedAlgorithms
+	s := make([]string, len(awsSupportedAlgorithms))
+	for i := range awsSupportedAlgorithms {
+		s[i] = string(awsSupportedAlgorithms[i])
+	}
+	return s
 }
 
 // DefaultAlgorithm returns the default algorithm for the AWS KMS service
 func (*SignerVerifier) DefaultAlgorithm() string {
-	return kms.CustomerMasterKeySpecEccNistP256
+	return string(types.CustomerMasterKeySpecEccNistP256)
 }
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
index 64282cafc5..a6b7780a29 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/client.go
@@ -21,14 +21,14 @@ import (
 	"crypto/ecdsa"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
 	"regexp"
 	"strings"
 	"time"
 
-	"github.com/ReneKroon/ttlcache/v2"
-	"github.com/pkg/errors"
+	"github.com/jellydator/ttlcache/v2"
 	jose "gopkg.in/square/go-jose.v2"
 
 	kvauth "github.com/Azure/azure-sdk-for-go/services/keyvault/auth"
@@ -76,7 +76,7 @@ func ValidReference(ref string) error {
 func parseReference(resourceID string) (vaultURL, vaultName, keyName string, err error) {
 	v := referenceRegex.FindStringSubmatch(resourceID)
 	if len(v) != 3 {
-		err = errors.Errorf("invalid azurekms format %q", resourceID)
+		err = fmt.Errorf("invalid azurekms format %q", resourceID)
 		return
 	}
@@ -86,6 +86,9 @@ func parseReference(resourceID string) (vaultURL, vaultName, keyName string, err
 }
 
 func newAzureKMS(_ context.Context, keyResourceID string) (*azureVaultClient, error) {
+	if err := ValidReference(keyResourceID); err != nil {
+		return nil, err
+	}
 	vaultURL, vaultName,
keyName, err := parseReference(keyResourceID)
 	if err != nil {
 		return nil, err
@@ -93,7 +96,7 @@ func newAzureKMS(_ context.Context, keyResourceID string) (*azureVaultClient, er
 
 	client, err := getKeysClient()
 	if err != nil {
-		return nil, errors.Wrap(err, "new azure kms client")
+		return nil, fmt.Errorf("new azure kms client: %w", err)
 	}
 
 	azClient := &azureVaultClient{
@@ -208,18 +211,18 @@ func (a *azureVaultClient) keyCacheLoaderFunction(key string) (data interface{},
 func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey, error) {
 	key, err := a.getKey(ctx)
 	if err != nil {
-		return nil, errors.Wrap(err, "public key")
+		return nil, fmt.Errorf("public key: %w", err)
 	}
 
 	jwkJSON, err := json.Marshal(*key.Key)
 	if err != nil {
-		return nil, errors.Wrap(err, "encoding the jsonWebKey")
+		return nil, fmt.Errorf("encoding the jsonWebKey: %w", err)
 	}
 
 	jwk := jose.JSONWebKey{}
 	err = jwk.UnmarshalJSON(jwkJSON)
 	if err != nil {
-		return nil, errors.Wrap(err, "decoding the jsonWebKey")
+		return nil, fmt.Errorf("decoding the jsonWebKey: %w", err)
 	}
 
 	pub, ok := jwk.Key.(*ecdsa.PublicKey)
@@ -235,7 +238,7 @@ func (a *azureVaultClient) fetchPublicKey(ctx context.Context) (crypto.PublicKey
 func (a *azureVaultClient) getKey(ctx context.Context) (keyvault.KeyBundle, error) {
 	key, err := a.client.GetKey(ctx, a.vaultURL, a.keyName, "")
 	if err != nil {
-		return keyvault.KeyBundle{}, errors.Wrap(err, "public key")
+		return keyvault.KeyBundle{}, fmt.Errorf("public key: %w", err)
 	}
 
 	return key, err
@@ -284,12 +287,12 @@ func (a *azureVaultClient) sign(ctx context.Context, hash []byte) ([]byte, error
 	result, err := a.client.Sign(ctx, a.vaultURL, a.keyName, "", params)
 	if err != nil {
-		return nil, errors.Wrap(err, "signing the payload")
+		return nil, fmt.Errorf("signing the payload: %w", err)
 	}
 
 	decResult, err := base64.RawURLEncoding.DecodeString(*result.Result)
 	if err != nil {
-		return nil, errors.Wrap(err, "decoding the result")
+		return nil, fmt.Errorf("decoding the result: %w", err)
 	}
 
 	return decResult, nil
@@ -304,11 +307,11 @@ func (a *azureVaultClient) verify(ctx context.Context, signature, hash []byte) e
 	result, err := a.client.Verify(ctx, a.vaultURL, a.keyName, "", params)
 	if err != nil {
-		return errors.Wrap(err, "verify")
+		return fmt.Errorf("verify: %w", err)
 	}
 
 	if !*result.Value {
-		return errors.New("Failed vault verification")
+		return errors.New("failed vault verification")
 	}
 
 	return nil
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go
index f931689011..841fc79b04 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/azure/signer.go
@@ -18,10 +18,11 @@ package azure
 import (
 	"context"
 	"crypto"
+	"errors"
+	"fmt"
 	"io"
 	"math/big"
 
-	"github.com/pkg/errors"
 	"golang.org/x/crypto/cryptobyte"
 	"golang.org/x/crypto/cryptobyte/asn1"
 
@@ -148,7 +149,7 @@ func (a *SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signatu
 
 	sigBytes, err := io.ReadAll(sig)
 	if err != nil {
-		return errors.Wrap(err, "reading signature")
+		return fmt.Errorf("reading signature: %w", err)
 	}
 
 	// Convert the ASN.1 sequence to a concatenated r||s byte string
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go
index 84c387a7e3..05a1c7eb09 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go
+++
b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/client.go @@ -20,6 +20,7 @@ import ( "crypto" "crypto/ecdsa" "crypto/rsa" + "errors" "fmt" "hash/crc32" "io" @@ -28,11 +29,11 @@ import ( "time" gcpkms "cloud.google.com/go/kms/apiv1" + "google.golang.org/api/option" kmspb "google.golang.org/genproto/googleapis/cloud/kms/v1" "google.golang.org/protobuf/types/known/wrapperspb" - "github.com/ReneKroon/ttlcache/v2" - "github.com/pkg/errors" + "github.com/jellydator/ttlcache/v2" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" sigkms "github.com/sigstore/sigstore/pkg/signature/kms" @@ -84,9 +85,8 @@ type gcpClient struct { kmsClient *gcpkms.KeyManagementClient } -func newGCPClient(ctx context.Context, refStr string) (*gcpClient, error) { - var err error - if err = ValidReference(refStr); err != nil { +func newGCPClient(ctx context.Context, refStr string, opts ...option.ClientOption) (*gcpClient, error) { + if err := ValidReference(refStr); err != nil { return nil, err } @@ -99,14 +99,15 @@ func newGCPClient(ctx context.Context, refStr string) (*gcpClient, error) { refString: refStr, kvCache: ttlcache.NewCache(), } + var err error g.projectID, g.locationID, g.keyRing, g.keyName, g.version, err = parseReference(refStr) if err != nil { return nil, err } - g.kmsClient, err = gcpkms.NewKeyManagementClient(ctx) + g.kmsClient, err = gcpkms.NewKeyManagementClient(ctx, opts...) if err != nil { - return nil, errors.Wrap(err, "new gcp kms client") + return nil, fmt.Errorf("new gcp kms client: %w", err) } g.kvCache.SetLoaderFunction(g.kvCacheLoaderFunction) @@ -114,7 +115,7 @@ func newGCPClient(ctx context.Context, refStr string) (*gcpClient, error) { // prime the cache _, err = g.kvCache.Get(cacheKey) if err != nil { - return nil, errors.Wrap(err, "initializing key version from GCP KMS") + return nil, fmt.Errorf("initializing key version from GCP KMS: %w", err) } return g, nil } @@ -139,7 +140,7 @@ func ValidReference(ref string) error { func parseReference(resourceID string) (projectID, locationID, keyRing, keyName, version string, err error) { v := re.FindStringSubmatch(resourceID) if len(v) != 6 { - err = errors.Errorf("invalid gcpkms format %q", resourceID) + err = fmt.Errorf("invalid gcpkms format %q", resourceID) return } projectID, locationID, keyRing, keyName, version = v[1], v[2], v[3], v[4], v[5] @@ -203,7 +204,7 @@ func (g *gcpClient) keyVersionName(ctx context.Context) (*cryptoKeyVersion, erro // pick the key version that is enabled with the greatest version value kv, err = iterator.Next() if err != nil { - return nil, errors.Wrap(err, "unable to find an enabled key version in GCP KMS") + return nil, fmt.Errorf("unable to find an enabled key version in GCP KMS: %w", err) } } // kv is keyVersion to use @@ -213,7 +214,7 @@ func (g *gcpClient) keyVersionName(ctx context.Context) (*cryptoKeyVersion, erro pubKey, err := g.fetchPublicKey(ctx, kv.Name) if err != nil { - return nil, errors.Wrap(err, "unable to fetch public key while creating signer") + return nil, fmt.Errorf("unable to fetch public key while creating signer: %w", err) } // crv.Verifier is set here to enable storing the public key & hash algorithm together, @@ -245,7 +246,7 @@ func (g *gcpClient) keyVersionName(ctx context.Context) (*cryptoKeyVersion, erro return nil, errors.New("unknown algorithm specified by KMS") } if err != nil { - return nil, errors.Wrap(err, "initializing internal verifier") + return nil, fmt.Errorf("initializing internal verifier: %w", err) } return &crv, nil 
} @@ -256,7 +257,7 @@ func (g *gcpClient) fetchPublicKey(ctx context.Context, name string) (crypto.Pub // Call the API. pk, err := g.kmsClient.GetPublicKey(ctx, pkreq) if err != nil { - return nil, errors.Wrap(err, "public key") + return nil, fmt.Errorf("public key: %w", err) } return cryptoutils.UnmarshalPEMToPublicKey([]byte(pk.GetPem())) } @@ -278,7 +279,12 @@ func (g *gcpClient) getCKV() (*cryptoKeyVersion, error) { return nil, err } - return kmsVersionInt.(*cryptoKeyVersion), nil + kv, ok := kmsVersionInt.(*cryptoKeyVersion) + if !ok { + return nil, fmt.Errorf("could not parse kms version cache value as CryptoKeyVersion") + } + + return kv, nil } func (g *gcpClient) sign(ctx context.Context, digest []byte, alg crypto.Hash, crc uint32) ([]byte, error) { @@ -315,7 +321,7 @@ func (g *gcpClient) sign(ctx context.Context, digest []byte, alg crypto.Hash, cr resp, err := g.kmsClient.AsymmetricSign(ctx, &gcpSignReq) if err != nil { - return nil, errors.Wrap(err, "calling GCP AsymmetricSign") + return nil, fmt.Errorf("calling GCP AsymmetricSign: %w", err) } // Optional, but recommended: perform integrity verification on result. @@ -334,7 +340,7 @@ func (g *gcpClient) sign(ctx context.Context, digest []byte, alg crypto.Hash, cr func (g *gcpClient) public(ctx context.Context) (crypto.PublicKey, error) { crv, err := g.getCKV() if err != nil { - return nil, errors.Wrap(err, "transient error getting info from KMS") + return nil, fmt.Errorf("transient error getting info from KMS: %w", err) } return crv.Verifier.PublicKey(options.WithContext(ctx)) } @@ -342,7 +348,7 @@ func (g *gcpClient) public(ctx context.Context) (crypto.PublicKey, error) { func (g *gcpClient) verify(sig, message io.Reader, opts ...signature.VerifyOption) error { crv, err := g.getCKV() if err != nil { - return errors.Wrap(err, "transient error getting info from KMS") + return fmt.Errorf("transient error getting info from KMS: %w", err) } if err := crv.Verifier.VerifySignature(sig, message, opts...); err != nil { // key could have been rotated, clear cache and try again if we're not pinned to a version @@ -350,18 +356,18 @@ func (g *gcpClient) verify(sig, message io.Reader, opts ...signature.VerifyOptio _ = g.kvCache.Remove(cacheKey) crv, err = g.getCKV() if err != nil { - return errors.Wrap(err, "transient error getting info from KMS") + return fmt.Errorf("transient error getting info from KMS: %w", err) } return crv.Verifier.VerifySignature(sig, message, opts...) 
} - return errors.Wrap(err, "failed to verify for fixed version") + return fmt.Errorf("failed to verify for fixed version: %w", err) } return nil } func (g *gcpClient) createKey(ctx context.Context, algorithm string) (crypto.PublicKey, error) { if err := g.createKeyRing(ctx); err != nil { - return nil, errors.Wrap(err, "creating key ring") + return nil, fmt.Errorf("creating key ring: %w", err) } getKeyRequest := &kmspb.GetCryptoKeyRequest{ @@ -386,7 +392,7 @@ func (g *gcpClient) createKey(ctx context.Context, algorithm string) (crypto.Pub }, } if _, err := g.kmsClient.CreateCryptoKey(ctx, createKeyRequest); err != nil { - return nil, errors.Wrap(err, "creating crypto key") + return nil, fmt.Errorf("creating crypto key: %w", err) } return g.public(ctx) } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go index 17f227a2da..445f9d9a55 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/gcp/signer.go @@ -18,12 +18,13 @@ package gcp import ( "context" "crypto" + "fmt" "hash/crc32" "io" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" + "google.golang.org/api/option" ) var gcpSupportedHashFuncs = []crypto.Hash{ @@ -41,13 +42,13 @@ type SignerVerifier struct { // LoadSignerVerifier generates signatures using the specified key object in GCP KMS and hash algorithm. // // It also can verify signatures locally using the public key. hashFunc must not be crypto.Hash(0). -func LoadSignerVerifier(defaultCtx context.Context, referenceStr string) (*SignerVerifier, error) { +func LoadSignerVerifier(defaultCtx context.Context, referenceStr string, opts ...option.ClientOption) (*SignerVerifier, error) { g := &SignerVerifier{ defaultCtx: defaultCtx, } var err error - g.client, err = newGCPClient(defaultCtx, referenceStr) + g.client, err = newGCPClient(defaultCtx, referenceStr, opts...) 
if err != nil {
 		return nil, err
 	}
@@ -76,7 +77,7 @@ func (g *SignerVerifier) SignMessage(message io.Reader, opts ...signature.SignOp
 
 	signerOpts, err = g.client.getHashFunc()
 	if err != nil {
-		return nil, errors.Wrap(err, "getting fetching default hash function")
+		return nil, fmt.Errorf("fetching default hash function: %w", err)
 	}
 
 	for _, opt := range opts {
@@ -167,7 +168,7 @@ func (c cryptoSignerWrapper) Sign(_ io.Reader, digest []byte, opts crypto.Signer
 func (g *SignerVerifier) CryptoSigner(ctx context.Context, errFunc func(error)) (crypto.Signer, crypto.SignerOpts, error) {
 	defaultHf, err := g.client.getHashFunc()
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "getting fetching default hash function")
+		return nil, nil, fmt.Errorf("fetching default hash function: %w", err)
 	}
 
 	csw := &cryptoSignerWrapper{
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go
index fe0a5267cb..7a82425822 100644
--- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go
+++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/client.go
@@ -20,6 +20,7 @@ import (
 	"crypto"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"log"
 	"os"
@@ -28,10 +29,9 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/ReneKroon/ttlcache/v2"
 	vault "github.com/hashicorp/vault/api"
+	"github.com/jellydator/ttlcache/v2"
 	"github.com/mitchellh/go-homedir"
-	"github.com/pkg/errors"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
 	"github.com/sigstore/sigstore/pkg/signature"
 	sigkms "github.com/sigstore/sigstore/pkg/signature/kms"
@@ -79,7 +79,7 @@ func parseReference(resourceID string) (keyPath string, err error) {
 	i := referenceRegex.SubexpIndex("path")
 	v := referenceRegex.FindStringSubmatch(resourceID)
 	if len(v) < i+1 {
-		err = errors.Errorf("invalid vault format %q", resourceID)
+		err = fmt.Errorf("invalid vault format %q", resourceID)
 		return
 	}
 	keyPath = v[i]
@@ -87,6 +87,10 @@
 }
 
 func newHashivaultClient(address, token, transitSecretEnginePath, keyResourceID string, keyVersion uint64) (*hashivaultClient, error) {
+	if err := ValidReference(keyResourceID); err != nil {
+		return nil, err
+	}
+
 	keyPath, err := parseReference(keyResourceID)
 	if err != nil {
 		return nil, err
@@ -103,7 +107,7 @@
 		Address: address,
 	})
 	if err != nil {
-		return nil, errors.Wrap(err, "new vault client")
+		return nil, fmt.Errorf("new vault client: %w", err)
 	}
 
 	if token == "" {
@@ -113,12 +117,12 @@
 			log.Printf("VAULT_TOKEN is not set, trying to read token from file at path ~/.vault-token")
 			homeDir, err := homedir.Dir()
 			if err != nil {
-				return nil, errors.Wrap(err, "get home directory")
+				return nil, fmt.Errorf("get home directory: %w", err)
 			}
 
 			tokenFromFile, err := os.ReadFile(filepath.Join(homeDir, ".vault-token"))
 			if err != nil {
-				return nil, errors.Wrap(err, "read .vault-token file")
+				return nil, fmt.Errorf("read .vault-token file: %w", err)
 			}
 
 			token = string(tokenFromFile)
@@ -160,7 +164,7 @@ func oidcLogin(_ context.Context, address, path, role, token string) (string, er
 		Address: address,
 	})
 	if err != nil {
-		return "", errors.Wrap(err, "new vault client")
+		return "", fmt.Errorf("new vault client: %w", err)
 	}
 
 	loginData := map[string]interface{}{
@@ -170,7 +174,7 @@
func oidcLogin(_ context.Context, address, path, role, token string) (string, er fullpath := fmt.Sprintf("auth/%s/login", path) resp, err := client.Logical().Write(fullpath, loginData) if err != nil { - return "", errors.Wrap(err, "vault oidc login") + return "", fmt.Errorf("vault oidc login: %w", err) } return resp.TokenID() } @@ -190,34 +194,54 @@ func (h *hashivaultClient) keyCacheLoaderFunction(key string) (data interface{}, func (h *hashivaultClient) fetchPublicKey(_ context.Context) (crypto.PublicKey, error) { client := h.client.Logical() - keyResult, err := client.Read(fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath)) + path := fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath) + + keyResult, err := client.Read(path) if err != nil { - return nil, errors.Wrap(err, "public key") + return nil, fmt.Errorf("public key: %w", err) + } + + if keyResult == nil { + return nil, fmt.Errorf("could not read data from transit key path: %s", path) } keysData, hasKeys := keyResult.Data["keys"] latestVersion, hasVersion := keyResult.Data["latest_version"] if !hasKeys || !hasVersion { - return nil, errors.New("Failed to read transit key keys: corrupted response") + return nil, errors.New("failed to read transit key keys: corrupted response") } keys, ok := keysData.(map[string]interface{}) if !ok { - return nil, errors.New("Failed to read transit key keys: Invalid keys map") + return nil, errors.New("failed to read transit key keys: Invalid keys map") + } + + keyVersion, ok := latestVersion.(json.Number) + if !ok { + return nil, fmt.Errorf("format of 'latest_version' is not json.Number") } - keyVersion := latestVersion.(json.Number) keyData, ok := keys[string(keyVersion)] if !ok { - return nil, errors.New("Failed to read transit key keys: corrupted response") + return nil, errors.New("failed to read transit key keys: corrupted response") + } + + keyMap, ok := keyData.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("could not parse transit key keys data as map[string]interface{}") } - publicKeyPem, ok := keyData.(map[string]interface{})["public_key"] + publicKeyPem, ok := keyMap["public_key"] if !ok { - return nil, errors.New("Failed to read transit key keys: corrupted response") + return nil, errors.New("failed to read transit key keys: corrupted response") } - return cryptoutils.UnmarshalPEMToPublicKey([]byte(publicKeyPem.(string))) + strPublicKeyPem, ok := publicKeyPem.(string) + if !ok { + return nil, fmt.Errorf("could not parse public key pem as string") + } + + return cryptoutils.UnmarshalPEMToPublicKey([]byte(strPublicKeyPem)) } func (h *hashivaultClient) public() (crypto.PublicKey, error) { @@ -236,7 +260,7 @@ func (h hashivaultClient) sign(digest []byte, alg crypto.Hash, opts ...signature if keyVersion != "" { if _, err := strconv.ParseUint(keyVersion, 10, 64); err != nil { - return nil, errors.Wrap(err, "parsing requested key version") + return nil, fmt.Errorf("parsing requested key version: %w", err) } } @@ -246,12 +270,12 @@ func (h hashivaultClient) sign(digest []byte, alg crypto.Hash, opts ...signature "key_version": keyVersion, }) if err != nil { - return nil, errors.Wrap(err, "Transit: failed to sign payload") + return nil, fmt.Errorf("transit: failed to sign payload: %w", err) } encodedSignature, ok := signResult.Data["signature"] if !ok { - return nil, errors.New("Transit: response corrupted in-transit") + return nil, errors.New("transit: response corrupted in-transit") } return vaultDecode(encodedSignature, keyVersionUsedPtr) @@ -271,7 +295,7 
@@ func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash, opts ...si // keyVersion >= 1 on verification but can be set to 0 on signing kvUint, err := strconv.ParseUint(keyVersion, 10, 64) if err != nil { - return errors.Wrap(err, "parsing requested key version") + return fmt.Errorf("parsing requested key version: %w", err) } else if kvUint == 0 { return errors.New("key version must be >= 1") } @@ -293,9 +317,8 @@ func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash, opts ...si "prehashed": alg != crypto.Hash(0), "signature": fmt.Sprintf("%s%s", vaultDataPrefix, encodedSig), }) - if err != nil { - return errors.Wrap(err, "verify") + return fmt.Errorf("verify: %w", err) } valid, ok := result.Data["valid"] @@ -305,11 +328,11 @@ func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash, opts ...si isValid, ok := valid.(bool) if !ok { - return fmt.Errorf("data type assertion for field `valid` failed: %T %#v", valid.(bool), valid.(bool)) + return fmt.Errorf("received non-bool value from 'valid' key") } if !isValid { - return errors.New("Failed vault verification") + return errors.New("failed vault verification") } return nil @@ -319,7 +342,7 @@ func (h hashivaultClient) verify(sig, digest []byte, alg crypto.Hash, opts ...si func vaultDecode(data interface{}, keyVersionUsed *string) ([]byte, error) { encoded, ok := data.(string) if !ok { - return nil, errors.New("Received non-string data") + return nil, errors.New("received non-string data") } if keyVersionUsed != nil { @@ -351,7 +374,7 @@ func (h hashivaultClient) createKey(typeStr string) (crypto.PublicKey, error) { if _, err := client.Write(fmt.Sprintf("/%s/keys/%s", h.transitSecretEnginePath, h.keyPath), map[string]interface{}{ "type": typeStr, }); err != nil { - return nil, errors.Wrap(err, "Failed to create transit key") + return nil, fmt.Errorf("failed to create transit key: %w", err) } return h.public() } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go index c68e5f5cfc..1965f317d1 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/hashivault/signer.go @@ -18,16 +18,17 @@ package hashivault import ( "context" "crypto" + "errors" + "fmt" "io" "strconv" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" ) // Taken from https://www.vaultproject.io/api/secret/transit -//nolint:revive +// nolint:revive const ( AlgorithmECDSAP256 = "ecdsa-p256" AlgorithmECDSAP384 = "ecdsa-p384" @@ -82,7 +83,7 @@ func LoadSignerVerifier(referenceStr string, hashFunc crypto.Hash, opts ...signa if keyVersion != "" { keyVersionUint, err = strconv.ParseUint(keyVersion, 10, 64) if err != nil { - return nil, errors.Wrap(err, "parsing key version") + return nil, fmt.Errorf("parsing key version: %w", err) } } @@ -168,7 +169,7 @@ func (h SignerVerifier) VerifySignature(sig, message io.Reader, opts ...signatur sigBytes, err := io.ReadAll(sig) if err != nil { - return errors.Wrap(err, "reading signature") + return fmt.Errorf("reading signature: %w", err) } return h.client.verify(sigBytes, digest, hf, opts...) 
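The dominant refactor across these sigstore files is dropping `github.com/pkg/errors` in favor of the standard library: `errors.Wrap(err, "msg")` becomes `fmt.Errorf("msg: %w", err)`. The `%w` verb records the wrapped error, so callers can still match on the underlying cause. A minimal, self-contained sketch of that behavior (illustrative code, not from the diff):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("missing.txt") // yields a *fs.PathError wrapping fs.ErrNotExist
	wrapped := fmt.Errorf("vault oidc login: %w", err)

	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: %w preserves the error chain

	var pathErr *fs.PathError
	fmt.Println(errors.As(wrapped, &pathErr)) // true: the concrete cause is still reachable
}
```

Functionally the two styles are close; the stdlib form removes a third-party dependency and standardizes on Go 1.13+ error chains.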
diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go index 95f6d3570b..e48ceb9f26 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/kms/kms.go @@ -33,28 +33,26 @@ func (e *ProviderNotFoundError) Error() string { return fmt.Sprintf("no kms provider found for key reference: %s", e.ref) } -type providerInit func(context.Context, string, crypto.Hash, ...signature.RPCOption) (SignerVerifier, error) - -type providers struct { - providers map[string]providerInit -} +// ProviderInit is a function that initializes provider-specific SignerVerifier. +// +// It takes a provider-specific resource ID and hash function, and returns a +// SignerVerifier using that resource, or any error that was encountered. +type ProviderInit func(context.Context, string, crypto.Hash, ...signature.RPCOption) (SignerVerifier, error) // AddProvider adds the provider implementation into the local cache -func AddProvider(keyResourceID string, init providerInit) { - providersMux.providers[keyResourceID] = init +func AddProvider(keyResourceID string, init ProviderInit) { + providersMap[keyResourceID] = init } -var providersMux = &providers{ - providers: map[string]providerInit{}, -} +var providersMap = map[string]ProviderInit{} // Get returns a KMS SignerVerifier for the given resource string and hash function. // If no matching provider is found, Get returns a ProviderNotFoundError. It // also returns an error if initializing the SignerVerifier fails. func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts ...signature.RPCOption) (SignerVerifier, error) { - for ref, providerInit := range providersMux.providers { + for ref, pi := range providersMap { if strings.HasPrefix(keyResourceID, ref) { - return providerInit(ctx, keyResourceID, hashFunc, opts...) + return pi(ctx, keyResourceID, hashFunc, opts...) } } return nil, &ProviderNotFoundError{ref: keyResourceID} @@ -62,8 +60,8 @@ func Get(ctx context.Context, keyResourceID string, hashFunc crypto.Hash, opts . 
// SupportedProviders returns list of initialized providers func SupportedProviders() []string { - keys := make([]string, 0, len(providersMux.providers)) - for key := range providersMux.providers { + keys := make([]string, 0, len(providersMap)) + for key := range providersMap { keys = append(keys, key) } return keys diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go index 6b191f8829..6f8449eea9 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/message.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/message.go @@ -18,10 +18,9 @@ package signature import ( "crypto" crand "crypto/rand" + "errors" "fmt" "io" - - "github.com/pkg/errors" ) func isSupportedAlg(alg crypto.Hash, supportedAlgs []crypto.Hash) bool { @@ -98,7 +97,7 @@ func hashMessage(rawMessage io.Reader, hashFunc crypto.Hash) ([]byte, error) { hasher := hashFunc.New() // avoids reading entire message into memory if _, err := io.Copy(hasher, rawMessage); err != nil { - return nil, errors.Wrap(err, "hashing message") + return nil, fmt.Errorf("hashing message: %w", err) } return hasher.Sum(nil), nil } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go index 8a68eb0d9b..21875dc8c0 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/options/digest.go @@ -27,6 +27,9 @@ func (r RequestDigest) ApplyDigest(digest *[]byte) { } // WithDigest specifies that the given digest can be used by underlying signature implementations +// WARNING: When verifying a digest with ECDSA, it is trivial to craft a valid signature +// over a random message given a public key. Do not use this unless you understand the +// implications and do not need to protect against malleability.
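The warning above exists because ECDSA verification only checks an equation over the digest: starting from just a public key, anyone can work backwards from a random curve point to a (digest, signature) pair that verifies, so a caller-supplied digest proves nothing about any real message. Verifying a digest is only safe when you computed it yourself from the message in hand. A hedged sketch of that safe usage — `hashAndVerify` and its inputs are illustrative, not from the diff; only the two sigstore packages are the ones vendored here:

```go
package example

import (
	"bytes"
	"crypto/sha256"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

// hashAndVerify binds the digest to a message we actually hold before handing
// it to the verifier; accepting a digest from an untrusted party is exactly
// the malleability case the warning describes.
func hashAndVerify(v signature.Verifier, msg, sig []byte) error {
	digest := sha256.Sum256(msg)
	return v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg),
		options.WithDigest(digest[:]))
}
```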
func WithDigest(digest []byte) RequestDigest { return RequestDigest{digest: digest} } diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go index 7db2ad80cb..c583684332 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/payload/payload.go @@ -26,7 +26,7 @@ import ( const CosignSignatureType = "cosign container image signature" // SimpleContainerImage describes the structure of a basic container image signature payload, as defined at: -// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format +// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format type SimpleContainerImage struct { Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go index 47b60ff651..1cac68a539 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapkcs1v15.go @@ -19,9 +19,10 @@ import ( "crypto" "crypto/rand" "crypto/rsa" + "errors" + "fmt" "io" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature/options" ) @@ -153,7 +154,7 @@ func (r RSAPKCS1v15Verifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, // // All other options are ignored if specified. func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { - digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedHashFuncs, opts...) + digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...) 
if err != nil { return err } @@ -164,7 +165,7 @@ func (r RSAPKCS1v15Verifier) VerifySignature(signature, message io.Reader, opts sigBytes, err := io.ReadAll(signature) if err != nil { - return errors.Wrap(err, "reading signature") + return fmt.Errorf("reading signature: %w", err) } return rsa.VerifyPKCS1v15(r.publicKey, hf, digest, sigBytes) @@ -181,11 +182,11 @@ type RSAPKCS1v15SignerVerifier struct { func LoadRSAPKCS1v15SignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash) (*RSAPKCS1v15SignerVerifier, error) { signer, err := LoadRSAPKCS1v15Signer(priv, hf) if err != nil { - return nil, errors.Wrap(err, "initializing signer") + return nil, fmt.Errorf("initializing signer: %w", err) } verifier, err := LoadRSAPKCS1v15Verifier(&priv.PublicKey, hf) if err != nil { - return nil, errors.Wrap(err, "initializing verifier") + return nil, fmt.Errorf("initializing verifier: %w", err) } return &RSAPKCS1v15SignerVerifier{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go index 00756938d5..6e52bed9ba 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/rsapss.go @@ -19,18 +19,28 @@ import ( "crypto" "crypto/rand" "crypto/rsa" + "errors" + "fmt" "io" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/signature/options" ) +// checked on LoadSigner, LoadVerifier, and SignMessage var rsaSupportedHashFuncs = []crypto.Hash{ crypto.SHA256, crypto.SHA384, crypto.SHA512, } +// checked on VerifySignature. Supports SHA1 verification. +var rsaSupportedVerifyHashFuncs = []crypto.Hash{ + crypto.SHA1, + crypto.SHA256, + crypto.SHA384, + crypto.SHA512, +} + // RSAPSSSigner is a signature.Signer that uses the RSA PSS algorithm type RSAPSSSigner struct { hashFunc crypto.Hash @@ -171,7 +181,7 @@ func (r RSAPSSVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error // // All other options are ignored if specified. func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error { - digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedHashFuncs, opts...) + digest, hf, err := ComputeDigestForVerifying(message, r.hashFunc, rsaSupportedVerifyHashFuncs, opts...) 
if err != nil { return err } @@ -182,7 +192,7 @@ func (r RSAPSSVerifier) VerifySignature(signature, message io.Reader, opts ...Ve sigBytes, err := io.ReadAll(signature) if err != nil { - return errors.Wrap(err, "reading signature") + return fmt.Errorf("reading signature: %w", err) } // rsa.VerifyPSS ignores pssOpts.Hash, so we don't set it @@ -207,11 +217,11 @@ type RSAPSSSignerVerifier struct { func LoadRSAPSSSignerVerifier(priv *rsa.PrivateKey, hf crypto.Hash, opts *rsa.PSSOptions) (*RSAPSSSignerVerifier, error) { signer, err := LoadRSAPSSSigner(priv, hf, opts) if err != nil { - return nil, errors.Wrap(err, "initializing signer") + return nil, fmt.Errorf("initializing signer: %w", err) } verifier, err := LoadRSAPSSVerifier(&priv.PublicKey, hf, opts) if err != nil { - return nil, errors.Wrap(err, "initializing verifier") + return nil, fmt.Errorf("initializing verifier: %w", err) } return &RSAPSSSignerVerifier{ diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go index e2fc168d3d..6dad67d081 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signer.go @@ -20,6 +20,7 @@ import ( "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" + "errors" "io" "io/ioutil" "path/filepath" @@ -28,7 +29,6 @@ import ( _ "crypto/sha256" _ "crypto/sha512" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/cryptoutils" // these ensure we have the implementations loaded diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go index b74d1dd93c..9592654edc 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go @@ -20,10 +20,10 @@ import ( "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" + "errors" "io/ioutil" "path/filepath" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/cryptoutils" ) diff --git a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go index 4adbc2a564..ea8660efcb 100644 --- a/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go +++ b/vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go @@ -20,11 +20,11 @@ import ( "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" + "errors" "io" "io/ioutil" "path/filepath" - "github.com/pkg/errors" "github.com/sigstore/sigstore/pkg/cryptoutils" ) @@ -51,6 +51,35 @@ func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, e return nil, errors.New("unsupported public key type") } +// LoadUnsafeVerifier returns a signature.Verifier based on the algorithm of the public key +// provided that will use SHA1 when computing digests for RSA and ECDSA signatures. +// +// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a +// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly. 
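Taken together, `rsaSupportedVerifyHashFuncs` and `LoadUnsafeVerifier` split the hash policy in two: SHA-1 remains acceptable for verifying pre-existing signatures but is never offered for signing. A sketch of how a caller might check a legacy SHA-1 RSA signature — `verifyLegacySHA1` and its inputs are hypothetical; only the two package functions come from the vendored code:

```go
package example

import (
	"bytes"

	"github.com/sigstore/sigstore/pkg/cryptoutils"
	"github.com/sigstore/sigstore/pkg/signature"
)

// verifyLegacySHA1 checks an old SHA-1 signature; pemBytes, msg and sig are
// placeholders for data the caller already holds.
func verifyLegacySHA1(pemBytes, msg, sig []byte) error {
	pub, err := cryptoutils.UnmarshalPEMToPublicKey(pemBytes)
	if err != nil {
		return err
	}
	v, err := signature.LoadUnsafeVerifier(pub) // pins SHA-1; verify-only, never for new signatures
	if err != nil {
		return err
	}
	return v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg))
}
```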
+func LoadUnsafeVerifier(publicKey crypto.PublicKey) (Verifier, error) { + switch pk := publicKey.(type) { + case *rsa.PublicKey: + if pk == nil { + return nil, errors.New("invalid RSA public key specified") + } + return &RSAPKCS1v15Verifier{ + publicKey: pk, + hashFunc: crypto.SHA1, + }, nil + case *ecdsa.PublicKey: + if pk == nil { + return nil, errors.New("invalid ECDSA public key specified") + } + return &ECDSAVerifier{ + publicKey: pk, + hashFunc: crypto.SHA1, + }, nil + case ed25519.PublicKey: + return LoadED25519Verifier(pk) + } + return nil, errors.New("unsupported public key type") +} + // LoadVerifierFromPEMFile returns a signature.Verifier based on the contents of a // file located at path. The Verifier will use the hash function specified when computing digests. // diff --git a/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go new file mode 100644 index 0000000000..a43579cf60 --- /dev/null +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/client.go @@ -0,0 +1,702 @@ +// +// Copyright 2022 The Sigstore Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tuf + +import ( + "bytes" + "context" + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/theupdateframework/go-tuf/client" + tuf_leveldbstore "github.com/theupdateframework/go-tuf/client/leveldbstore" + "github.com/theupdateframework/go-tuf/data" + _ "github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa" + "github.com/theupdateframework/go-tuf/util" +) + +const ( + // DefaultRemoteRoot is the default remote TUF root location. + DefaultRemoteRoot = "https://sigstore-tuf-root.storage.googleapis.com" + + // TufRootEnv is the name of the environment variable that locates an alternate local TUF root location. + TufRootEnv = "TUF_ROOT" + + // SigstoreNoCache is the name of the environment variable that, if set, configures this code to only store root data in memory. + SigstoreNoCache = "SIGSTORE_NO_CACHE" +) + +var ( + // singletonTUF holds a single instance of TUF that will get reused on + // subsequent invocations of initializeTUF + singletonTUF *TUF + singletonTUFOnce = new(sync.Once) + singletonTUFErr error +) + +// getRemoteRoot is a var for testing.
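The TUF client introduced in this new file is a process-wide singleton: `initializeTUF` runs its body under a `sync.Once`, stores either the instance or the construction error in package variables so every caller observes the same outcome, and tests get a clean slate by swapping in a fresh `Once`. The pattern, distilled into a standalone sketch with illustrative names (not the vendored identifiers):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// client stands in for the vendored TUF type; all names here are illustrative.
type client struct{ mirror string }

var (
	singleton     *client
	singletonOnce = new(sync.Once) // a pointer, so tests can swap in a fresh Once
	singletonErr  error
)

func getClient(mirror string) (*client, error) {
	singletonOnce.Do(func() {
		if mirror == "" {
			singletonErr = errors.New("no mirror configured")
			return // leave singleton nil; every later call sees the same error
		}
		singleton = &client{mirror: mirror}
	})
	return singleton, singletonErr
}

func resetForTests() {
	singletonOnce = new(sync.Once)
	singleton, singletonErr = nil, nil
}

func main() {
	c, err := getClient("https://tuf.example.com") // hypothetical mirror URL
	fmt.Println(c, err)
}
```

One consequence worth noting: a failed initialization is sticky for the life of the process, which matches the vendored code's behavior until `resetForTests` replaces `singletonTUFOnce`.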
+var getRemoteRoot = func() string { return DefaultRemoteRoot } + +type TUF struct { + sync.Mutex + client *client.Client + targets targetImpl + local client.LocalStore + remote client.RemoteStore + embedded fs.FS + mirror string // location of mirror +} + +// JSON output representing the configured root status +type RootStatus struct { + Local string `json:"local"` + Remote string `json:"remote"` + Metadata map[string]MetadataStatus `json:"metadata"` + Targets []string `json:"targets"` +} + +type MetadataStatus struct { + Version int `json:"version"` + Size int `json:"len"` + Expiration string `json:"expiration"` + Error string `json:"error"` +} + +type TargetFile struct { + Target []byte + Status StatusKind +} + +type customMetadata struct { + Usage UsageKind `json:"usage"` + Status StatusKind `json:"status"` +} + +type sigstoreCustomMetadata struct { + Sigstore customMetadata `json:"sigstore"` +} + +type signedMeta struct { + Type string `json:"_type"` + Expires time.Time `json:"expires"` + Version int64 `json:"version"` +} + +// RemoteCache contains information to cache on the location of the remote +// repository. +type remoteCache struct { + Mirror string `json:"mirror"` +} + +func resetForTests() { + singletonTUFOnce = new(sync.Once) +} + +func getExpiration(metadata []byte) (*time.Time, error) { + s := &data.Signed{} + if err := json.Unmarshal(metadata, s); err != nil { + return nil, err + } + sm := &signedMeta{} + if err := json.Unmarshal(s.Signed, sm); err != nil { + return nil, err + } + return &sm.Expires, nil +} + +func getVersion(metadata []byte) (int64, error) { + s := &data.Signed{} + if err := json.Unmarshal(metadata, s); err != nil { + return 0, err + } + sm := &signedMeta{} + if err := json.Unmarshal(s.Signed, sm); err != nil { + return 0, err + } + return sm.Version, nil +} + +var isExpiredTimestamp = func(metadata []byte) bool { + expiration, err := getExpiration(metadata) + if err != nil { + return true + } + return time.Until(*expiration) <= 0 +} + +func getMetadataStatus(b []byte) (*MetadataStatus, error) { + expires, err := getExpiration(b) + if err != nil { + return nil, err + } + version, err := getVersion(b) + if err != nil { + return nil, err + } + return &MetadataStatus{ + Size: len(b), + Expiration: expires.Format(time.RFC822), + Version: int(version), + }, nil +} + +func (t *TUF) getRootStatus() (*RootStatus, error) { + local := rootCacheDir() + if noCache() { + local = "in-memory" + } + status := &RootStatus{ + Local: local, + Remote: t.mirror, + Metadata: make(map[string]MetadataStatus), + Targets: []string{}, + } + + // Get targets + targets, err := t.client.Targets() + if err != nil { + return nil, err + } + for t := range targets { + status.Targets = append(status.Targets, t) + } + + // Get metadata expiration + trustedMeta, err := t.local.GetMeta() + if err != nil { + return nil, fmt.Errorf("getting trusted meta: %w", err) + } + for role, md := range trustedMeta { + mdStatus, err := getMetadataStatus(md) + if err != nil { + status.Metadata[role] = MetadataStatus{Error: err.Error()} + continue + } + status.Metadata[role] = *mdStatus + } + + return status, nil +} + +func getRoot(meta map[string]json.RawMessage, fallback fs.FS) (json.RawMessage, error) { + if trustedRoot, ok := meta["root.json"]; ok { + return trustedRoot, nil + } + // On first initialize, there will be no root in the TUF DB, so read from embedded. 
+ rd, ok := fallback.(fs.ReadFileFS) + if !ok { + return nil, errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + trustedRoot, err := rd.ReadFile(path.Join("repository", "root.json")) + if err != nil { + return nil, err + } + return trustedRoot, nil +} + +// GetRootStatus gets the current root status for info logging +func GetRootStatus(ctx context.Context) (*RootStatus, error) { + t, err := NewFromEnv(ctx) + if err != nil { + return nil, err + } + return t.getRootStatus() +} + +// initializeTUF creates a TUF client using the following params: +// * embed: indicates using the embedded metadata and in-memory file updates. +// When this is false, this uses a filesystem cache. +// * mirror: provides a reference to a remote GCS or HTTP mirror. +// * root: provides an external initial root.json. When this is not provided, this +// defaults to the embedded root.json. +// * embedded: An embedded filesystem that provides a trusted root and pre-downloaded +// targets in a targets/ subfolder. +// * forceUpdate: indicates checking the remote for an update, even when the local +// timestamp.json is up to date. +func initializeTUF(mirror string, root []byte, embedded fs.FS, forceUpdate bool) (*TUF, error) { + singletonTUFOnce.Do(func() { + t := &TUF{ + mirror: mirror, + embedded: embedded, + } + + t.targets = newFileImpl() + t.local, singletonTUFErr = newLocalStore() + if singletonTUFErr != nil { + return + } + + t.remote, singletonTUFErr = remoteFromMirror(t.mirror) + if singletonTUFErr != nil { + return + } + + t.client = client.NewClient(t.local, t.remote) + + trustedMeta, err := t.local.GetMeta() + if err != nil { + singletonTUFErr = fmt.Errorf("getting trusted meta: %w", err) + return + } + + // If the caller does not supply a root, then either use the root in the local store + // or default to the embedded one. + if root == nil { + root, err = getRoot(trustedMeta, t.embedded) + if err != nil { + singletonTUFErr = fmt.Errorf("getting trusted root: %w", err) + return + } + } + + if err := t.client.Init(root); err != nil { + singletonTUFErr = fmt.Errorf("unable to initialize client, local cache may be corrupt: %w", err) + return + } + + // We may already have an up-to-date local store! Check to see if it needs to be updated. + trustedTimestamp, ok := trustedMeta["timestamp.json"] + if ok && !isExpiredTimestamp(trustedTimestamp) && !forceUpdate { + // We're golden so stash the TUF object for later use + singletonTUF = t + return + } + + // Update if local is not populated or out of date. + if err := t.updateMetadataAndDownloadTargets(); err != nil { + singletonTUFErr = fmt.Errorf("updating local metadata and targets: %w", err) + return + } + + // We're golden so stash the TUF object for later use + singletonTUF = t + }) + return singletonTUF, singletonTUFErr +} + +// TODO: Remove ctx arg. +func NewFromEnv(_ context.Context) (*TUF, error) { + // Check for the current remote mirror. + mirror := getRemoteRoot() + b, err := os.ReadFile(cachedRemote(rootCacheDir())) + if err == nil { + remoteInfo := remoteCache{} + if err := json.Unmarshal(b, &remoteInfo); err == nil { + mirror = remoteInfo.Mirror + } + } + + // Initializes a new TUF object from the local cache or defaults. + return initializeTUF(mirror, nil, getEmbedded(), false) +} + +func Initialize(ctx context.Context, mirror string, root []byte) error { + // Initialize the client. Force an update with remote. 
+ if _, err := initializeTUF(mirror, root, getEmbedded(), true); err != nil { + return err + } + + // Store the remote for later if we are caching. + if !noCache() { + remoteInfo := &remoteCache{Mirror: mirror} + b, err := json.Marshal(remoteInfo) + if err != nil { + return err + } + if err := os.WriteFile(cachedRemote(rootCacheDir()), b, 0o600); err != nil { + return fmt.Errorf("storing remote: %w", err) + } + } + return nil +} + +// Checks if the testTarget matches the valid target file metadata. +func isValidTarget(testTarget []byte, validMeta data.TargetFileMeta) bool { + localMeta, err := util.GenerateTargetFileMeta(bytes.NewReader(testTarget)) + if err != nil { + return false + } + if err := util.TargetFileMetaEqual(localMeta, validMeta); err != nil { + return false + } + return true +} + +func (t *TUF) GetTarget(name string) ([]byte, error) { + t.Lock() + defer t.Unlock() + // Get valid target metadata. Does a local verification. + validMeta, err := t.client.Target(name) + if err != nil { + return nil, fmt.Errorf("error verifying local metadata; local cache may be corrupt: %w", err) + } + targetBytes, err := t.targets.Get(name) + if err != nil { + return nil, err + } + + if !isValidTarget(targetBytes, validMeta) { + return nil, fmt.Errorf("cache contains invalid target; local cache may be corrupt") + } + + return targetBytes, nil +} + +// Get target files by a custom usage metadata tag. If there are no files found, +// use the fallback target names to fetch the targets by name. +func (t *TUF) GetTargetsByMeta(usage UsageKind, fallbacks []string) ([]TargetFile, error) { + t.Lock() + targets, err := t.client.Targets() + t.Unlock() + if err != nil { + return nil, fmt.Errorf("error getting targets: %w", err) + } + var matchedTargets []TargetFile + for name, targetMeta := range targets { + // Skip any targets that do not include custom metadata. + if targetMeta.Custom == nil { + continue + } + var scm sigstoreCustomMetadata + err := json.Unmarshal(*targetMeta.Custom, &scm) + if err != nil { + fmt.Fprintf(os.Stderr, "**Warning** Custom metadata not configured properly for target %s, skipping target\n", name) + continue + } + if scm.Sigstore.Usage == usage { + target, err := t.GetTarget(name) + if err != nil { + return nil, fmt.Errorf("error getting target %s by usage: %w", name, err) + } + matchedTargets = append(matchedTargets, TargetFile{Target: target, Status: scm.Sigstore.Status}) + } + } + if len(matchedTargets) == 0 { + for _, fallback := range fallbacks { + target, err := t.GetTarget(fallback) + if err != nil { + fmt.Fprintf(os.Stderr, "**Warning** Missing fallback target %s, skipping\n", fallback) + continue + } + matchedTargets = append(matchedTargets, TargetFile{Target: target, Status: Active}) + } + } + if len(matchedTargets) == 0 { + return matchedTargets, fmt.Errorf("no matching targets by custom metadata, fallbacks not found: %s", strings.Join(fallbacks, ", ")) + } + return matchedTargets, nil +} + +// updateClient() updates the TUF client and also caches new metadata, if needed. +func (t *TUF) updateClient() (data.TargetFiles, error) { + targets, err := t.client.Update() + if err != nil { + // Get some extra information for debugging. What was the state of the top-level + // metadata on the remote? 
+ status := struct { + Mirror string `json:"mirror"` + Metadata map[string]MetadataStatus `json:"metadata"` + }{ + Mirror: t.mirror, + Metadata: make(map[string]MetadataStatus), + } + for _, md := range []string{"root.json", "targets.json", "snapshot.json", "timestamp.json"} { + r, _, err := t.remote.GetMeta(md) + if err != nil { + // May be missing, or failed download. + continue + } + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + continue + } + mdStatus, err := getMetadataStatus(b) + if err != nil { + continue + } + status.Metadata[md] = *mdStatus + } + b, innerErr := json.MarshalIndent(status, "", "\t") + if innerErr != nil { + return nil, innerErr + } + return nil, fmt.Errorf("error updating to TUF remote mirror: %w\nremote status:%s", err, string(b)) + } + // Success! Cache new metadata, if needed. + if noCache() { + return targets, nil + } + // Sync the on-disk cache with the metadata from the in-memory store. + tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db")) + diskLocal, err := tuf_leveldbstore.FileLocalStore(tufDB) + defer func() { + if diskLocal != nil { + diskLocal.Close() + } + }() + if err != nil { + return nil, fmt.Errorf("creating cached local store: %w", err) + } + if err := syncLocalMeta(t.local, diskLocal); err != nil { + return nil, err + } + // Return updated targets. + return targets, nil +} + +func (t *TUF) updateMetadataAndDownloadTargets() error { + // Download updated targets and cache new metadata and targets in ${TUF_ROOT}. + // NOTE: This only returns *updated* targets. + targetFiles, err := t.updateClient() + if err != nil { + return err + } + + // Download **newly** updated targets. + // TODO: Consider lazily downloading these -- be careful with embedded targets if so. + for name, targetMeta := range targetFiles { + if err := maybeDownloadRemoteTarget(name, targetMeta, t); err != nil { + return err + } + } + + return nil +} + +type targetDestination struct { + buf *bytes.Buffer +} + +func (t *targetDestination) Write(b []byte) (int, error) { + return t.buf.Write(b) +} + +func (t *targetDestination) Delete() error { + t.buf = &bytes.Buffer{} + return nil +} + +func maybeDownloadRemoteTarget(name string, meta data.TargetFileMeta, t *TUF) error { + // If we already have the target locally, don't bother downloading from remote storage. + if cachedTarget, err := t.targets.Get(name); err == nil { + // If the target we have stored matches the meta, use that. + if isValidTarget(cachedTarget, meta) { + return nil + } + } + + // Check if we already have the target in the embedded store. + w := bytes.Buffer{} + rd, ok := t.embedded.(fs.ReadFileFS) + if !ok { + return errors.New("fs.ReadFileFS unimplemented for embedded repo") + } + b, err := rd.ReadFile(path.Join("repository", "targets", name)) + + if err == nil { + // Unfortunately go:embed appears to somehow replace our line endings on windows, we need to switch them back. + // It should theoretically be safe to do this everywhere - but the files only seem to get mutated on Windows so + // let's only change them back there. + if runtime.GOOS == "windows" { + b = bytes.ReplaceAll(b, []byte("\r\n"), []byte("\n")) + } + + if isValidTarget(b, meta) { + if _, err := io.Copy(&w, bytes.NewReader(b)); err != nil { + return fmt.Errorf("using embedded target: %w", err) + } + } + } + + // Nope -- no local matching target, go download it. 
+ if w.Len() == 0 { + dest := targetDestination{buf: &w} + if err := t.client.Download(name, &dest); err != nil { + return fmt.Errorf("downloading target: %w", err) + } + } + + // Set the target in the cache. + if err := t.targets.Set(name, w.Bytes()); err != nil { + return err + } + return nil +} + +func rootCacheDir() string { + rootDir := os.Getenv(TufRootEnv) + if rootDir == "" { + home, err := os.UserHomeDir() + if err != nil { + home = "" + } + return filepath.FromSlash(filepath.Join(home, ".sigstore", "root")) + } + return rootDir +} + +func cachedRemote(cacheRoot string) string { + return filepath.FromSlash(filepath.Join(cacheRoot, "remote.json")) +} + +func cachedTargetsDir(cacheRoot string) string { + return filepath.FromSlash(filepath.Join(cacheRoot, "targets")) +} + +func syncLocalMeta(from, to client.LocalStore) error { + // Copy trusted metadata in the from LocalStore into the to LocalStore. + tufLocalStoreMeta, err := from.GetMeta() + if err != nil { + return fmt.Errorf("getting metadata to sync: %w", err) + } + for k, v := range tufLocalStoreMeta { + if err := to.SetMeta(k, v); err != nil { + return fmt.Errorf("syncing local store for metadata %s", k) + } + } + return nil +} + +// Local store implementations +func newLocalStore() (client.LocalStore, error) { + local := client.MemoryLocalStore() + if noCache() { + return local, nil + } + // Otherwise populate the in-memory local store with data fetched from the cache. + tufDB := filepath.FromSlash(filepath.Join(rootCacheDir(), "tuf.db")) + diskLocal, err := tuf_leveldbstore.FileLocalStore(tufDB) + defer func() { + if diskLocal != nil { + diskLocal.Close() + } + }() + if err != nil { + return nil, fmt.Errorf("creating cached local store: %w", err) + } + // Populate the in-memory local store with data fetched from the cache. + if err := syncLocalMeta(diskLocal, local); err != nil { + return nil, err + } + return local, nil +} + +//go:embed repository +var embeddedRootRepo embed.FS + +// getEmbedded is a var for testing. +var getEmbedded = func() fs.FS { return embeddedRootRepo } + +// Target Implementations +type targetImpl interface { + Set(string, []byte) error + Get(string) ([]byte, error) +} + +func newFileImpl() targetImpl { + memTargets := &memoryCache{} + if noCache() { + return memTargets + } + // Otherwise use a disk-cache with in-memory cached targets. + return &diskCache{ + base: cachedTargetsDir(rootCacheDir()), + memory: memTargets, + } +} + +// In-memory cache for targets +type memoryCache struct { + targets map[string][]byte +} + +func (m *memoryCache) Set(p string, b []byte) error { + if m.targets == nil { + m.targets = map[string][]byte{} + } + m.targets[p] = b + return nil +} + +func (m *memoryCache) Get(p string) ([]byte, error) { + if m.targets == nil { + return nil, fmt.Errorf("no cached targets available, cannot retrieve %s", p) + } + b, ok := m.targets[p] + if !ok { + return nil, fmt.Errorf("missing cached target %s", p) + } + return b, nil +} + +// On-disk cache for targets +type diskCache struct { + // Base directory for accessing targets. + base string + // An in-memory map of targets that are kept in sync. + memory *memoryCache +} + +func (d *diskCache) Get(p string) ([]byte, error) { + // Read from the in-memory cache first. 
+ if b, err := d.memory.Get(p); err == nil { + return b, nil + } + fp := filepath.FromSlash(filepath.Join(d.base, p)) + return os.ReadFile(fp) +} + +func (d *diskCache) Set(p string, b []byte) error { + if err := d.memory.Set(p, b); err != nil { + return err + } + if err := os.MkdirAll(d.base, 0o700); err != nil { + return fmt.Errorf("creating targets dir: %w", err) + } + fp := filepath.FromSlash(filepath.Join(d.base, p)) + return os.WriteFile(fp, b, 0o600) +} + +func noCache() bool { + b, err := strconv.ParseBool(os.Getenv(SigstoreNoCache)) + if err != nil { + return false + } + return b +} + +func remoteFromMirror(mirror string) (client.RemoteStore, error) { + // This is for compatibility with specifying a GCS bucket remote. + if _, parseErr := url.ParseRequestURI(mirror); parseErr != nil { + mirror = fmt.Sprintf("https://%s.storage.googleapis.com", mirror) + } + return client.HTTPRemoteStore(mirror, nil, nil) +} diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/policy.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/policy.go similarity index 90% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/policy.go rename to vendor/github.com/sigstore/sigstore/pkg/tuf/policy.go index f8fd45730a..055103aa11 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/policy.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/policy.go @@ -1,5 +1,5 @@ // -// Copyright 2021 The Sigstore Authors. +// Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,11 +23,11 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" + "errors" + "fmt" "sync" "time" - "github.com/pkg/errors" - cjson "github.com/secure-systems-lab/go-securesystemslib/cjson" ) @@ -77,17 +77,13 @@ type Root struct { ConsistentSnapshot bool `json:"consistent_snapshot"` } -func DefaultExpires(role string) time.Time { - // Default expires in 3 months - return time.Now().AddDate(0, 3, 0).UTC().Round(time.Second) -} - func NewRoot() *Root { return &Root{ - Type: "root", - SpecVersion: "1.0", - Version: 1, - Expires: DefaultExpires("root"), + Type: "root", + SpecVersion: "1.0", + Version: 1, + // Default expires in 3 months + Expires: time.Now().AddDate(0, 3, 0).UTC().Round(time.Second), Keys: make(map[string]*Key), Roles: make(map[string]*Role), ConsistentSnapshot: true, @@ -139,14 +135,14 @@ func (r *Root) ValidKey(key *Key, role string) (string, error) { // Returns the key ID or an error if invalid key. 
fulcioKeyVal, err := GetFulcioKeyVal(key) if err != nil { - return "", errors.Wrap(err, "error parsing signer key") + return "", fmt.Errorf("error parsing signer key: %w", err) } result := "" for keyid, rootKey := range r.Keys { fulcioRootKeyVal, err := GetFulcioKeyVal(rootKey) if err != nil { - return "", errors.Wrap(err, "error parsing root key") + return "", fmt.Errorf("error parsing root key: %w", err) } if fulcioKeyVal.Identity == fulcioRootKeyVal.Identity { if fulcioRootKeyVal.Issuer == "" || fulcioRootKeyVal.Issuer == fulcioKeyVal.Issuer { @@ -189,7 +185,7 @@ func (s *Signed) JSONMarshal(prefix, indent string) ([]byte, error) { func (s *Signed) AddOrUpdateSignature(key *Key, signature Signature) error { root := &Root{} if err := json.Unmarshal(s.Signed, root); err != nil { - return errors.Wrap(err, "unmarshalling root policy") + return fmt.Errorf("unmarshalling root policy: %w", err) } var err error signature.KeyID, err = root.ValidKey(key, "root") diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/root.json b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/root.json similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/root.json rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/root.json diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/artifact.pub b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/artifact.pub similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/artifact.pub rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/artifact.pub diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/ctfe.pub b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/ctfe.pub similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/ctfe.pub rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/ctfe.pub diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/fulcio.crt.pem b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/fulcio.crt.pem similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/fulcio.crt.pem rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/fulcio.crt.pem diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/fulcio_v1.crt.pem b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/fulcio_v1.crt.pem similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/fulcio_v1.crt.pem rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/fulcio_v1.crt.pem diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.0.pub b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.0.pub similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.0.pub rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.0.pub diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/rekor.json b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.json similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/rekor.json rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.json diff --git 
a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.pub b/vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.pub similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/repository/targets/rekor.pub rename to vendor/github.com/sigstore/sigstore/pkg/tuf/repository/targets/rekor.pub diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/signer.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/signer.go similarity index 88% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/signer.go rename to vendor/github.com/sigstore/sigstore/pkg/tuf/signer.go index cff672712a..9074f04cb6 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/signer.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/signer.go @@ -1,5 +1,5 @@ // -// Copyright 2021 The Sigstore Authors. +// Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,16 +24,14 @@ const ( KeySchemeFulcio = "https://fulcio.sigstore.dev" ) -var ( - KeyAlgorithms = []string{"sha256", "sha512"} -) +var KeyAlgorithms = []string{"sha256", "sha512"} type FulcioKeyVal struct { Identity string `json:"identity"` Issuer string `json:"issuer,omitempty"` } -func FulcioVerificationKey(email string, issuer string) *Key { +func FulcioVerificationKey(email, issuer string) *Key { keyValBytes, _ := json.Marshal(FulcioKeyVal{Identity: email, Issuer: issuer}) return &Key{ Type: KeyTypeFulcio, diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/status_type.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/status_type.go similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/status_type.go rename to vendor/github.com/sigstore/sigstore/pkg/tuf/status_type.go diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/testutils.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/testutils.go similarity index 93% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/testutils.go rename to vendor/github.com/sigstore/sigstore/pkg/tuf/testutils.go index 729af872db..c3e9d97be4 100644 --- a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/testutils.go +++ b/vendor/github.com/sigstore/sigstore/pkg/tuf/testutils.go @@ -1,5 +1,5 @@ // -// Copyright 2021 The Sigstore Authors. +// Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -58,7 +58,7 @@ func NewSigstoreTufRepo(t *testing.T, root TestSigstoreRoot) (tuf.LocalStore, *t } } targetsPath := filepath.Join(td, "staged", "targets") - if err := os.MkdirAll(filepath.Dir(targetsPath), 0755); err != nil { + if err := os.MkdirAll(filepath.Dir(targetsPath), 0o755); err != nil { t.Error(err) } // Add the rekor key target @@ -72,7 +72,7 @@ func NewSigstoreTufRepo(t *testing.T, root TestSigstoreRoot) (tuf.LocalStore, *t } rekorPath := "rekor.pub" rekorData := cryptoutils.PEMEncode(cryptoutils.PublicKeyPEMType, b) - if err := ioutil.WriteFile(filepath.Join(targetsPath, rekorPath), rekorData, 0600); err != nil { + if err := ioutil.WriteFile(filepath.Join(targetsPath, rekorPath), rekorData, 0o600); err != nil { t.Error(err) } scmRekor, err := json.Marshal(&sigstoreCustomMetadata{Sigstore: customMetadata{Usage: Rekor, Status: Active}}) @@ -85,7 +85,7 @@ func NewSigstoreTufRepo(t *testing.T, root TestSigstoreRoot) (tuf.LocalStore, *t // Add Fulcio Certificate information. fulcioPath := "fulcio.crt.pem" fulcioData := cryptoutils.PEMEncode(cryptoutils.CertificatePEMType, root.FulcioCertificate.Raw) - if err := ioutil.WriteFile(filepath.Join(targetsPath, fulcioPath), fulcioData, 0600); err != nil { + if err := ioutil.WriteFile(filepath.Join(targetsPath, fulcioPath), fulcioData, 0o600); err != nil { t.Error(err) } scmFulcio, err := json.Marshal(&sigstoreCustomMetadata{Sigstore: customMetadata{Usage: Fulcio, Status: Active}}) @@ -119,8 +119,12 @@ func NewSigstoreTufRepo(t *testing.T, root TestSigstoreRoot) (tuf.LocalStore, *t if !ok { t.Error(err) } + resetForTests() if err := Initialize(ctx, s.URL, rootBytes); err != nil { t.Error(err) } + t.Cleanup(func() { + resetForTests() + }) return remote, r } diff --git a/vendor/github.com/sigstore/cosign/pkg/cosign/tuf/usage_type.go b/vendor/github.com/sigstore/sigstore/pkg/tuf/usage_type.go similarity index 100% rename from vendor/github.com/sigstore/cosign/pkg/cosign/tuf/usage_type.go rename to vendor/github.com/sigstore/sigstore/pkg/tuf/usage_type.go diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 02d3e3715a..130c427e8b 100644 --- a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -16,7 +16,7 @@ endif # Dependency versions GOTESTSUM_VERSION = 1.8.0 -GOLANGCI_VERSION = 1.45.2 +GOLANGCI_VERSION = 1.49.0 # Add the ability to override some variables # Use with care @@ -48,7 +48,7 @@ bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint bin/golangci-lint-${GOLANGCI_VERSION}: @mkdir -p bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} @mv bin/golangci-lint "$@" .PHONY: lint diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index c14e8927a1..5701422c8e 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -119,7 +119,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %w \n", err)) + panic(fmt.Errorf("fatal error config file: 
%w", err)) } ``` @@ -447,6 +447,13 @@ viper.SetConfigType("json") // because there is no file extension in a stream of err := viper.ReadRemoteConfig() ``` +#### etcd3 +```go +viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" +err := viper.ReadRemoteConfig() +``` + #### Consul You need to set a key to Consul key/value storage with JSON value containing your desired config. For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: @@ -594,7 +601,7 @@ configuration level. Viper can access array indices by using numbers in the path. For example: -```json +```jsonc { "host": { "address": "localhost", @@ -622,7 +629,7 @@ GetInt("host.ports.1") // returns 6029 Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. -```json +```jsonc { "datastore.metric.host": "0.0.0.0", "host": { diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go index 0115067ae6..a64e1446cc 100644 --- a/vendor/github.com/spf13/viper/logger.go +++ b/vendor/github.com/spf13/viper/logger.go @@ -7,8 +7,8 @@ import ( ) // Logger is a unified interface for various logging use cases and practices, including: -// - leveled logging -// - structured logging +// - leveled logging +// - structured logging type Logger interface { // Trace logs a Trace event. // diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index ee7a86d9df..64e657505d 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -64,18 +64,25 @@ func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { return nm } +func insensitiviseVal(val interface{}) interface{} { + switch val.(type) { + case map[interface{}]interface{}: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]interface{})) + case map[string]interface{}: + // nested map: recursively insensitivise + insensitiviseMap(val.(map[string]interface{})) + case []interface{}: + // nested array: recursively insensitivise + insensitiveArray(val.([]interface{})) + } + return val +} + func insensitiviseMap(m map[string]interface{}) { for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } - + val = insensitiviseVal(val) lower := strings.ToLower(key) if key != lower { // remove old key (not lower-cased) @@ -86,6 +93,12 @@ func insensitiviseMap(m map[string]interface{}) { } } +func insensitiveArray(a []interface{}) { + for i, val := range a { + a[i] = insensitiviseVal(val) + } +} + func absPathify(logger Logger, inPath string) string { logger.Info("trying to resolve absolute path", "path", inPath) diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index a3812e92f3..5f76cc0959 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -132,10 +132,10 @@ type DecoderConfigOption func(*mapstructure.DecoderConfig) // DecodeHook returns a DecoderConfigOption which overrides 
the default // DecoderConfig.DecodeHook value, the default is: // -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) +// mapstructure.ComposeDecodeHookFunc( +// mapstructure.StringToTimeDurationHookFunc(), +// mapstructure.StringToSliceHookFunc(","), +// ) func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { return func(c *mapstructure.DecoderConfig) { c.DecodeHook = hook @@ -156,18 +156,18 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // // For example, if values from the following sources were loaded: // -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } // // The resulting config will have the following values: // @@ -300,7 +300,7 @@ func NewWithOptions(opts ...Option) *Viper { func Reset() { v = New() SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} + SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"} } // TODO: make this lazy initialization instead @@ -419,7 +419,7 @@ type RemoteProvider interface { var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} // SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} +var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"} func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { @@ -573,7 +573,7 @@ func (v *Viper) AddConfigPath(in string) { // AddRemoteProvider adds a remote configuration source. // Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. +// provider is a string value: "etcd", "etcd3", "consul" or "firestore" are currently supported. // endpoint is the url. etcd requires http://ip:port consul requires ip:port // path is the path in the k/v store to retrieve configuration // To retrieve a config file called myapp.json from /configs/myapp.json @@ -604,7 +604,7 @@ func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { // AddSecureRemoteProvider adds a remote configuration source. // Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. +// provider is a string value: "etcd", "etcd3", "consul" or "firestore" are currently supported. // endpoint is the url. etcd requires http://ip:port consul requires ip:port // secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg // path is the path in the k/v store to retrieve configuration @@ -785,7 +785,8 @@ func (v *Viper) searchMapWithPathPrefixes( // isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere // on its path in the map. 
// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map +// +// "foo.bar.baz" in a lower-priority map func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { var parentVal interface{} for i := 1; i < len(path); i++ { @@ -810,7 +811,8 @@ func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) // isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere // in a sub-path of the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map +// +// "foo.bar.baz" in a lower-priority map func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { // unify input map var m map[string]interface{} @@ -835,7 +837,8 @@ func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { // isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere // in the environment, when automatic env is on. // e.g., if "foo.bar" has a value in the environment, it “shadows” -// "foo.bar.baz" in a lower-priority map +// +// "foo.bar.baz" in a lower-priority map func (v *Viper) isPathShadowedInAutoEnv(path []string) string { var parentKey string for i := 1; i < len(path); i++ { @@ -856,11 +859,11 @@ func (v *Viper) isPathShadowedInAutoEnv(path []string) string { // would return a string slice for the key if the key's type is inferred by // the default value and the Get function would return: // -// []string {"a", "b", "c"} +// []string {"a", "b", "c"} // // Otherwise the Get function would return: // -// "a b c" +// "a b c" func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } func (v *Viper) SetTypeByDefaultValue(enable bool) { @@ -988,6 +991,13 @@ func (v *Viper) GetUint(key string) uint { return cast.ToUint(v.Get(key)) } +// GetUint16 returns the value associated with the key as an unsigned integer. +func GetUint16(key string) uint16 { return v.GetUint16(key) } + +func (v *Viper) GetUint16(key string) uint16 { + return cast.ToUint16(v.Get(key)) +} + // GetUint32 returns the value associated with the key as an unsigned integer. func GetUint32(key string) uint32 { return v.GetUint32(key) } @@ -1137,9 +1147,8 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { // BindPFlag binds a specific key to a pflag (as used by cobra). // Example (where serverCmd is a Cobra instance): // -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { @@ -1870,6 +1879,10 @@ func (v *Viper) getKeyValueConfig() error { return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") } + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { val, err := v.getRemoteConfig(rp) if err != nil { @@ -1896,6 +1909,10 @@ func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{} // Retrieve the first found remote configuration. 
func (v *Viper) watchKeyValueConfigOnChannel() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { respc, _ := RemoteConfig.WatchChannel(rp) // Todo: Add quit channel @@ -1913,9 +1930,15 @@ func (v *Viper) watchKeyValueConfigOnChannel() error { // Retrieve the first found remote configuration. func (v *Viper) watchKeyValueConfig() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + for _, rp := range v.remoteProviders { val, err := v.watchRemoteConfig(rp) if err != nil { + v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) + continue } v.kvstore = val @@ -1958,9 +1981,10 @@ func (v *Viper) AllKeys() []string { // flattenAndMergeMap recursively flattens the given map into a map[string]bool // of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. +// - each path is merged into a single key string, delimited with v.keyDelim +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// // The resulting set of paths is merged to the given shadow set at the same time. func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { if shadow != nil && prefix != "" && shadow[prefix] { @@ -2111,14 +2135,17 @@ func (v *Viper) getConfigFile() (string, error) { // Debug prints all configuration registries for debugging // purposes. -func Debug() { v.Debug() } - -func (v *Viper) Debug() { - fmt.Printf("Aliases:\n%#v\n", v.aliases) - fmt.Printf("Override:\n%#v\n", v.override) - fmt.Printf("PFlags:\n%#v\n", v.pflags) - fmt.Printf("Env:\n%#v\n", v.env) - fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) - fmt.Printf("Config:\n%#v\n", v.config) - fmt.Printf("Defaults:\n%#v\n", v.defaults) +func Debug() { v.Debug() } +func DebugTo(w io.Writer) { v.DebugTo(w) } + +func (v *Viper) Debug() { v.DebugTo(os.Stdout) } + +func (v *Viper) DebugTo(w io.Writer) { + fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases) + fmt.Fprintf(w, "Override:\n%#v\n", v.override) + fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags) + fmt.Fprintf(w, "Env:\n%#v\n", v.env) + fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore) + fmt.Fprintf(w, "Config:\n%#v\n", v.config) + fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults) } diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go index b7a83be24e..7b1186e1fd 100644 --- a/vendor/github.com/subosito/gotenv/gotenv.go +++ b/vendor/github.com/subosito/gotenv/gotenv.go @@ -124,6 +124,7 @@ func Read(filename string) (Env, error) { if err != nil { return nil, err } + defer f.Close() return strictParse(f, false) } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go index 823be93f93..d5ecf721bd 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go @@ -32,8 +32,7 @@ func newErrBatchCorrupted(reason string) error { const ( batchHeaderLen = 8 + 4 - batchGrowRec = 3000 - batchBufioSize = 16 + batchGrowLimit = 3000 ) // BatchReplay wraps basic batch operations. 
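Two smaller viper additions above are easy to miss: GetUint16 fills a gap in the typed getters (converting via cast.ToUint16), and Debug is now a thin wrapper over the new DebugTo(io.Writer), so the registries can be dumped to any writer rather than only stdout. A short sketch; the key name is invented:

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetDefault("port", 1138) // hypothetical key

	// New typed getter added in this patch.
	fmt.Println(v.GetUint16("port"))

	// Capture the debug dump in a buffer instead of os.Stdout.
	var buf bytes.Buffer
	v.DebugTo(&buf)
	fmt.Print(buf.String())
}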
@@ -59,10 +58,6 @@ func (index batchIndex) v(data []byte) []byte { return nil } -func (index batchIndex) kv(data []byte) (key, value []byte) { - return index.k(data), index.v(data) -} - // Batch is a write batch. type Batch struct { data []byte @@ -70,14 +65,24 @@ type Batch struct { // internalLen is sums of key/value pair length plus 8-bytes internal key. internalLen int + + // growLimit is the threshold in order to slow down the memory allocation + // for batch when the number of accumulated entries exceeds value. + // + // batchGrowLimit is used as the default threshold if it's not configured. + growLimit int } func (b *Batch) grow(n int) { o := len(b.data) if cap(b.data)-o < n { + limit := batchGrowLimit + if b.growLimit > 0 { + limit = b.growLimit + } div := 1 - if len(b.index) > batchGrowRec { - div = len(b.index) / batchGrowRec + if len(b.index) > limit { + div = len(b.index) / limit } ndata := make([]byte, o, o+n+o/div) copy(ndata, b.data) @@ -223,17 +228,6 @@ func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error { return nil } -func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error { - var ik []byte - for i, index := range b.index { - ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType) - if err := mdb.Delete(ik); err != nil { - return err - } - } - return nil -} - func newBatch() interface{} { return &Batch{} } @@ -243,6 +237,42 @@ func MakeBatch(n int) *Batch { return &Batch{data: make([]byte, 0, n)} } +// BatchConfig contains the config options for batch. +type BatchConfig struct { + // InitialCapacity is the batch initial capacity to preallocate. + // + // The default value is 0. + InitialCapacity int + + // GrowLimit is the limit (in terms of entry) of how much buffer + // can grow each cycle. + // + // Initially the buffer will grow twice its current size until + // GrowLimit threshold is reached, after that the buffer will grow + // up to GrowLimit each cycle. This buffer grow size in bytes is + // loosely calculated from average entry size multiplied by GrowLimit. + // + // Generally, the memory allocation step is larger if this value + // is configured large, vice versa. + // + // The default value is 3000. + GrowLimit int +} + +// MakeBatchWithConfig initializes a batch object with the given configs. +func MakeBatchWithConfig(config *BatchConfig) *Batch { + var batch = new(Batch) + if config != nil { + if config.InitialCapacity > 0 { + batch.data = make([]byte, 0, config.InitialCapacity) + } + if config.GrowLimit > 0 { + batch.growLimit = config.GrowLimit + } + } + return batch +} + func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error { var index batchIndex for i, o := 0, 0; o < len(data); i++ { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go index c36ad32359..8e4f397ce3 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go @@ -8,6 +8,7 @@ package cache import ( + "sort" "sync" "sync/atomic" "unsafe" @@ -32,18 +33,9 @@ type Cacher interface { // Evict evicts the 'cache node'. Evict(n *Node) - - // EvictNS evicts 'cache node' with the given namespace. - EvictNS(ns uint64) - - // EvictAll evicts all 'cache node'. - EvictAll() - - // Close closes the 'cache tree' - Close() error } -// Value is a 'cacheable object'. It may implements util.Releaser, if +// Value is a 'cache-able object'. 
It may implements util.Releaser, if // so the the Release method will be called once object is released. type Value interface{} @@ -69,32 +61,76 @@ const ( mOverflowGrowThreshold = 1 << 7 ) +const ( + bucketUninitialized = iota + bucketInitialized + bucketFrozen +) + +type mNodes []*Node + +func (x mNodes) Len() int { return len(x) } +func (x mNodes) Less(i, j int) bool { + a, b := x[i].ns, x[j].ns + if a == b { + return x[i].key < x[j].key + } + return a < b +} +func (x mNodes) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x mNodes) sort() { sort.Sort(x) } + +func (x mNodes) search(ns, key uint64) int { + return sort.Search(len(x), func(i int) bool { + a := x[i].ns + if a == ns { + return x[i].key >= key + } + return a > ns + }) +} + type mBucket struct { - mu sync.Mutex - node []*Node - frozen bool + mu sync.Mutex + nodes mNodes + state int8 } -func (b *mBucket) freeze() []*Node { +func (b *mBucket) freeze() mNodes { b.mu.Lock() defer b.mu.Unlock() - if !b.frozen { - b.frozen = true + if b.state == bucketInitialized { + b.state = bucketFrozen + } else if b.state == bucketUninitialized { + panic("BUG: freeze uninitialized bucket") } - return b.node + return b.nodes } -func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) { +func (b *mBucket) frozen() bool { + if b.state == bucketFrozen { + return true + } + if b.state == bucketUninitialized { + panic("BUG: accessing uninitialized bucket") + } + return false +} + +func (b *mBucket) get(r *Cache, h *mHead, hash uint32, ns, key uint64, getOnly bool) (done, created bool, n *Node) { b.mu.Lock() - if b.frozen { + if b.frozen() { b.mu.Unlock() return } - // Scan the node. - for _, n := range b.node { - if n.hash == hash && n.ns == ns && n.key == key { + // Find the node. + i := b.nodes.search(ns, key) + if i < len(b.nodes) { + n = b.nodes[i] + if n.ns == ns && n.key == key { atomic.AddInt32(&n.ref, 1) b.mu.Unlock() return true, false, n @@ -102,7 +138,7 @@ func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset boo } // Get only. - if noset { + if getOnly { b.mu.Unlock() return true, false, nil } @@ -116,99 +152,106 @@ func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset boo ref: 1, } // Add node to bucket. - b.node = append(b.node, n) - bLen := len(b.node) + if i == len(b.nodes) { + b.nodes = append(b.nodes, n) + } else { + b.nodes = append(b.nodes[:i+1], b.nodes[i:]...) + b.nodes[i] = n + } + bLen := len(b.nodes) b.mu.Unlock() // Update counter. - grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold + grow := atomic.AddInt64(&r.statNodes, 1) >= h.growThreshold if bLen > mOverflowThreshold { grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold } // Grow. 
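The batch.go hunks above replace the fixed batchGrowRec constant with a per-batch growLimit and expose it through the new BatchConfig and MakeBatchWithConfig, so callers can tune preallocation and the growth-slowdown threshold. A sketch with arbitrary numbers:

package main

import "github.com/syndtr/goleveldb/leveldb"

func main() {
	// Preallocate 1 MiB up front and keep doubling the buffer until
	// 10000 entries accumulate; zero values fall back to the defaults
	// (no preallocation, batchGrowLimit = 3000).
	b := leveldb.MakeBatchWithConfig(&leveldb.BatchConfig{
		InitialCapacity: 1 << 20,
		GrowLimit:       10000,
	})
	b.Put([]byte("key"), []byte("value"))
	b.Delete([]byte("key"))
	_ = b // apply with db.Write(b, nil) against an open *leveldb.DB
}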
- if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + if grow && atomic.CompareAndSwapInt32(&h.resizeInProgress, 0, 1) { nhLen := len(h.buckets) << 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), + nh := &mHead{ + buckets: make([]mBucket, nhLen), mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), + predecessor: unsafe.Pointer(h), + growThreshold: int64(nhLen * mOverflowThreshold), + shrinkThreshold: int64(nhLen >> 1), } ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) if !ok { panic("BUG: failed swapping head") } + atomic.AddInt32(&r.statGrow, 1) go nh.initBuckets() } return true, true, n } -func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) { +func (b *mBucket) delete(r *Cache, h *mHead, hash uint32, ns, key uint64) (done, deleted bool) { b.mu.Lock() - if b.frozen { + if b.frozen() { b.mu.Unlock() return } - // Scan the node. - var ( - n *Node - bLen int - ) - for i := range b.node { - n = b.node[i] - if n.ns == ns && n.key == key { - if atomic.LoadInt32(&n.ref) == 0 { - deleted = true + // Find the node. + i := b.nodes.search(ns, key) + if i == len(b.nodes) { + b.mu.Unlock() + return true, false + } + n := b.nodes[i] + var bLen int + if n.ns == ns && n.key == key { + if atomic.LoadInt32(&n.ref) == 0 { + deleted = true + // Save and clear value. + if n.value != nil { // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil + if r, ok := n.value.(util.Releaser); ok { + r.Release() } - - // Remove node from bucket. - b.node = append(b.node[:i], b.node[i+1:]...) - bLen = len(b.node) + n.value = nil } - break + + // Remove node from bucket. + b.nodes = append(b.nodes[:i], b.nodes[i+1:]...) + bLen = len(b.nodes) } } b.mu.Unlock() if deleted { - // Call OnDel. - for _, f := range n.onDel { + // Call delete funcs. + for _, f := range n.delFuncs { f() } // Update counter. - atomic.AddInt32(&r.size, int32(n.size)*-1) - shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold + atomic.AddInt64(&r.statSize, int64(n.size)*-1) + shrink := atomic.AddInt64(&r.statNodes, -1) < h.shrinkThreshold if bLen >= mOverflowThreshold { atomic.AddInt32(&h.overflow, -1) } // Shrink. 
- if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) { + if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgress, 0, 1) { nhLen := len(h.buckets) >> 1 - nh := &mNode{ - buckets: make([]unsafe.Pointer, nhLen), + nh := &mHead{ + buckets: make([]mBucket, nhLen), mask: uint32(nhLen) - 1, - pred: unsafe.Pointer(h), - growThreshold: int32(nhLen * mOverflowThreshold), - shrinkThreshold: int32(nhLen >> 1), + predecessor: unsafe.Pointer(h), + growThreshold: int64(nhLen * mOverflowThreshold), + shrinkThreshold: int64(nhLen >> 1), } ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh)) if !ok { panic("BUG: failed swapping head") } + atomic.AddInt32(&r.statShrink, 1) go nh.initBuckets() } } @@ -216,95 +259,134 @@ func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, return true, deleted } -type mNode struct { - buckets []unsafe.Pointer // []*mBucket - mask uint32 - pred unsafe.Pointer // *mNode - resizeInProgess int32 +type mHead struct { + buckets []mBucket + mask uint32 + predecessor unsafe.Pointer // *mNode + resizeInProgress int32 overflow int32 - growThreshold int32 - shrinkThreshold int32 + growThreshold int64 + shrinkThreshold int64 } -func (n *mNode) initBucket(i uint32) *mBucket { - if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil { +func (h *mHead) initBucket(i uint32) *mBucket { + b := &h.buckets[i] + b.mu.Lock() + if b.state >= bucketInitialized { + b.mu.Unlock() return b } - p := (*mNode)(atomic.LoadPointer(&n.pred)) - if p != nil { - var node []*Node - if n.mask > p.mask { - // Grow. - pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask])) - if pb == nil { - pb = p.initBucket(i & p.mask) - } - m := pb.freeze() - // Split nodes. - for _, x := range m { - if x.hash&n.mask == i { - node = append(node, x) - } - } - } else { - // Shrink. - pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i])) - if pb0 == nil { - pb0 = p.initBucket(i) - } - pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))])) - if pb1 == nil { - pb1 = p.initBucket(i + uint32(len(n.buckets))) - } - m0 := pb0.freeze() - m1 := pb1.freeze() - // Merge nodes. - node = make([]*Node, 0, len(m0)+len(m1)) - node = append(node, m0...) - node = append(node, m1...) - } - b := &mBucket{node: node} - if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) { - if len(node) > mOverflowThreshold { - atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold)) + p := (*mHead)(atomic.LoadPointer(&h.predecessor)) + if p == nil { + panic("BUG: uninitialized bucket doesn't have predecessor") + } + + var nodes mNodes + if h.mask > p.mask { + // Grow. + m := p.initBucket(i & p.mask).freeze() + // Split nodes. + for _, x := range m { + if x.hash&h.mask == i { + nodes = append(nodes, x) } - return b } + } else { + // Shrink. + m0 := p.initBucket(i).freeze() + m1 := p.initBucket(i + uint32(len(h.buckets))).freeze() + // Merge nodes. + nodes = make(mNodes, 0, len(m0)+len(m1)) + nodes = append(nodes, m0...) + nodes = append(nodes, m1...) 
+ nodes.sort() + } + b.nodes = nodes + b.state = bucketInitialized + b.mu.Unlock() + return b +} + +func (h *mHead) initBuckets() { + for i := range h.buckets { + h.initBucket(uint32(i)) } + atomic.StorePointer(&h.predecessor, nil) +} - return (*mBucket)(atomic.LoadPointer(&n.buckets[i])) +func (h *mHead) enumerateNodesWithCB(f func([]*Node)) { + var nodes []*Node + for x := range h.buckets { + b := h.initBucket(uint32(x)) + + b.mu.Lock() + nodes = append(nodes, b.nodes...) + b.mu.Unlock() + f(nodes) + } } -func (n *mNode) initBuckets() { - for i := range n.buckets { - n.initBucket(uint32(i)) +func (h *mHead) enumerateNodesByNS(ns uint64) []*Node { + var nodes []*Node + for x := range h.buckets { + b := h.initBucket(uint32(x)) + + b.mu.Lock() + i := b.nodes.search(ns, 0) + for ; i < len(b.nodes); i++ { + n := b.nodes[i] + if n.ns != ns { + break + } + nodes = append(nodes, n) + } + b.mu.Unlock() } - atomic.StorePointer(&n.pred, nil) + return nodes +} + +type Stats struct { + Buckets int + Nodes int64 + Size int64 + GrowCount int32 + ShrinkCount int32 + HitCount int64 + MissCount int64 + SetCount int64 + DelCount int64 } // Cache is a 'cache map'. type Cache struct { mu sync.RWMutex mHead unsafe.Pointer // *mNode - nodes int32 - size int32 cacher Cacher closed bool + + statNodes int64 + statSize int64 + statGrow int32 + statShrink int32 + statHit int64 + statMiss int64 + statSet int64 + statDel int64 } // NewCache creates a new 'cache map'. The cacher is optional and // may be nil. func NewCache(cacher Cacher) *Cache { - h := &mNode{ - buckets: make([]unsafe.Pointer, mInitialSize), + h := &mHead{ + buckets: make([]mBucket, mInitialSize), mask: mInitialSize - 1, - growThreshold: int32(mInitialSize * mOverflowThreshold), + growThreshold: int64(mInitialSize * mOverflowThreshold), shrinkThreshold: 0, } for i := range h.buckets { - h.buckets[i] = unsafe.Pointer(&mBucket{}) + h.buckets[i].state = bucketInitialized } r := &Cache{ mHead: unsafe.Pointer(h), @@ -313,14 +395,20 @@ func NewCache(cacher Cacher) *Cache { return r } -func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { - h := (*mNode)(atomic.LoadPointer(&r.mHead)) +func (r *Cache) getBucket(hash uint32) (*mHead, *mBucket) { + h := (*mHead)(atomic.LoadPointer(&r.mHead)) i := hash & h.mask - b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) - if b == nil { - b = h.initBucket(i) - } - return h, b + return h, h.initBucket(i) +} + +func (r *Cache) enumerateNodesWithCB(f func([]*Node)) { + h := (*mHead)(atomic.LoadPointer(&r.mHead)) + h.enumerateNodesWithCB(f) +} + +func (r *Cache) enumerateNodesByNS(ns uint64) []*Node { + h := (*mHead)(atomic.LoadPointer(&r.mHead)) + return h.enumerateNodesByNS(ns) } func (r *Cache) delete(n *Node) bool { @@ -333,14 +421,29 @@ func (r *Cache) delete(n *Node) bool { } } +// GetStats returns cache statistics. +func (r *Cache) GetStats() Stats { + return Stats{ + Buckets: len((*mHead)(atomic.LoadPointer(&r.mHead)).buckets), + Nodes: atomic.LoadInt64(&r.statNodes), + Size: atomic.LoadInt64(&r.statSize), + GrowCount: atomic.LoadInt32(&r.statGrow), + ShrinkCount: atomic.LoadInt32(&r.statShrink), + HitCount: atomic.LoadInt64(&r.statHit), + MissCount: atomic.LoadInt64(&r.statMiss), + SetCount: atomic.LoadInt64(&r.statSet), + DelCount: atomic.LoadInt64(&r.statDel), + } +} + // Nodes returns number of 'cache node' in the map. func (r *Cache) Nodes() int { - return int(atomic.LoadInt32(&r.nodes)) + return int(atomic.LoadInt64(&r.statNodes)) } // Size returns sums of 'cache node' size in the map. 
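The cache rewrite above widens the counters to int64 and starts tracking hits, misses, sets, deletes, and resize counts, all exposed through the new Stats struct and GetStats. These are the same numbers that db.go later in this patch surfaces as DBStats.FileCache and DBStats.BlockCache. A sketch against the cache package directly:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(16 * 1024))

	// Get with a setFunc populates the node on a miss (counted as one
	// miss and one set); a second Get for the same ns/key is a hit.
	for i := 0; i < 2; i++ {
		h := c.Get(1, 42, func() (int, cache.Value) { return 1, "payload" })
		h.Release()
	}

	s := c.GetStats()
	fmt.Printf("nodes=%d size=%d hit=%d miss=%d set=%d\n",
		s.Nodes, s.Size, s.HitCount, s.MissCount, s.SetCount)
}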
func (r *Cache) Size() int { - return int(atomic.LoadInt32(&r.size)) + return int(atomic.LoadInt64(&r.statSize)) } // Capacity returns cache capacity. @@ -374,14 +477,20 @@ func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Han hash := murmur32(ns, key, 0xf00) for { h, b := r.getBucket(hash) - done, _, n := b.get(r, h, hash, ns, key, setFunc == nil) + done, created, n := b.get(r, h, hash, ns, key, setFunc == nil) if done { + if created || n == nil { + atomic.AddInt64(&r.statMiss, 1) + } else { + atomic.AddInt64(&r.statHit, 1) + } + if n != nil { n.mu.Lock() if n.value == nil { if setFunc == nil { n.mu.Unlock() - n.unref() + n.unRefInternal(false) return nil } @@ -389,10 +498,11 @@ func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Han if n.value == nil { n.size = 0 n.mu.Unlock() - n.unref() + n.unRefInternal(false) return nil } - atomic.AddInt32(&r.size, int32(n.size)) + atomic.AddInt64(&r.statSet, 1) + atomic.AddInt64(&r.statSize, int64(n.size)) } n.mu.Unlock() if r.cacher != nil { @@ -412,11 +522,11 @@ func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Han // only attributed to the particular 'cache node', so when a 'cache node' // is recreated it will not be banned. // -// If onDel is not nil, then it will be executed if such 'cache node' +// If delFunc is not nil, then it will be executed if such 'cache node' // doesn't exist or once the 'cache node' is released. // // Delete return true is such 'cache node' exist. -func (r *Cache) Delete(ns, key uint64, onDel func()) bool { +func (r *Cache) Delete(ns, key uint64, delFunc func()) bool { r.mu.RLock() defer r.mu.RUnlock() if r.closed { @@ -429,15 +539,15 @@ func (r *Cache) Delete(ns, key uint64, onDel func()) bool { done, _, n := b.get(r, h, hash, ns, key, true) if done { if n != nil { - if onDel != nil { + if delFunc != nil { n.mu.Lock() - n.onDel = append(n.onDel, onDel) + n.delFuncs = append(n.delFuncs, delFunc) n.mu.Unlock() } if r.cacher != nil { r.cacher.Ban(n) } - n.unref() + n.unRefInternal(true) return true } @@ -445,8 +555,8 @@ func (r *Cache) Delete(ns, key uint64, onDel func()) bool { } } - if onDel != nil { - onDel() + if delFunc != nil { + delFunc() } return false @@ -472,7 +582,7 @@ func (r *Cache) Evict(ns, key uint64) bool { if r.cacher != nil { r.cacher.Evict(n) } - n.unref() + n.unRefInternal(true) return true } @@ -484,7 +594,7 @@ func (r *Cache) Evict(ns, key uint64) bool { } // EvictNS evicts 'cache node' with the given namespace. This will -// simply call Cacher.EvictNS. +// simply call Cacher.Evict on all nodes with the given namespace. func (r *Cache) EvictNS(ns uint64) { r.mu.RLock() defer r.mu.RUnlock() @@ -493,10 +603,21 @@ func (r *Cache) EvictNS(ns uint64) { } if r.cacher != nil { - r.cacher.EvictNS(ns) + nodes := r.enumerateNodesByNS(ns) + for _, n := range nodes { + r.cacher.Evict(n) + } } } +func (r *Cache) evictAll() { + r.enumerateNodesWithCB(func(nodes []*Node) { + for _, n := range nodes { + r.cacher.Evict(n) + } + }) +} + // EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll. func (r *Cache) EvictAll() { r.mu.RLock() @@ -506,66 +627,46 @@ func (r *Cache) EvictAll() { } if r.cacher != nil { - r.cacher.EvictAll() + r.evictAll() } } -// Close closes the 'cache map' and forcefully releases all 'cache node'. -func (r *Cache) Close() error { +// Close closes the 'cache map'. +// All 'Cache' method is no-op after 'cache map' is closed. +// All 'cache node' will be evicted from 'cacher'. 
+// +// If 'force' is true then all 'cache node' will be forcefully released +// even if the 'node ref' is not zero. +func (r *Cache) Close(force bool) { + var head *mHead + // Hold RW-lock to make sure no more in-flight operations. r.mu.Lock() if !r.closed { r.closed = true + head = (*mHead)(atomic.LoadPointer(&r.mHead)) + atomic.StorePointer(&r.mHead, nil) + } + r.mu.Unlock() - h := (*mNode)(r.mHead) - h.initBuckets() + if head != nil { + head.enumerateNodesWithCB(func(nodes []*Node) { + for _, n := range nodes { + // Zeroing ref. Prevent unRefExternal to call finalizer. + if force { + atomic.StoreInt32(&n.ref, 0) + } - for i := range h.buckets { - b := (*mBucket)(h.buckets[i]) - for _, n := range b.node { - // Call releaser. - if n.value != nil { - if r, ok := n.value.(util.Releaser); ok { - r.Release() - } - n.value = nil + // Evict from cacher. + if r.cacher != nil { + r.cacher.Evict(n) } - // Call OnDel. - for _, f := range n.onDel { - f() + if force { + n.callFinalizer() } - n.onDel = nil } - } + }) } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil -} - -// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but -// unlike Close it doesn't forcefully releases 'cache node'. -func (r *Cache) CloseWeak() error { - r.mu.Lock() - if !r.closed { - r.closed = true - } - r.mu.Unlock() - - // Avoid deadlock. - if r.cacher != nil { - r.cacher.EvictAll() - if err := r.cacher.Close(); err != nil { - return err - } - } - return nil } // Node is a 'cache node'. @@ -579,8 +680,8 @@ type Node struct { size int value Value - ref int32 - onDel []func() + ref int32 + delFuncs []func() CacheData unsafe.Pointer } @@ -618,17 +719,39 @@ func (n *Node) GetHandle() *Handle { return &Handle{unsafe.Pointer(n)} } -func (n *Node) unref() { +func (n *Node) callFinalizer() { + // Call releaser. + if n.value != nil { + if r, ok := n.value.(util.Releaser); ok { + r.Release() + } + n.value = nil + } + + // Call delete funcs. 
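Close and CloseWeak are merged above into a single Close(force bool): force=true zeroes every node's ref count and runs finalizers immediately (the old Close behavior), while force=false only evicts nodes from the cacher and lets outstanding handles run the finalizer on their last Release (roughly the old CloseWeak). A reading of the new contract, hedged since it is inferred from the hunk above:

package main

import "github.com/syndtr/goleveldb/leveldb/cache"

func main() {
	c := cache.NewCache(cache.NewLRU(1024))

	// Non-forceful close: nodes are evicted from the cacher, but any
	// Handle still held elsewhere stays valid until it is Released.
	c.Close(false)

	// A forceful close would release everything right away, even nodes
	// whose ref count is non-zero:
	// c.Close(true)
}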
+ for _, f := range n.delFuncs { + f() + } + n.delFuncs = nil +} + +func (n *Node) unRefInternal(updateStat bool) { if atomic.AddInt32(&n.ref, -1) == 0 { n.r.delete(n) + if updateStat { + atomic.AddInt64(&n.r.statDel, 1) + } } } -func (n *Node) unrefLocked() { +func (n *Node) unRefExternal() { if atomic.AddInt32(&n.ref, -1) == 0 { n.r.mu.RLock() - if !n.r.closed { + if n.r.closed { + n.callFinalizer() + } else { n.r.delete(n) + atomic.AddInt64(&n.r.statDel, 1) } n.r.mu.RUnlock() } @@ -654,7 +777,7 @@ func (h *Handle) Release() { nPtr := atomic.LoadPointer(&h.n) if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { n := (*Node)(nPtr) - n.unrefLocked() + n.unRefExternal() } } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go index d9a84cde15..383ad5a566 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go @@ -142,51 +142,14 @@ func (r *lru) Evict(n *Node) { r.mu.Unlock() return } + rn.remove() + r.used -= n.Size() n.CacheData = nil r.mu.Unlock() rn.h.Release() } -func (r *lru) EvictNS(ns uint64) { - var evicted []*lruNode - - r.mu.Lock() - for e := r.recent.prev; e != &r.recent; { - rn := e - e = e.prev - if rn.n.NS() == ns { - rn.remove() - rn.n.CacheData = nil - r.used -= rn.n.Size() - evicted = append(evicted, rn) - } - } - r.mu.Unlock() - - for _, rn := range evicted { - rn.h.Release() - } -} - -func (r *lru) EvictAll() { - r.mu.Lock() - back := r.recent.prev - for rn := back; rn != &r.recent; rn = rn.prev { - rn.n.CacheData = nil - } - r.reset() - r.mu.Unlock() - - for rn := back; rn != &r.recent; rn = rn.prev { - rn.h.Release() - } -} - -func (r *lru) Close() error { - return nil -} - // NewLRU create a new LRU-cache. func NewLRU(capacity int) Cacher { r := &lru{capacity: capacity} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go index 8e10e9c1f3..b2724cd9e3 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/journal" @@ -141,7 +142,6 @@ func openDB(s *session) (*DB, error) { } return nil, err } - } // Doesn't need to be included in the wait group. @@ -149,7 +149,9 @@ func openDB(s *session) (*DB, error) { go db.mpoolDrain() if readOnly { - db.SetReadOnly() + if err := db.SetReadOnly(); err != nil { + return nil, err + } } else { db.closeW.Add(2) go db.tCompaction() @@ -311,9 +313,17 @@ func recoverTable(s *session, o *opt.Options) error { return } defer func() { - writer.Close() + if cerr := writer.Close(); cerr != nil { + if err == nil { + err = cerr + } else { + err = fmt.Errorf("error recovering table (%v); error closing (%v)", err, cerr) + } + } if err != nil { - s.stor.Remove(tmpFd) + if rerr := s.stor.Remove(tmpFd); rerr != nil { + err = fmt.Errorf("error recovering table (%v); error removing (%v)", err, rerr) + } tmpFd = storage.FileDesc{} } }() @@ -397,7 +407,7 @@ func recoverTable(s *session, o *opt.Options) error { tSeq = seq } if imin == nil { - imin = append([]byte{}, key...) + imin = append([]byte(nil), key...) } imax = append(imax[:0], key...) 
} @@ -530,7 +540,8 @@ func (db *DB) recoverJournal() error { if jr == nil { jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + // Ignore the error here + _ = jr.Reset(fr, dropper{db.s, fd}, strict, checksum) } // Flush memdb and remove obsolete journal file. @@ -550,7 +561,10 @@ func (db *DB) recoverJournal() error { } rec.resetAddedTables() - db.s.stor.Remove(ofd) + if err := db.s.stor.Remove(ofd); err != nil { + fr.Close() + return err + } ofd = storage.FileDesc{} } @@ -634,7 +648,9 @@ func (db *DB) recoverJournal() error { // Remove the last obsolete journal file. if !ofd.Zero() { - db.s.stor.Remove(ofd) + if err := db.s.stor.Remove(ofd); err != nil { + return err + } } return nil @@ -688,7 +704,9 @@ func (db *DB) recoverJournalRO() error { if jr == nil { jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum) } else { - jr.Reset(fr, dropper{db.s, fd}, strict, checksum) + if err := jr.Reset(fr, dropper{db.s, fd}, strict, checksum); err != nil { + return err + } } // Replay journal to memdb. @@ -765,7 +783,7 @@ func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R if auxm != nil { if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me + return append([]byte(nil), mv...), me } } @@ -777,7 +795,7 @@ func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.R defer m.decref() if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok { - return append([]byte{}, mv...), me + return append([]byte(nil), mv...), me } } @@ -1002,15 +1020,15 @@ func (db *DB) GetProperty(name string) (value string, err error) { } } case p == "blockpool": - value = fmt.Sprintf("%v", db.s.tops.bpool) + value = fmt.Sprintf("%v", db.s.tops.blockBuffer) case p == "cachedblock": - if db.s.tops.bcache != nil { - value = fmt.Sprintf("%d", db.s.tops.bcache.Size()) + if db.s.tops.blockCache != nil { + value = fmt.Sprintf("%d", db.s.tops.blockCache.Size()) } else { value = "" } case p == "openedtables": - value = fmt.Sprintf("%d", db.s.tops.cache.Size()) + value = fmt.Sprintf("%d", db.s.tops.fileCache.Size()) case p == "alivesnaps": value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps)) case p == "aliveiters": @@ -1037,6 +1055,9 @@ type DBStats struct { BlockCacheSize int OpenedTablesCount int + FileCache cache.Stats + BlockCache cache.Stats + LevelSizes Sizes LevelTablesCounts []int LevelRead Sizes @@ -1062,13 +1083,20 @@ func (db *DB) Stats(s *DBStats) error { s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay)) s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1 - s.OpenedTablesCount = db.s.tops.cache.Size() - if db.s.tops.bcache != nil { - s.BlockCacheSize = db.s.tops.bcache.Size() + s.OpenedTablesCount = db.s.tops.fileCache.Size() + if db.s.tops.blockCache != nil { + s.BlockCacheSize = db.s.tops.blockCache.Size() } else { s.BlockCacheSize = 0 } + s.FileCache = db.s.tops.fileCache.GetStats() + if db.s.tops.blockCache != nil { + s.BlockCache = db.s.tops.blockCache.GetStats() + } else { + s.BlockCache = cache.Stats{} + } + s.AliveIterators = atomic.LoadInt32(&db.aliveIters) s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go index 8942c5d777..cc275ace1d 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go @@ -7,6 +7,7 @@ 
package leveldb import ( + "fmt" "sync" "sync/atomic" "time" @@ -272,7 +273,7 @@ func (db *DB) memCompaction() { } defer mdb.decref() - db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size())) + db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(int64(mdb.Size()))) // Don't compact empty memdb. if mdb.Len() == 0 { @@ -350,11 +351,11 @@ func (db *DB) memCompaction() { } type tableCompactionBuilder struct { - db *DB - s *session - c *compaction - rec *sessionRecord - stat0, stat1 *cStatStaging + db *DB + s *session + c *compaction + rec *sessionRecord + stat1 *cStatStaging snapHasLastUkey bool snapLastUkey []byte @@ -410,29 +411,40 @@ func (b *tableCompactionBuilder) flush() error { } b.rec.addTableFile(b.c.sourceLevel+1, t) b.stat1.write += t.size - b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax) + b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(t.size), t.imin, t.imax) b.tw = nil return nil } -func (b *tableCompactionBuilder) cleanup() { +func (b *tableCompactionBuilder) cleanup() error { if b.tw != nil { - b.tw.drop() + if err := b.tw.drop(); err != nil { + return err + } b.tw = nil } + return nil } -func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error { +func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) (err error) { snapResumed := b.snapIter > 0 hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary. - lastUkey := append([]byte{}, b.snapLastUkey...) + lastUkey := append([]byte(nil), b.snapLastUkey...) lastSeq := b.snapLastSeq b.kerrCnt = b.snapKerrCnt b.dropCnt = b.snapDropCnt // Restore compaction state. 
b.c.restore() - defer b.cleanup() + defer func() { + if cerr := b.cleanup(); cerr != nil { + if err == nil { + err = cerr + } else { + err = fmt.Errorf("tableCompactionBuilder error: %v, cleanup error (%v)", err, cerr) + } + } + }() b.stat1.startTimer() defer b.stat1.stopTimer() @@ -563,7 +575,7 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) { rec.delTable(c.sourceLevel+i, t.fd.Num) } } - sourceSize := int(stats[0].read + stats[1].read) + sourceSize := stats[0].read + stats[1].read minSeq := db.minSeq() db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq) @@ -584,7 +596,7 @@ func (db *DB) tableCompaction(c *compaction, noTrivial bool) { db.compactionCommit("table", rec) stats[1].stopTimer() - resultSize := int(stats[1].write) + resultSize := stats[1].write db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration) // Save compaction stats @@ -655,10 +667,7 @@ func (db *DB) tableNeedCompaction() bool { func (db *DB) resumeWrite() bool { v := db.s.version() defer v.release() - if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() { - return true - } - return false + return v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() } func (db *DB) pauseCompaction(ch chan<- struct{}) { @@ -681,7 +690,7 @@ type cAuto struct { func (r cAuto) ack(err error) { if r.ackC != nil { defer func() { - recover() + _ = recover() }() r.ackC <- err } @@ -696,7 +705,7 @@ type cRange struct { func (r cRange) ack(err error) { if r.ackC != nil { defer func() { - recover() + _ = recover() }() r.ackC <- err } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go index e6e8ca59d0..ded13d3eb2 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go @@ -7,7 +7,6 @@ package leveldb import ( - "errors" "math/rand" "runtime" "sync" @@ -18,10 +17,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/util" ) -var ( - errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") -) - type memdbReleaser struct { once sync.Once m *memDB diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go index 65e1c54bb4..29430fee9c 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go @@ -137,8 +137,12 @@ func (db *DB) newMem(n int) (mem *memDB, err error) { if db.journal == nil { db.journal = journal.NewWriter(w) } else { - db.journal.Reset(w) - db.journalWriter.Close() + if err := db.journal.Reset(w); err != nil { + return nil, err + } + if err := db.journalWriter.Close(); err != nil { + return nil, err + } db.frozenJournalFd = db.journalFd } db.journalWriter = w @@ -181,13 +185,6 @@ func (db *DB) getEffectiveMem() *memDB { return db.mem } -// Check whether we has frozen memdb. -func (db *DB) hasFrozenMem() bool { - db.memMu.RLock() - defer db.memMu.RUnlock() - return db.frozenMem != nil -} - // Get frozen memdb. 
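A pattern recurs through this patch (recoverTable earlier, tableCompactionBuilder.run above, OpenFile in file_storage.go below): errors from deferred cleanup calls are no longer silently dropped but joined with the primary error through a named return. A generic sketch of the shape, with hypothetical doWork/cleanup helpers:

package main

import (
	"errors"
	"fmt"
)

func doWork() error  { return nil }                          // hypothetical
func cleanup() error { return errors.New("cleanup failed") } // hypothetical

func run() (err error) {
	defer func() {
		if cerr := cleanup(); cerr != nil {
			if err == nil {
				err = cerr
			} else {
				err = fmt.Errorf("run error: %v, cleanup error (%v)", err, cerr)
			}
		}
	}()
	return doWork()
}

func main() {
	fmt.Println(run()) // cleanup failed
}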
func (db *DB) getFrozenMem() *memDB { db.memMu.RLock() diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go index 21d1e512f3..b7b82fd843 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go @@ -110,7 +110,7 @@ func (tr *Transaction) flush() error { tr.tables = append(tr.tables, t) tr.rec.addTableFile(0, t) tr.stats.write += t.size - tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(t.size), t.imin, t.imax) } return nil } @@ -244,7 +244,7 @@ func (tr *Transaction) Commit() error { // Additionally, wait compaction when certain threshold reached. // Ignore error, returns error only if transaction can't be committed. - tr.db.waitCompaction() + _ = tr.db.waitCompaction() } // Only mark as done if transaction committed successfully. tr.setDone() diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go index db0c1bece1..18eddbe1e0 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go @@ -246,7 +246,10 @@ func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error { // Rotate memdb if it's reach the threshold. if batch.internalLen >= mdbFree { - db.rotateMem(0, false) + if _, err := db.rotateMem(0, false); err != nil { + db.unlockWrite(overflow, merged, err) + return err + } } db.unlockWrite(overflow, merged, nil) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go index 8d6146b6f5..0c7f64b284 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go @@ -73,6 +73,7 @@ func SetFd(err error, fd storage.FileDesc) error { case *ErrCorrupted: x.Fd = fd return x + default: + return err } - return err } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go index a23ab05f70..1e4fe4edbd 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go @@ -88,10 +88,7 @@ func (i *basicArrayIterator) Seek(key []byte) bool { return false } i.pos = i.array.Search(key) - if i.pos >= n { - return false - } - return true + return i.pos < n } func (i *basicArrayIterator) Next() bool { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go index 939adbb933..fd0b55adbd 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go @@ -26,10 +26,9 @@ type indexedIterator struct { index IteratorIndexer strict bool - data Iterator - err error - errf func(err error) - closed bool + data Iterator + err error + errf func(err error) } func (i *indexedIterator) setData() { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go index 1a7e29df8f..374e82b66e 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go +++ 
b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go @@ -7,6 +7,8 @@ package iterator import ( + "container/heap" + "github.com/syndtr/goleveldb/leveldb/comparer" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/util" @@ -33,6 +35,9 @@ type mergedIterator struct { err error errf func(err error) releaser util.Releaser + + indexes []int // the heap of iterator indexes + reverse bool //nolint: structcheck // if true, indexes is a max-heap } func assertKey(key []byte) []byte { @@ -67,16 +72,20 @@ func (i *mergedIterator) First() bool { return false } + h := i.indexHeap() + h.Reset(false) for x, iter := range i.iters { switch { case iter.First(): i.keys[x] = assertKey(iter.Key()) + h.Push(x) case i.iterErr(iter): return false default: i.keys[x] = nil } } + heap.Init(h) i.dir = dirSOI return i.next() } @@ -89,16 +98,20 @@ func (i *mergedIterator) Last() bool { return false } + h := i.indexHeap() + h.Reset(true) for x, iter := range i.iters { switch { case iter.Last(): i.keys[x] = assertKey(iter.Key()) + h.Push(x) case i.iterErr(iter): return false default: i.keys[x] = nil } } + heap.Init(h) i.dir = dirEOI return i.prev() } @@ -111,35 +124,31 @@ func (i *mergedIterator) Seek(key []byte) bool { return false } + h := i.indexHeap() + h.Reset(false) for x, iter := range i.iters { switch { case iter.Seek(key): i.keys[x] = assertKey(iter.Key()) + h.Push(x) case i.iterErr(iter): return false default: i.keys[x] = nil } } + heap.Init(h) i.dir = dirSOI return i.next() } func (i *mergedIterator) next() bool { - var key []byte - if i.dir == dirForward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) { - key = tkey - i.index = x - } - } - if key == nil { + h := i.indexHeap() + if h.Len() == 0 { i.dir = dirEOI return false } + i.index = heap.Pop(h).(int) i.dir = dirForward return true } @@ -156,7 +165,7 @@ func (i *mergedIterator) Next() bool { case dirSOI: return i.First() case dirBackward: - key := append([]byte{}, i.keys[i.index]...) + key := append([]byte(nil), i.keys[i.index]...) if !i.Seek(key) { return false } @@ -168,6 +177,7 @@ func (i *mergedIterator) Next() bool { switch { case iter.Next(): i.keys[x] = assertKey(iter.Key()) + heap.Push(i.indexHeap(), x) case i.iterErr(iter): return false default: @@ -177,20 +187,12 @@ func (i *mergedIterator) Next() bool { } func (i *mergedIterator) prev() bool { - var key []byte - if i.dir == dirBackward { - key = i.keys[i.index] - } - for x, tkey := range i.keys { - if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) { - key = tkey - i.index = x - } - } - if key == nil { + h := i.indexHeap() + if h.Len() == 0 { i.dir = dirSOI return false } + i.index = heap.Pop(h).(int) i.dir = dirBackward return true } @@ -207,7 +209,9 @@ func (i *mergedIterator) Prev() bool { case dirEOI: return i.Last() case dirForward: - key := append([]byte{}, i.keys[i.index]...) + key := append([]byte(nil), i.keys[i.index]...) 
+ h := i.indexHeap() + h.Reset(true) for x, iter := range i.iters { if x == i.index { continue @@ -216,12 +220,14 @@ func (i *mergedIterator) Prev() bool { switch { case seek && iter.Prev(), !seek && iter.Last(): i.keys[x] = assertKey(iter.Key()) + h.Push(x) case i.iterErr(iter): return false default: i.keys[x] = nil } } + heap.Init(h) } x := i.index @@ -229,6 +235,7 @@ func (i *mergedIterator) Prev() bool { switch { case iter.Prev(): i.keys[x] = assertKey(iter.Key()) + heap.Push(i.indexHeap(), x) case i.iterErr(iter): return false default: @@ -259,6 +266,7 @@ func (i *mergedIterator) Release() { } i.iters = nil i.keys = nil + i.indexes = nil if i.releaser != nil { i.releaser.Release() i.releaser = nil @@ -284,6 +292,10 @@ func (i *mergedIterator) SetErrorCallback(f func(err error)) { i.errf = f } +func (i *mergedIterator) indexHeap() *indexHeap { + return (*indexHeap)(i) +} + // NewMergedIterator returns an iterator that merges its input. Walking the // resultant iterator will return all key/value pairs of all input iterators // in strictly increasing key order, as defined by cmp. @@ -296,9 +308,43 @@ func (i *mergedIterator) SetErrorCallback(f func(err error)) { // continue to the next 'input iterator'. func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator { return &mergedIterator{ - iters: iters, - cmp: cmp, - strict: strict, - keys: make([][]byte, len(iters)), + iters: iters, + cmp: cmp, + strict: strict, + keys: make([][]byte, len(iters)), + indexes: make([]int, 0, len(iters)), } } + +// indexHeap implements heap.Interface. +type indexHeap mergedIterator + +func (h *indexHeap) Len() int { return len(h.indexes) } +func (h *indexHeap) Less(i, j int) bool { + i, j = h.indexes[i], h.indexes[j] + r := h.cmp.Compare(h.keys[i], h.keys[j]) + if h.reverse { + return r > 0 + } + return r < 0 +} + +func (h *indexHeap) Swap(i, j int) { + h.indexes[i], h.indexes[j] = h.indexes[j], h.indexes[i] +} + +func (h *indexHeap) Push(value interface{}) { + h.indexes = append(h.indexes, value.(int)) +} + +func (h *indexHeap) Pop() interface{} { + e := len(h.indexes) - 1 + popped := h.indexes[e] + h.indexes = h.indexes[:e] + return popped +} + +func (h *indexHeap) Reset(reverse bool) { + h.reverse = reverse + h.indexes = h.indexes[:0] +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go index d094c3d0f8..f7f8b540ed 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go @@ -354,6 +354,8 @@ type Writer struct { // buf[:written] has already been written to w. // written is zero unless Flush has been called. written int + // blockNumber is the zero based block number currently held in buf. + blockNumber int64 // first is whether the current chunk is the first chunk of the journal. first bool // pending is whether a chunk is buffered but not yet written. @@ -402,6 +404,7 @@ func (w *Writer) writeBlock() { w.i = 0 w.j = headerSize w.written = 0 + w.blockNumber++ } // writePending finishes the current journal and writes the buffer to the @@ -457,6 +460,7 @@ func (w *Writer) Reset(writer io.Writer) (err error) { w.i = 0 w.j = 0 w.written = 0 + w.blockNumber = 0 w.first = false w.pending = false w.err = nil @@ -474,7 +478,7 @@ func (w *Writer) Next() (io.Writer, error) { w.fillHeader(true) } w.i = w.j - w.j = w.j + headerSize + w.j += headerSize // Check if there is room in the block for the header. 
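Back in merged_iter.go above, the O(k) linear scan over all iterator keys in next/prev is replaced by a container/heap of iterator indexes, so picking the smallest (or, after Reset(true), largest) current key costs O(log k). A standalone sketch of the same pattern with plain string keys, not the library's actual types:

package main

import (
	"container/heap"
	"fmt"
)

// keyHeap orders iterator indexes by their current key, mirroring the
// indexHeap added above (minus the reverse mode and mergedIterator state).
type keyHeap struct {
	keys    []string
	indexes []int
}

func (h *keyHeap) Len() int           { return len(h.indexes) }
func (h *keyHeap) Less(i, j int) bool { return h.keys[h.indexes[i]] < h.keys[h.indexes[j]] }
func (h *keyHeap) Swap(i, j int)      { h.indexes[i], h.indexes[j] = h.indexes[j], h.indexes[i] }
func (h *keyHeap) Push(x interface{}) { h.indexes = append(h.indexes, x.(int)) }
func (h *keyHeap) Pop() interface{} {
	e := len(h.indexes) - 1
	popped := h.indexes[e]
	h.indexes = h.indexes[:e]
	return popped
}

func main() {
	h := &keyHeap{keys: []string{"carrot", "apple", "banana"}, indexes: []int{0, 1, 2}}
	heap.Init(h)
	for h.Len() > 0 {
		fmt.Println(h.keys[heap.Pop(h).(int)]) // apple, banana, carrot
	}
}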
if w.j > blockSize { // Fill in the rest of the block with zeroes. @@ -491,6 +495,14 @@ func (w *Writer) Next() (io.Writer, error) { return singleWriter{w, w.seq}, nil } +// Size returns the current size of the file. +func (w *Writer) Size() int64 { + if w == nil { + return 0 + } + return w.blockNumber*blockSize + int64(w.j) +} + type singleWriter struct { w *Writer seq int diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go index ad8f51ec85..dc7be1fad9 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/key.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go @@ -25,7 +25,7 @@ func (e *ErrInternalKeyCorrupted) Error() string { } func newErrInternalKeyCorrupted(ikey []byte, reason string) error { - return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason}) + return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte(nil), ikey...), reason}) } type keyType uint @@ -90,7 +90,7 @@ func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length") } num := binary.LittleEndian.Uint64(ik[len(ik)-8:]) - seq, kt = uint64(num>>8), keyType(num&0xff) + seq, kt = num>>8, keyType(num&0xff) if kt > keyTypeVal { return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type") } @@ -124,7 +124,7 @@ func (ik internalKey) num() uint64 { func (ik internalKey) parseNum() (seq uint64, kt keyType) { num := ik.num() - seq, kt = uint64(num>>8), keyType(num&0xff) + seq, kt = num>>8, keyType(num&0xff) if kt > keyTypeVal { panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt)) } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go index dead5fdfbe..48fb0416dc 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go @@ -41,6 +41,7 @@ var ( DefaultWriteL0PauseTrigger = 12 DefaultWriteL0SlowdownTrigger = 8 DefaultFilterBaseLg = 11 + DefaultMaxManifestFileSize = int64(64 * MiB) ) // Cacher is a caching algorithm. @@ -48,23 +49,60 @@ type Cacher interface { New(capacity int) cache.Cacher } -type CacherFunc struct { +type cacherFunc struct { NewFunc func(capacity int) cache.Cacher } -func (f *CacherFunc) New(capacity int) cache.Cacher { +func (f *cacherFunc) New(capacity int) cache.Cacher { if f != nil && f.NewFunc != nil { return f.NewFunc(capacity) } return nil } +func CacherFunc(f func(capacity int) cache.Cacher) Cacher { + return &cacherFunc{f} +} + +type passthroughCacher struct { + Cacher cache.Cacher +} + +func (p *passthroughCacher) New(capacity int) cache.Cacher { + return p.Cacher +} + +// PassthroughCacher can be used to passthrough pre-initialized +// 'cacher instance'. This is useful for sharing cache over multiple +// DB instances. +// +// Shared cache example: +// +// fileCache := opt.NewLRU(500) +// blockCache := opt.NewLRU(8 * opt.MiB) +// options := &opt.Options{ +// OpenFilesCacher: fileCache, +// BlockCacher: blockCache, +// } +// db1, err1 := leveldb.OpenFile("path/to/db1", options) +// ... +// db2, err2 := leveldb.OpenFile("path/to/db2", options) +// ... +func PassthroughCacher(x cache.Cacher) Cacher { + return &passthroughCacher{x} +} + +// NewLRU creates LRU 'passthrough cacher'. 
+func NewLRU(capacity int) Cacher { + return PassthroughCacher(cache.NewLRU(capacity)) +} + var ( // LRUCacher is the LRU-cache algorithm. - LRUCacher = &CacherFunc{cache.NewLRU} + LRUCacher = CacherFunc(cache.NewLRU) // NoCacher is the value to disable caching algorithm. - NoCacher = &CacherFunc{} + NoCacher = CacherFunc(nil) ) // Compression is the 'sorted table' block compression algorithm to use. @@ -376,6 +414,13 @@ type Options struct { // // The default value is 11(as well as 2KB) FilterBaseLg int + + // MaxManifestFileSize is the maximum size limit of the MANIFEST-****** file. + // When the MANIFEST-****** file grows beyond this size, LevelDB will create + // a new MANIFEST file. + // + // The default value is 64 MiB. + MaxManifestFileSize int64 } func (o *Options) GetAltFilters() []filter.Filter { @@ -715,7 +760,13 @@ func (wo *WriteOptions) GetSync() bool { func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool { if ro.GetStrict(StrictOverride) { return ro.GetStrict(strict) - } else { - return o.GetStrict(strict) || ro.GetStrict(strict) } + return o.GetStrict(strict) || ro.GetStrict(strict) +} + +func (o *Options) GetMaxManifestFileSize() int64 { + if o == nil || o.MaxManifestFileSize <= 0 { + return DefaultMaxManifestFileSize + } + return o.MaxManifestFileSize } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_darwin.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_darwin.go index 67b820427f..e7490816e7 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_darwin.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin package opt diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_default.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_default.go index 97a14a892a..4c9f4b05ba 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_default.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options_default.go @@ -1,3 +1,4 @@ +//go:build !darwin // +build !darwin package opt diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go index e143352176..036570e0f1 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go @@ -54,7 +54,7 @@ type session struct { stCompPtrs []internalKey // compaction pointers; need external synchronization stVersion *version // current version - ntVersionId int64 // next version id to assign + ntVersionID int64 // next version id to assign refCh chan *vTask relCh chan *vTask deltaCh chan *vDelta @@ -107,7 +107,7 @@ func (s *session) close() { } s.manifest = nil s.manifestWriter = nil - s.setVersion(nil, &version{s: s, closing: true, id: s.ntVersionId}) + s.setVersion(nil, &version{s: s, closing: true, id: s.ntVersionID}) // Close all background goroutines close(s.closeC) @@ -171,7 +171,7 @@ func (s *session) recover() (err error) { if err == nil { // save compact pointers for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) + s.setCompPtr(r.level, r.ikey) } // commit record to version staging staging.commit(rec) @@ -226,6 +226,9 @@ func (s *session) commit(r *sessionRecord, trivial bool) (err error) { if s.manifest == nil { // manifest journal writer not yet created, create one err = s.newManifest(r, nv) + } else if s.manifest.Size() >= s.o.GetMaxManifestFileSize() { + // pass nil sessionRecord to avoid over-reference table file + err 
= s.newManifest(nil, nv) } else { err = s.flushManifest(r) } diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go index b46a3e4536..2fd5f32e66 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go @@ -48,7 +48,7 @@ func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (i flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel) rec.addTableFile(flushLevel, t) - s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax) + s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(t.size), t.imin, t.imax) return flushLevel, nil } @@ -226,8 +226,8 @@ func (c *compaction) expand() { exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false) if len(exp1) == len(t1) { c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)", - c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())), - len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size()))) + c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(t0.size()), len(t1), shortenb(t1.size()), + len(exp0), shortenb(exp0.size()), len(exp1), shortenb(exp1.size())) imin, imax = xmin, xmax t0, t1 = exp0, exp1 amin, amax = append(t0, t1...).getRange(c.s.icmp) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go index 854e1aa6f9..b1a352f671 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go @@ -201,7 +201,7 @@ func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF } x, err := binary.ReadUvarint(r) if err != nil { - if err == io.ErrUnexpectedEOF || (mayEOF == false && err == io.EOF) { + if err == io.ErrUnexpectedEOF || (!mayEOF && err == io.EOF) { p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, "short read"}) } else if strings.HasPrefix(err.Error(), "binary:") { p.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrManifestCorrupted{field, err.Error()}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go index 730bd2cd34..f467f2d4bc 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go @@ -24,7 +24,7 @@ type dropper struct { func (d dropper) Drop(err error) { if e, ok := err.(*journal.ErrCorrupted); ok { - d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason) + d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(int64(e.Size)), e.Reason) } else { d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err) } @@ -130,7 +130,7 @@ func (s *session) refLoop() { for { // Skip any abandoned version number to prevent blocking processing. if skipAbandoned() { - next += 1 + next++ continue } // Don't bother the version that has been released. @@ -162,13 +162,13 @@ func (s *session) refLoop() { referenced[next] = struct{}{} delete(ref, next) delete(deltas, next) - next += 1 + next++ } // Use delta information to process all released versions. 
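The opt hunks above turn CacherFunc into a constructor function, add PassthroughCacher and NewLRU so one pre-built cache.Cacher can be shared across DB instances, and introduce MaxManifestFileSize, which session.commit now checks to rewrite an oversized MANIFEST. A sketch combining them; the paths are hypothetical:

package main

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// One block cache shared by both databases, per the shared-cache
	// example in the PassthroughCacher doc comment above.
	shared := opt.NewLRU(8 * opt.MiB)

	o := &opt.Options{
		BlockCacher:         shared,
		MaxManifestFileSize: 64 * opt.MiB, // also the default
	}

	db1, err := leveldb.OpenFile("/tmp/db1", o) // hypothetical path
	if err != nil {
		panic(err)
	}
	defer db1.Close()

	db2, err := leveldb.OpenFile("/tmp/db2", o) // hypothetical path
	if err != nil {
		panic(err)
	}
	defer db2.Close()
}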
for { if skipAbandoned() { - next += 1 + next++ continue } if d, exist := released[next]; exist { @@ -176,7 +176,7 @@ func (s *session) refLoop() { applyDelta(d) } delete(released, next) - next += 1 + next++ continue } return @@ -396,7 +396,7 @@ func (s *session) recordCommited(rec *sessionRecord) { } for _, r := range rec.compPtrs { - s.setCompPtr(r.level, internalKey(r.ikey)) + s.setCompPtr(r.level, r.ikey) } } @@ -429,14 +429,16 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) { s.manifestWriter.Close() } if !s.manifestFd.Zero() { - s.stor.Remove(s.manifestFd) + err = s.stor.Remove(s.manifestFd) } s.manifestFd = fd s.manifestWriter = writer s.manifest = jw } else { writer.Close() - s.stor.Remove(fd) + if rerr := s.stor.Remove(fd); err != nil { + err = fmt.Errorf("newManifest error: %v, cleanup error (%v)", err, rerr) + } s.reuseFileNum(fd.Num) } }() diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go index 9ba71fd6d1..3c5e70a0e3 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go @@ -111,7 +111,9 @@ func OpenFile(path string, readOnly bool) (Storage, error) { defer func() { if err != nil { - flock.release() + if ferr := flock.release(); ferr != nil { + err = fmt.Errorf("error opening file (%v); error unlocking file (%v)", err, ferr) + } } }() @@ -175,12 +177,13 @@ func itoa(buf []byte, i int, wid int) []byte { return append(buf, b[bp:]...) } -func (fs *fileStorage) printDay(t time.Time) { +func (fs *fileStorage) printDay(t time.Time) error { if fs.day == t.Day() { - return + return nil } fs.day = t.Day() - fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) + _, err := fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n")) + return err } func (fs *fileStorage) doLog(t time.Time, str string) { @@ -189,7 +192,9 @@ func (fs *fileStorage) doLog(t time.Time, str string) { fs.logw.Close() fs.logw = nil fs.logSize = 0 - rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")) + if err := rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old")); err != nil { + return + } } if fs.logw == nil { var err error @@ -200,7 +205,9 @@ func (fs *fileStorage) doLog(t time.Time, str string) { // Force printDay on new log file. fs.day = 0 } - fs.printDay(t) + if err := fs.printDay(t); err != nil { + return + } hour, min, sec := t.Clock() msec := t.Nanosecond() / 1e3 // time @@ -634,8 +641,9 @@ func fsGenOldName(fd FileDesc) string { switch fd.Type { case TypeTable: return fmt.Sprintf("%06d.sst", fd.Num) + default: + return fsGenName(fd) } - return fsGenName(fd) } func fsParseName(name string) (fd FileDesc, ok bool) { diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go index 5545aeef2a..b23d4652b3 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go @@ -4,6 +4,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
+//go:build nacl
// +build nacl

package storage
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
index 79901ee4a7..cd84ce2e95 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

+//go:build solaris
// +build solaris

package storage
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
index d75f66a9ef..601ffe3997 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -4,6 +4,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd linux netbsd openbsd

package storage
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
index 838f1bee1b..a32972ad66 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -29,7 +29,6 @@ func (lock *memStorageLock) Unlock() {
	if ms.slock == lock {
		ms.slock = nil
	}
-	return
}

// memStorage is a memory-backed storage.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
index 4e4a724258..b385fc6faf 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -59,8 +59,9 @@ func isCorrupted(err error) bool {
	switch err.(type) {
	case *ErrCorrupted:
		return true
+	default:
+		return false
	}
-	return false
}

func (e *ErrCorrupted) Error() string {
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
index 3de881d684..d0fab40c40 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -88,18 +88,6 @@ type tFiles []*tFile
func (tf tFiles) Len() int      { return len(tf) }
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }

-func (tf tFiles) nums() string {
-	x := "[ "
-	for i, f := range tf {
-		if i != 0 {
-			x += ", "
-		}
-		x += fmt.Sprint(f.fd.Num)
-	}
-	x += " ]"
-	return x
-}
-
// Returns true if i smallest key is less than j.
// This is used for sorting by key in ascending order.
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
@@ -360,9 +348,9 @@ type tOps struct {
	s            *session
	noSync       bool
	evictRemoved bool
-	cache        *cache.Cache
-	bcache       *cache.Cache
-	bpool        *util.BufferPool
+	fileCache    *cache.Cache
+	blockCache   *cache.Cache
+	blockBuffer  *util.BufferPool
}

// Creates an empty table and returns table writer.
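Note: the renames above separate the two cache layers by role. Below is a small, runnable sketch of that two-layer scheme using the goleveldb cache package's public API; the string values stand in for the *table.Reader handles and decoded blocks the real code caches.

    package main

    import (
    	"fmt"

    	"github.com/syndtr/goleveldb/leveldb/cache"
    )

    func main() {
    	// Mirrors tOps: one cache for open table files, one for their blocks.
    	fileCache := cache.NewCache(nil) // nil Cacher: no capacity-based eviction
    	blockCache := cache.NewCache(nil)

    	const fileNum = 42
    	// File layer: namespace 0, keyed by table file number.
    	h := fileCache.Get(0, fileNum, func() (int, cache.Value) {
    		return 1, "open table reader" // stand-in for *table.Reader
    	})
    	fmt.Println(h.Value())
    	h.Release() // cache handles must be released after use

    	// Block layer: one namespace per table file.
    	blocks := &cache.NamespaceGetter{Cache: blockCache, NS: fileNum}
    	bh := blocks.Get(7, func() (int, cache.Value) { return 1, "decoded block" })
    	bh.Release()

    	// Removing a table can then evict all of its blocks in one call.
    	blockCache.EvictNS(fileNum)
    }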
@@ -376,7 +364,7 @@ func (t *tOps) create(tSize int) (*tWriter, error) {
		t:  t,
		fd: fd,
		w:  fw,
-		tw: table.NewWriter(fw, t.s.o.Options, t.bpool, tSize),
+		tw: table.NewWriter(fw, t.s.o.Options, t.blockBuffer, tSize),
	}, nil
}

@@ -389,7 +377,9 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {

	defer func() {
		if err != nil {
-			w.drop()
+			if derr := w.drop(); derr != nil {
+				err = fmt.Errorf("error createFrom (%v); error dropping (%v)", err, derr)
+			}
		}
	}()

@@ -412,22 +402,22 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
-	ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
+	ch = t.fileCache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
		var r storage.Reader
		r, err = t.s.stor.Open(f.fd)
		if err != nil {
			return 0, nil
		}

-		var bcache *cache.NamespaceGetter
-		if t.bcache != nil {
-			bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+		var blockCache *cache.NamespaceGetter
+		if t.blockCache != nil {
+			blockCache = &cache.NamespaceGetter{Cache: t.blockCache, NS: uint64(f.fd.Num)}
		}

		var tr *table.Reader
-		tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
+		tr, err = table.NewReader(r, f.size, f.fd, blockCache, t.blockBuffer, t.s.o.Options)
		if err != nil {
-			r.Close()
+			_ = r.Close()
			return 0, nil
		}
		return 1, tr
@@ -484,14 +474,14 @@ func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) ite
// Removes table from persistent storage. It waits until
// no one uses the table.
func (t *tOps) remove(fd storage.FileDesc) {
-	t.cache.Delete(0, uint64(fd.Num), func() {
+	t.fileCache.Delete(0, uint64(fd.Num), func() {
		if err := t.s.stor.Remove(fd); err != nil {
			t.s.logf("table@remove removing @%d %q", fd.Num, err)
		} else {
			t.s.logf("table@remove removed @%d", fd.Num)
		}
-		if t.evictRemoved && t.bcache != nil {
-			t.bcache.EvictNS(uint64(fd.Num))
+		if t.evictRemoved && t.blockCache != nil {
+			t.blockCache.EvictNS(uint64(fd.Num))
		}
		// Try to reuse file num, useful for discarded transaction.
		t.s.reuseFileNum(fd.Num)
@@ -501,39 +491,39 @@ func (t *tOps) remove(fd storage.FileDesc) {
// Closes the table ops instance. It will close all tables,
// regardless of whether they are still in use or not.
func (t *tOps) close() {
-	t.cache.Close()
-	if t.bcache != nil {
-		t.bcache.CloseWeak()
+	t.fileCache.Close(true)
+	if t.blockCache != nil {
+		t.blockCache.Close(false)
	}
}

// Creates new initialized table ops instance.
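// (It wires three independent layers: an open-file cache keyed by file number, an
// optional shared block cache, and a buffer pool sized to the block size plus the
// 5-byte block trailer, i.e. one compression-type byte and a 4-byte checksum.)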
func newTableOps(s *session) *tOps {
	var (
-		cacher cache.Cacher
-		bcache *cache.Cache
-		bpool  *util.BufferPool
+		fileCacher  cache.Cacher
+		blockCache  *cache.Cache
+		blockBuffer *util.BufferPool
	)
	if s.o.GetOpenFilesCacheCapacity() > 0 {
-		cacher = s.o.GetOpenFilesCacher().New(s.o.GetOpenFilesCacheCapacity())
+		fileCacher = s.o.GetOpenFilesCacher().New(s.o.GetOpenFilesCacheCapacity())
	}
	if !s.o.GetDisableBlockCache() {
-		var bcacher cache.Cacher
+		var blockCacher cache.Cacher
		if s.o.GetBlockCacheCapacity() > 0 {
-			bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
+			blockCacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
		}
-		bcache = cache.NewCache(bcacher)
+		blockCache = cache.NewCache(blockCacher)
	}
	if !s.o.GetDisableBufferPool() {
-		bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+		blockBuffer = util.NewBufferPool(s.o.GetBlockSize() + 5)
	}
	return &tOps{
		s:            s,
		noSync:       s.o.GetNoSync(),
		evictRemoved: s.o.GetBlockCacheEvictRemoved(),
-		cache:        cache.NewCache(cacher),
-		bcache:       bcache,
-		bpool:        bpool,
+		fileCache:    cache.NewCache(fileCacher),
+		blockCache:   blockCache,
+		blockBuffer:  blockBuffer,
	}
}

@@ -552,7 +542,7 @@ type tWriter struct {
// Append key/value pair to the table.
func (w *tWriter) append(key, value []byte) error {
	if w.first == nil {
-		w.first = append([]byte{}, key...)
+		w.first = append([]byte(nil), key...)
	}
	w.last = append(w.last[:0], key...)
	return w.tw.Append(key, value)
@@ -564,16 +554,27 @@ func (w *tWriter) empty() bool {
}

// Closes the storage.Writer.
-func (w *tWriter) close() {
+func (w *tWriter) close() error {
	if w.w != nil {
-		w.w.Close()
+		if err := w.w.Close(); err != nil {
+			return err
+		}
		w.w = nil
	}
+	return nil
}

// Finalizes the table and returns table file.
func (w *tWriter) finish() (f *tFile, err error) {
-	defer w.close()
+	defer func() {
+		if cerr := w.close(); cerr != nil {
+			if err == nil {
+				err = cerr
+			} else {
+				err = fmt.Errorf("error finishing table (%v); error closing writer (%v)", err, cerr)
+			}
+		}
+	}()
	err = w.tw.Close()
	if err != nil {
		return
@@ -589,11 +590,16 @@ func (w *tWriter) finish() (f *tFile, err error) {
}

// Drops the table.
-func (w *tWriter) drop() {
-	w.close()
-	w.t.s.stor.Remove(w.fd)
-	w.t.s.reuseFileNum(w.fd.Num)
+func (w *tWriter) drop() error {
+	if err := w.close(); err != nil {
+		return err
+	}
	w.tw = nil
	w.first = nil
	w.last = nil
+	if err := w.t.s.stor.Remove(w.fd); err != nil {
+		return err
+	}
+	w.t.s.reuseFileNum(w.fd.Num)
+	return nil
}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
index 496feb6fb4..8128794c22 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -901,7 +901,7 @@ func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bo
		} else {
			// Value does use block buffer, and since the buffer will be
			// recycled, it needs to be copied.
-			value = append([]byte{}, data.Value()...)
+			value = append([]byte(nil), data.Value()...)
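			// ([]byte(nil) and []byte{} behave identically under append; either way a
			// fresh backing array is allocated. The copy itself is the point: the pooled
			// buffer behind data is recycled once Release is called below.)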
} } data.Release() diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go index ad837f21b2..ea89d600e7 100644 --- a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go @@ -40,7 +40,7 @@ type blockWriter struct { scratch []byte } -func (w *blockWriter) append(key, value []byte) { +func (w *blockWriter) append(key, value []byte) (err error) { nShared := 0 if w.nEntries%w.restartInterval == 0 { w.restarts = append(w.restarts, uint32(w.buf.Len())) @@ -50,14 +50,21 @@ func (w *blockWriter) append(key, value []byte) { n := binary.PutUvarint(w.scratch[0:], uint64(nShared)) n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared)) n += binary.PutUvarint(w.scratch[n:], uint64(len(value))) - w.buf.Write(w.scratch[:n]) - w.buf.Write(key[nShared:]) - w.buf.Write(value) + if _, err = w.buf.Write(w.scratch[:n]); err != nil { + return err + } + if _, err = w.buf.Write(key[nShared:]); err != nil { + return err + } + if _, err = w.buf.Write(value); err != nil { + return err + } w.prevKey = append(w.prevKey[:0], key...) w.nEntries++ + return nil } -func (w *blockWriter) finish() { +func (w *blockWriter) finish() error { // Write restarts entry. if w.nEntries == 0 { // Must have at least one restart entry. @@ -68,6 +75,7 @@ func (w *blockWriter) finish() { buf4 := w.buf.Alloc(4) binary.LittleEndian.PutUint32(buf4, x) } + return nil } func (w *blockWriter) reset() { @@ -109,9 +117,9 @@ func (w *filterWriter) flush(offset uint64) { } } -func (w *filterWriter) finish() { +func (w *filterWriter) finish() error { if w.generator == nil { - return + return nil } // Generate last keys. @@ -123,7 +131,7 @@ func (w *filterWriter) finish() { buf4 := w.buf.Alloc(4) binary.LittleEndian.PutUint32(buf4, x) } - w.buf.WriteByte(byte(w.baseLg)) + return w.buf.WriteByte(byte(w.baseLg)) } func (w *filterWriter) generate() { @@ -194,9 +202,9 @@ func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh b return } -func (w *Writer) flushPendingBH(key []byte) { +func (w *Writer) flushPendingBH(key []byte) error { if w.pendingBH.length == 0 { - return + return nil } var separator []byte if len(key) == 0 { @@ -211,15 +219,20 @@ func (w *Writer) flushPendingBH(key []byte) { } n := encodeBlockHandle(w.scratch[:20], w.pendingBH) // Append the block handle to the index block. - w.indexBlock.append(separator, w.scratch[:n]) + if err := w.indexBlock.append(separator, w.scratch[:n]); err != nil { + return err + } // Reset prev key of the data block. w.dataBlock.prevKey = w.dataBlock.prevKey[:0] // Clear pending block handle. w.pendingBH = blockHandle{} + return nil } func (w *Writer) finishBlock() error { - w.dataBlock.finish() + if err := w.dataBlock.finish(); err != nil { + return err + } bh, err := w.writeBlock(&w.dataBlock.buf, w.compression) if err != nil { return err @@ -245,9 +258,13 @@ func (w *Writer) Append(key, value []byte) error { return w.err } - w.flushPendingBH(key) + if err := w.flushPendingBH(key); err != nil { + return err + } // Append key/value pair to the data block. - w.dataBlock.append(key, value) + if err := w.dataBlock.append(key, value); err != nil { + return err + } // Add key to the filter block. w.filterBlock.add(key) @@ -308,11 +325,15 @@ func (w *Writer) Close() error { return w.err } } - w.flushPendingBH(nil) + if err := w.flushPendingBH(nil); err != nil { + return err + } // Write the filter block. 
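	// (filterWriter.finish now surfaces the final WriteByte error instead of dropping
	// it; with the in-package util.Buffer these writes do not fail, so this reads as
	// error-propagation hygiene rather than a new failure mode.)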
	var filterBH blockHandle
-	w.filterBlock.finish()
+	if err := w.filterBlock.finish(); err != nil {
+		return err
+	}
	if buf := &w.filterBlock.buf; buf.Len() > 0 {
		filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
		if w.err != nil {
@@ -324,9 +345,13 @@
	if filterBH.length > 0 {
		key := []byte("filter." + w.filter.Name())
		n := encodeBlockHandle(w.scratch[:20], filterBH)
-		w.dataBlock.append(key, w.scratch[:n])
+		if err := w.dataBlock.append(key, w.scratch[:n]); err != nil {
+			return err
+		}
+	}
+	if err := w.dataBlock.finish(); err != nil {
+		return err
	}
-	w.dataBlock.finish()
	metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
	if err != nil {
		w.err = err
@@ -334,7 +359,9 @@
	}

	// Write the index block.
-	w.indexBlock.finish()
+	if err := w.indexBlock.finish(); err != nil {
+		return err
+	}
	indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
	if err != nil {
		w.err = err
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
index 0e2b519e5c..1ef859d4db 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -22,7 +22,7 @@ func shorten(str string) string {

var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"}

-func shortenb(bytes int) string {
+func shortenb(bytes int64) string {
	i := 0
	for ; bytes > 1024 && i < 4; i++ {
		bytes /= 1024
@@ -30,7 +30,7 @@
	}
	return fmt.Sprintf("%d%sB", bytes, bunits[i])
}

-func sshortenb(bytes int) string {
+func sshortenb(bytes int64) string {
	if bytes == 0 {
		return "~"
	}
@@ -58,13 +58,6 @@ func sint(x int) string {
	return fmt.Sprintf("%s%d", sign, x)
}

-func minInt(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
func maxInt(a, b int) int {
	if a > b {
		return a
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
index b47e79f020..4f512f6d3f 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -110,22 +110,22 @@ func NewBufferPool(baseline int) *BufferPool {
	bufPool := &BufferPool{
		baseline: [...]int{baseline / 4, baseline / 2, baseline, baseline * 2, baseline * 4},
		pool: [6]sync.Pool{
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
-			sync.Pool{
+			{
				New: func() interface{} { return new([]byte) },
			},
		},
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
index 9535e35914..467250917b 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/version.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
@@ -43,7 +43,7 @@ type version struct {

// newVersion creates a new version with a unique, monotonically increasing id.
func newVersion(s *session) *version { - id := atomic.AddInt64(&s.ntVersionId, 1) + id := atomic.AddInt64(&s.ntVersionID, 1) nv := &version{s: s, id: id - 1} return nv } @@ -388,7 +388,7 @@ func (v *version) computeCompaction() { } statFiles[level] = len(tables) - statSizes[level] = shortenb(int(size)) + statSizes[level] = shortenb(size) statScore[level] = fmt.Sprintf("%.2f", score) statTotSize += size } @@ -396,7 +396,7 @@ func (v *version) computeCompaction() { v.cLevel = bestLevel v.cScore = bestScore - v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore) + v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(statTotSize), statSizes, statScore) } func (v *version) needCompaction() bool { diff --git a/vendor/github.com/tektoncd/chains/pkg/artifacts/signable.go b/vendor/github.com/tektoncd/chains/pkg/artifacts/signable.go index 88bd77759f..cafbe44223 100644 --- a/vendor/github.com/tektoncd/chains/pkg/artifacts/signable.go +++ b/vendor/github.com/tektoncd/chains/pkg/artifacts/signable.go @@ -21,6 +21,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/opencontainers/go-digest" "github.com/tektoncd/chains/pkg/chains/formats" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" @@ -28,7 +29,7 @@ import ( ) type Signable interface { - ExtractObjects(tr *v1beta1.TaskRun) []interface{} + ExtractObjects(obj objects.TektonObject) []interface{} StorageBackend(cfg config.Config) sets.String Signer(cfg config.Config) string PayloadFormat(cfg config.Config) formats.PayloadType @@ -42,13 +43,14 @@ type TaskRunArtifact struct { } func (ta *TaskRunArtifact) Key(obj interface{}) string { - tr := obj.(*v1beta1.TaskRun) - return "taskrun-" + string(tr.UID) + tro := obj.(*objects.TaskRunObject) + return "taskrun-" + string(tro.UID) } -func (ta *TaskRunArtifact) ExtractObjects(tr *v1beta1.TaskRun) []interface{} { - return []interface{}{tr} +func (ta *TaskRunArtifact) ExtractObjects(obj objects.TektonObject) []interface{} { + return []interface{}{obj} } + func (ta *TaskRunArtifact) Type() string { return "tekton" } @@ -69,6 +71,40 @@ func (ta *TaskRunArtifact) Enabled(cfg config.Config) bool { return cfg.Artifacts.TaskRuns.Enabled() } +type PipelineRunArtifact struct { + Logger *zap.SugaredLogger +} + +func (pa *PipelineRunArtifact) Key(obj interface{}) string { + pro := obj.(*objects.PipelineRunObject) + return "pipelinerun-" + string(pro.UID) +} + +func (pa *PipelineRunArtifact) ExtractObjects(obj objects.TektonObject) []interface{} { + return []interface{}{obj} +} + +func (pa *PipelineRunArtifact) Type() string { + // TODO: Is this right? 
+ return "tekton-pipeline-run" +} + +func (pa *PipelineRunArtifact) StorageBackend(cfg config.Config) sets.String { + return cfg.Artifacts.PipelineRuns.StorageBackend +} + +func (pa *PipelineRunArtifact) PayloadFormat(cfg config.Config) formats.PayloadType { + return formats.PayloadType(cfg.Artifacts.PipelineRuns.Format) +} + +func (pa *PipelineRunArtifact) Signer(cfg config.Config) string { + return cfg.Artifacts.PipelineRuns.Signer +} + +func (pa *PipelineRunArtifact) Enabled(cfg config.Config) bool { + return cfg.Artifacts.PipelineRuns.Enabled() +} + type OCIArtifact struct { Logger *zap.SugaredLogger } @@ -86,49 +122,53 @@ type StructuredSignable struct { Digest string } -func (oa *OCIArtifact) ExtractObjects(tr *v1beta1.TaskRun) []interface{} { - imageResourceNames := map[string]*image{} - if tr.Status.TaskSpec != nil && tr.Status.TaskSpec.Resources != nil { - for _, output := range tr.Status.TaskSpec.Resources.Outputs { - if output.Type == v1beta1.PipelineResourceTypeImage { - imageResourceNames[output.Name] = &image{} +func (oa *OCIArtifact) ExtractObjects(obj objects.TektonObject) []interface{} { + objs := []interface{}{} + + // TODO: Not applicable to PipelineRuns, should look into a better way to separate this out + if tr, ok := obj.GetObject().(*v1beta1.TaskRun); ok { + imageResourceNames := map[string]*image{} + if tr.Status.TaskSpec != nil && tr.Status.TaskSpec.Resources != nil { + for _, output := range tr.Status.TaskSpec.Resources.Outputs { + if output.Type == v1beta1.PipelineResourceTypeImage { + imageResourceNames[output.Name] = &image{} + } } } - } - for _, rr := range tr.Status.ResourcesResult { - img, ok := imageResourceNames[rr.ResourceName] - if !ok { - continue - } - // We have a result for an image! - if rr.Key == "url" { - img.url = rr.Value - } else if rr.Key == "digest" { - img.digest = rr.Value + for _, rr := range tr.Status.ResourcesResult { + img, ok := imageResourceNames[rr.ResourceName] + if !ok { + continue + } + // We have a result for an image! + if rr.Key == "url" { + img.url = rr.Value + } else if rr.Key == "digest" { + img.digest = rr.Value + } } - } - objs := []interface{}{} - for _, image := range imageResourceNames { - dgst, err := name.NewDigest(fmt.Sprintf("%s@%s", image.url, image.digest)) - if err != nil { - oa.Logger.Error(err) - continue + for _, image := range imageResourceNames { + dgst, err := name.NewDigest(fmt.Sprintf("%s@%s", image.url, image.digest)) + if err != nil { + oa.Logger.Error(err) + continue + } + objs = append(objs, dgst) } - objs = append(objs, dgst) } // Now check TaskResults - resultImages := ExtractOCIImagesFromResults(tr, oa.Logger) + resultImages := ExtractOCIImagesFromResults(obj, oa.Logger) objs = append(objs, resultImages...) 
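+	// (The results-based extraction just above works for both TaskRuns and
+	// PipelineRuns through obj.GetResults(); only the PipelineResource scan earlier
+	// in this function is TaskRun-specific.)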
return objs } -func ExtractOCIImagesFromResults(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) []interface{} { - ss := extractTargetFromResults(tr, "IMAGE_URL", "IMAGE_DIGEST", logger) +func ExtractOCIImagesFromResults(obj objects.TektonObject, logger *zap.SugaredLogger) []interface{} { objs := []interface{}{} + ss := extractTargetFromResults(obj, "IMAGE_URL", "IMAGE_DIGEST", logger) for _, s := range ss { if s == nil || s.Digest == "" || s.URI == "" { continue @@ -142,7 +182,7 @@ func ExtractOCIImagesFromResults(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) objs = append(objs, dgst) } // look for a comma separated list of images - for _, key := range tr.Status.TaskRunResults { + for _, key := range obj.GetResults() { if key.Name != "IMAGES" { continue } @@ -166,9 +206,9 @@ func ExtractOCIImagesFromResults(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) } // ExtractSignableTargetFromResults extracts signable targets that aim to generate intoto provenance as materials within TaskRun results and store them as StructuredSignable. -func ExtractSignableTargetFromResults(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) []*StructuredSignable { +func ExtractSignableTargetFromResults(obj objects.TektonObject, logger *zap.SugaredLogger) []*StructuredSignable { objs := []*StructuredSignable{} - ss := extractTargetFromResults(tr, "ARTIFACT_URI", "ARTIFACT_DIGEST", logger) + ss := extractTargetFromResults(obj, "ARTIFACT_URI", "ARTIFACT_DIGEST", logger) // Only add it if we got both the signable URI and digest. for _, s := range ss { if s == nil || s.Digest == "" || s.URI == "" { @@ -190,10 +230,10 @@ func (s *StructuredSignable) FullRef() string { return fmt.Sprintf("%s@%s", s.URI, s.Digest) } -func extractTargetFromResults(tr *v1beta1.TaskRun, identifierSuffix string, digestSuffix string, logger *zap.SugaredLogger) map[string]*StructuredSignable { +func extractTargetFromResults(obj objects.TektonObject, identifierSuffix string, digestSuffix string, logger *zap.SugaredLogger) map[string]*StructuredSignable { ss := map[string]*StructuredSignable{} - for _, res := range tr.Status.TaskRunResults { + for _, res := range obj.GetResults() { if strings.HasSuffix(res.Name, identifierSuffix) { marker := strings.TrimSuffix(res.Name, identifierSuffix) if v, ok := ss[marker]; ok { diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/annotations.go b/vendor/github.com/tektoncd/chains/pkg/chains/annotations.go index 69fc51ba8e..6fb37d24ac 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/annotations.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/annotations.go @@ -19,11 +19,9 @@ import ( "strconv" "github.com/pkg/errors" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/patch" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) const ( @@ -34,52 +32,53 @@ const ( MaxRetries = 3 ) -// Reconciled determines whether a TaskRun has already passed through the reconcile loops, up to 3x -func Reconciled(tr *v1beta1.TaskRun) bool { - val, ok := tr.ObjectMeta.Annotations[ChainsAnnotation] +// Reconciled determines whether a Tekton object has already passed through the reconcile loops, up to 3x +func Reconciled(obj objects.TektonObject) bool { + annotations := obj.GetAnnotations() + val, ok := annotations[ChainsAnnotation] if !ok { return false } return val == "true" || val == "failed" } -// MarkSigned marks a TaskRun 
as signed. -func MarkSigned(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interface, annotations map[string]string) error { - if _, ok := tr.Annotations[ChainsAnnotation]; ok { +// MarkSigned marks a Tekton object as signed. +func MarkSigned(ctx context.Context, obj objects.TektonObject, ps versioned.Interface, annotations map[string]string) error { + if _, ok := obj.GetAnnotations()[ChainsAnnotation]; ok { return nil } - return AddAnnotation(ctx, tr, ps, ChainsAnnotation, "true", annotations) + return AddAnnotation(ctx, obj, ps, ChainsAnnotation, "true", annotations) } -func MarkFailed(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interface, annotations map[string]string) error { - return AddAnnotation(ctx, tr, ps, ChainsAnnotation, "failed", annotations) +func MarkFailed(ctx context.Context, obj objects.TektonObject, ps versioned.Interface, annotations map[string]string) error { + return AddAnnotation(ctx, obj, ps, ChainsAnnotation, "failed", annotations) } -func RetryAvailable(tr *v1beta1.TaskRun) bool { - retries, ok := tr.Annotations[RetryAnnotation] +func RetryAvailable(obj objects.TektonObject) bool { + ann, ok := obj.GetAnnotations()[RetryAnnotation] if !ok { return true } - val, err := strconv.Atoi(retries) + val, err := strconv.Atoi(ann) if err != nil { return false } return val < MaxRetries } -func AddRetry(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interface, annotations map[string]string) error { - retries := tr.Annotations[RetryAnnotation] - if retries == "" { - return AddAnnotation(ctx, tr, ps, RetryAnnotation, "0", annotations) +func AddRetry(ctx context.Context, obj objects.TektonObject, ps versioned.Interface, annotations map[string]string) error { + ann := obj.GetAnnotations()[RetryAnnotation] + if ann == "" { + return AddAnnotation(ctx, obj, ps, RetryAnnotation, "0", annotations) } - val, err := strconv.Atoi(retries) + val, err := strconv.Atoi(ann) if err != nil { return errors.Wrap(err, "adding retry") } - return AddAnnotation(ctx, tr, ps, RetryAnnotation, fmt.Sprintf("%d", val+1), annotations) + return AddAnnotation(ctx, obj, ps, RetryAnnotation, fmt.Sprintf("%d", val+1), annotations) } -func AddAnnotation(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interface, key, value string, annotations map[string]string) error { +func AddAnnotation(ctx context.Context, obj objects.TektonObject, ps versioned.Interface, key, value string, annotations map[string]string) error { // Use patch instead of update to help prevent race conditions. if annotations == nil { annotations = map[string]string{} @@ -89,8 +88,8 @@ func AddAnnotation(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interf if err != nil { return err } - if _, err := ps.TektonV1beta1().TaskRuns(tr.Namespace).Patch( - ctx, tr.Name, types.MergePatchType, patchBytes, v1.PatchOptions{}); err != nil { + err = obj.Patch(ctx, ps, patchBytes) + if err != nil { return err } return nil diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest/attest.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest/attest.go new file mode 100644 index 0000000000..653197be52 --- /dev/null +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest/attest.go @@ -0,0 +1,87 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package attest + +import ( + "fmt" + "strings" + + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +const ( + CommitParam = "CHAINS-GIT_COMMIT" + URLParam = "CHAINS-GIT_URL" + ChainsReproducibleAnnotation = "chains.tekton.dev/reproducible" +) + +type StepAttestation struct { + EntryPoint string `json:"entryPoint"` + Arguments interface{} `json:"arguments,omitempty"` + Environment interface{} `json:"environment,omitempty"` + Annotations map[string]string `json:"annotations"` +} + +func Step(step *v1beta1.Step, stepState *v1beta1.StepState) StepAttestation { + attestation := StepAttestation{} + + entrypoint := strings.Join(step.Command, " ") + if step.Script != "" { + entrypoint = step.Script + } + attestation.EntryPoint = entrypoint + attestation.Arguments = step.Args + + env := map[string]interface{}{} + env["image"] = stepState.ImageID + env["container"] = stepState.Name + attestation.Environment = env + + return attestation +} + +func Invocation(params []v1beta1.Param, paramSpecs []v1beta1.ParamSpec) slsa.ProvenanceInvocation { + i := slsa.ProvenanceInvocation{} + iParams := make(map[string]v1beta1.ArrayOrString) + + // get implicit parameters from defaults + for _, p := range paramSpecs { + if p.Default != nil { + iParams[p.Name] = *p.Default + } + } + + // get explicit parameters + for _, p := range params { + iParams[p.Name] = p.Value + } + + i.Parameters = iParams + return i +} + +// supports the SPDX format which is recommended by in-toto +// ref: https://spdx.dev/spdx-specification-21-web-version/#h.49x2ik5 +// ref: https://github.com/in-toto/attestation/blob/849867bee97e33678f61cc6bd5da293097f84c25/spec/field_types.md +func SPDXGit(url, revision string) string { + prefix := "git+" + if revision == "" { + return prefix + url + ".git" + } + return prefix + url + fmt.Sprintf("@%s", revision) +} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/extract/extract.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/extract/extract.go new file mode 100644 index 0000000000..062f5b678b --- /dev/null +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/extract/extract.go @@ -0,0 +1,107 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package extract
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+	"github.com/tektoncd/chains/pkg/artifacts"
+	"github.com/tektoncd/chains/pkg/chains/objects"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1"
+	"go.uber.org/zap"
+)
+
+// SubjectDigests extracts OCI images from the TaskRun based on the standard hinting set up.
+// It also goes through looking for any PipelineResources of Image type
+func SubjectDigests(obj objects.TektonObject, logger *zap.SugaredLogger) []intoto.Subject {
+	var subjects []intoto.Subject
+
+	imgs := artifacts.ExtractOCIImagesFromResults(obj, logger)
+	for _, i := range imgs {
+		if d, ok := i.(name.Digest); ok {
+			subjects = append(subjects, intoto.Subject{
+				Name: d.Repository.Name(),
+				Digest: slsa.DigestSet{
+					"sha256": strings.TrimPrefix(d.DigestStr(), "sha256:"),
+				},
+			})
+		}
+	}
+
+	sts := artifacts.ExtractSignableTargetFromResults(obj, logger)
+	for _, obj := range sts {
+		splits := strings.Split(obj.Digest, ":")
+		if len(splits) != 2 {
+			logger.Errorf("Digest %s should be in the format of: algorithm:abc", obj.Digest)
+			continue
+		}
+		subjects = append(subjects, intoto.Subject{
+			Name: obj.URI,
+			Digest: slsa.DigestSet{
+				splits[0]: splits[1],
+			},
+		})
+	}
+
+	// Check if object is a TaskRun; if so, search for images used in PipelineResources.
+	// Otherwise object is a PipelineRun, where PipelineResources are not relevant.
+	// PipelineResources have been deprecated so their support has been left out of
+	// the POC for TEP-84
+	// More info: https://tekton.dev/docs/pipelines/resources/
+	tr, ok := obj.GetObject().(*v1beta1.TaskRun)
+	if !ok || tr.Spec.Resources == nil {
+		return subjects
+	}
+
+	// go through resourcesResult
+	for _, output := range tr.Spec.Resources.Outputs {
+		name := output.Name
+		if output.PipelineResourceBinding.ResourceSpec == nil {
+			continue
+		}
+		// similarly, we could do this for other pipeline resources or whatever thing replaces them
+		if output.PipelineResourceBinding.ResourceSpec.Type == v1alpha1.PipelineResourceTypeImage {
+			// get the url and digest, and save as a subject
+			var url, digest string
+			for _, s := range tr.Status.ResourcesResult {
+				if s.ResourceName == name {
+					if s.Key == "url" {
+						url = s.Value
+					}
+					if s.Key == "digest" {
+						digest = s.Value
+					}
+				}
+			}
+			subjects = append(subjects, intoto.Subject{
+				Name: url,
+				Digest: slsa.DigestSet{
+					"sha256": strings.TrimPrefix(digest, "sha256:"),
+				},
+			})
+		}
+	}
+	sort.Slice(subjects, func(i, j int) bool {
+		return subjects[i].Name <= subjects[j].Name
+	})
+	return subjects
+}
diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/intotoite6.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/intotoite6.go
index b46e0535df..c1fbb38a69 100644
--- a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/intotoite6.go
+++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/intotoite6.go
@@ -18,26 +18,15 @@ package intotoite6

import (
	"fmt"
-	"sort"
-	"strings"
-	"github.com/google/go-containerregistry/pkg/name"
-	intoto "github.com/in-toto/in-toto-golang/in_toto"
-	slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
-	"github.com/tektoncd/chains/pkg/artifacts"
	"github.com/tektoncd/chains/pkg/chains/formats"
+
"github.com/tektoncd/chains/pkg/chains/formats/intotoite6/pipelinerun" + "github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "go.uber.org/zap" ) -const ( - commitParam = "CHAINS-GIT_COMMIT" - urlParam = "CHAINS-GIT_URL" - ChainsReproducibleAnnotation = "chains.tekton.dev/reproducible" -) - type InTotoIte6 struct { builderID string logger *zap.SugaredLogger @@ -55,260 +44,16 @@ func (i *InTotoIte6) Wrap() bool { } func (i *InTotoIte6) CreatePayload(obj interface{}) (interface{}, error) { - var tr *v1beta1.TaskRun switch v := obj.(type) { - case *v1beta1.TaskRun: - tr = v + case *objects.TaskRunObject: + return taskrun.GenerateAttestation(i.builderID, v, i.logger) + case *objects.PipelineRunObject: + return pipelinerun.GenerateAttestation(i.builderID, v, i.logger) default: return nil, fmt.Errorf("intoto does not support type: %s", v) } - return i.generateAttestationFromTaskRun(tr) -} - -// generateAttestationFromTaskRun translates a Tekton TaskRun into an in-toto attestation -// with the slsa-provenance predicate type -func (i *InTotoIte6) generateAttestationFromTaskRun(tr *v1beta1.TaskRun) (interface{}, error) { - subjects := GetSubjectDigests(tr, i.logger) - - att := intoto.ProvenanceStatement{ - StatementHeader: intoto.StatementHeader{ - Type: intoto.StatementInTotoV01, - PredicateType: slsa.PredicateSLSAProvenance, - Subject: subjects, - }, - Predicate: slsa.ProvenancePredicate{ - Builder: slsa.ProvenanceBuilder{ - ID: i.builderID, - }, - BuildType: fmt.Sprintf("%s/%s", tr.GetGroupVersionKind().GroupVersion().String(), tr.GetGroupVersionKind().Kind), - Invocation: invocation(tr), - BuildConfig: buildConfig(tr), - Metadata: metadata(tr), - Materials: materials(tr), - }, - } - return att, nil -} - -func metadata(tr *v1beta1.TaskRun) *slsa.ProvenanceMetadata { - m := &slsa.ProvenanceMetadata{} - if tr.Status.StartTime != nil { - m.BuildStartedOn = &tr.Status.StartTime.Time - } - if tr.Status.CompletionTime != nil { - m.BuildFinishedOn = &tr.Status.CompletionTime.Time - } - for label, value := range tr.Labels { - if label == ChainsReproducibleAnnotation && value == "true" { - m.Reproducible = true - } - } - return m -} - -// invocation describes the event that kicked off the build -// we currently don't set ConfigSource because we don't know -// which material the Task definition came from -func invocation(tr *v1beta1.TaskRun) slsa.ProvenanceInvocation { - i := slsa.ProvenanceInvocation{} - params := make(map[string]v1beta1.ArrayOrString) - - // get implicit parameters from defaults - if ts := tr.Status.TaskSpec; ts != nil { - for _, p := range ts.Params { - if p.Default != nil { - params[p.Name] = *p.Default - } - } - } - - // get explicit parameters - for _, p := range tr.Spec.Params { - params[p.Name] = p.Value - } - - i.Parameters = params - return i -} - -// GetSubjectDigests extracts OCI images from the TaskRun based on standard hinting set up -// It also goes through looking for any PipelineResources of Image type -func GetSubjectDigests(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) []intoto.Subject { - var subjects []intoto.Subject - - imgs := artifacts.ExtractOCIImagesFromResults(tr, logger) - for _, i := range imgs { - if d, ok := i.(name.Digest); ok { - subjects = append(subjects, intoto.Subject{ - Name: d.Repository.Name(), - Digest: slsa.DigestSet{ - 
"sha256": strings.TrimPrefix(d.DigestStr(), "sha256:"), - }, - }) - } - } - - sts := artifacts.ExtractSignableTargetFromResults(tr, logger) - for _, obj := range sts { - splits := strings.Split(obj.Digest, ":") - if len(splits) != 2 { - logger.Errorf("Digest %s should be in the format of: algorthm:abc", obj.Digest) - continue - } - subjects = append(subjects, intoto.Subject{ - Name: obj.URI, - Digest: slsa.DigestSet{ - splits[0]: splits[1], - }, - }) - } - - if tr.Spec.Resources == nil { - return subjects - } - - // go through resourcesResult - for _, output := range tr.Spec.Resources.Outputs { - name := output.Name - if output.PipelineResourceBinding.ResourceSpec == nil { - continue - } - // similarly, we could do this for other pipeline resources or whatever thing replaces them - if output.PipelineResourceBinding.ResourceSpec.Type == v1alpha1.PipelineResourceTypeImage { - // get the url and digest, and save as a subject - var url, digest string - for _, s := range tr.Status.ResourcesResult { - if s.ResourceName == name { - if s.Key == "url" { - url = s.Value - } - if s.Key == "digest" { - digest = s.Value - } - } - } - subjects = append(subjects, intoto.Subject{ - Name: url, - Digest: slsa.DigestSet{ - "sha256": strings.TrimPrefix(digest, "sha256:"), - }, - }) - } - } - sort.Slice(subjects, func(i, j int) bool { - return subjects[i].Name <= subjects[j].Name - }) - return subjects -} - -// add any Git specification to materials -func materials(tr *v1beta1.TaskRun) []slsa.ProvenanceMaterial { - var mats []slsa.ProvenanceMaterial - gitCommit, gitURL := gitInfo(tr) - - // Store git rev as Materials and Recipe.Material - if gitCommit != "" && gitURL != "" { - mats = append(mats, slsa.ProvenanceMaterial{ - URI: gitURL, - Digest: map[string]string{"sha1": gitCommit}, - }) - return mats - } - - if tr.Spec.Resources == nil { - return mats - } - - // check for a Git PipelineResource - for _, input := range tr.Spec.Resources.Inputs { - if input.ResourceSpec == nil || input.ResourceSpec.Type != v1alpha1.PipelineResourceTypeGit { - continue - } - - m := slsa.ProvenanceMaterial{ - Digest: slsa.DigestSet{}, - } - - for _, rr := range tr.Status.ResourcesResult { - if rr.ResourceName != input.Name { - continue - } - if rr.Key == "url" { - m.URI = spdxGit(rr.Value, "") - } else if rr.Key == "commit" { - m.Digest["sha1"] = rr.Value - } - } - - var url string - var revision string - for _, param := range input.ResourceSpec.Params { - if param.Name == "url" { - url = param.Value - } - if param.Name == "revision" { - revision = param.Value - } - } - m.URI = spdxGit(url, revision) - mats = append(mats, m) - } - return mats } func (i *InTotoIte6) Type() formats.PayloadType { return formats.PayloadTypeInTotoIte6 } - -// gitInfo scans over the input parameters and looks for parameters -// with specified names. 
-func gitInfo(tr *v1beta1.TaskRun) (commit string, url string) { - // Scan for git params to use for materials - if tr.Status.TaskSpec != nil { - for _, p := range tr.Status.TaskSpec.Params { - if p.Default == nil { - continue - } - if p.Name == commitParam { - commit = p.Default.StringVal - continue - } - if p.Name == urlParam { - url = p.Default.StringVal - } - } - } - - for _, p := range tr.Spec.Params { - if p.Name == commitParam { - commit = p.Value.StringVal - continue - } - if p.Name == urlParam { - url = p.Value.StringVal - } - } - - for _, r := range tr.Status.TaskRunResults { - if r.Name == commitParam { - commit = r.Value.StringVal - } - if r.Name == urlParam { - url = r.Value.StringVal - } - } - - url = spdxGit(url, "") - return -} - -// supports the SPDX format which is recommended by in-toto -// ref: https://spdx.dev/spdx-specification-21-web-version/#h.49x2ik5 -// ref: https://github.com/in-toto/attestation/blob/849867bee97e33678f61cc6bd5da293097f84c25/spec/field_types.md -func spdxGit(url, revision string) string { - prefix := "git+" - if revision == "" { - return prefix + url + ".git" - } - return prefix + url + fmt.Sprintf("@%s", revision) -} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/pipelinerun/pipelinerun.go new file mode 100644 index 0000000000..760a742c3e --- /dev/null +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/pipelinerun/pipelinerun.go @@ -0,0 +1,237 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package pipelinerun
+
+import (
+	"time"
+
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
+	"github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest"
+	"github.com/tektoncd/chains/pkg/chains/formats/intotoite6/extract"
+	"github.com/tektoncd/chains/pkg/chains/objects"
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"go.uber.org/zap"
+	corev1 "k8s.io/api/core/v1"
+	"knative.dev/pkg/apis"
+)
+
+const (
+	TektonPipelineRunID = "https://tekton.dev/attestations/chains/pipelinerun@v2"
+)
+
+type BuildConfig struct {
+	Tasks []TaskAttestation `json:"tasks"`
+}
+
+type TaskAttestation struct {
+	Name       string                    `json:"name,omitempty"`
+	After      []string                  `json:"after,omitempty"`
+	Ref        v1beta1.TaskRef           `json:"ref,omitempty"`
+	StartedOn  time.Time                 `json:"startedOn,omitempty"`
+	FinishedOn time.Time                 `json:"finishedOn,omitempty"`
+	Status     string                    `json:"status,omitempty"`
+	Steps      []attest.StepAttestation  `json:"steps,omitempty"`
+	Invocation slsa.ProvenanceInvocation `json:"invocation,omitempty"`
+	Results    []v1beta1.TaskRunResult   `json:"results,omitempty"`
+}
+
+func GenerateAttestation(builderID string, pro *objects.PipelineRunObject, logger *zap.SugaredLogger) (interface{}, error) {
+	subjects := extract.SubjectDigests(pro, logger)
+
+	att := intoto.ProvenanceStatement{
+		StatementHeader: intoto.StatementHeader{
+			Type:          intoto.StatementInTotoV01,
+			PredicateType: slsa.PredicateSLSAProvenance,
+			Subject:       subjects,
+		},
+		Predicate: slsa.ProvenancePredicate{
+			Builder: slsa.ProvenanceBuilder{
+				ID: builderID,
+			},
+			BuildType:   TektonPipelineRunID,
+			Invocation:  invocation(pro),
+			BuildConfig: buildConfig(pro, logger),
+			Metadata:    metadata(pro),
+			Materials:   materials(pro),
+		},
+	}
+	return att, nil
+}
+
+func invocation(pro *objects.PipelineRunObject) slsa.ProvenanceInvocation {
+	var paramSpecs []v1beta1.ParamSpec
+	if ps := pro.Status.PipelineSpec; ps != nil {
+		paramSpecs = ps.Params
+	}
+	return attest.Invocation(pro.Spec.Params, paramSpecs)
+}
+
+func buildConfig(pro *objects.PipelineRunObject, logger *zap.SugaredLogger) BuildConfig {
+	tasks := []TaskAttestation{}
+
+	pSpec := pro.Status.PipelineSpec
+	if pSpec == nil {
+		return BuildConfig{}
+	}
+	pipelineTasks := append(pSpec.Tasks, pSpec.Finally...)
+
+	var last string
+	for i, t := range pipelineTasks {
+		tr := pro.GetTaskRunFromTask(t.Name)
+
+		// Ignore Tasks that did not execute during the PipelineRun.
+		if tr == nil || tr.Status.CompletionTime == nil {
+			logger.Infof("taskrun status not found for task %s", t.Name)
+			continue
+		}
+		steps := []attest.StepAttestation{}
+		for i, stepState := range tr.Status.Steps {
+			step := tr.Status.TaskSpec.Steps[i]
+			steps = append(steps, attest.Step(&step, &stepState))
+		}
+		after := t.RunAfter
+
+		// Establish task order by retrieving all tasks referenced
+		// in the "when" and "params" fields
+		refs := v1beta1.PipelineTaskResultRefs(&t)
+		for _, ref := range refs {
+
+			// Ensure task doesn't already exist in after
+			found := false
+			for _, at := range after {
+				if at == ref.PipelineTask {
+					found = true
+				}
+			}
+			if !found {
+				after = append(after, ref.PipelineTask)
+			}
+		}
+
+		// t is a finally task without an explicit runAfter value. It must have executed
+		// after the last non-finally task, if any non-finally tasks were executed.
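+		// (pipelineTasks is pSpec.Tasks followed by pSpec.Finally, so i >= len(pSpec.Tasks)
+		// selects the finally section; `last` holds the most recent non-finally task that
+		// was appended to the attestation.)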
+ if len(after) == 0 && i >= len(pSpec.Tasks) && last != "" { + after = append(after, last) + } + params := tr.Spec.Params + var paramSpecs []v1beta1.ParamSpec + if tr.Status.TaskSpec != nil { + paramSpecs = tr.Status.TaskSpec.Params + } else { + paramSpecs = []v1beta1.ParamSpec{} + } + task := TaskAttestation{ + Name: t.Name, + After: after, + StartedOn: tr.Status.StartTime.Time, + FinishedOn: tr.Status.CompletionTime.Time, + Status: getStatus(tr.Status.Conditions), + Steps: steps, + Invocation: attest.Invocation(params, paramSpecs), + Results: tr.Status.TaskRunResults, + } + + if t.TaskRef != nil { + task.Ref = *t.TaskRef + } + + tasks = append(tasks, task) + if i < len(pSpec.Tasks) { + last = task.Name + } + } + return BuildConfig{Tasks: tasks} +} + +func metadata(pro *objects.PipelineRunObject) *slsa.ProvenanceMetadata { + m := &slsa.ProvenanceMetadata{} + if pro.Status.StartTime != nil { + m.BuildStartedOn = &pro.Status.StartTime.Time + } + if pro.Status.CompletionTime != nil { + m.BuildFinishedOn = &pro.Status.CompletionTime.Time + } + for label, value := range pro.Labels { + if label == attest.ChainsReproducibleAnnotation && value == "true" { + m.Reproducible = true + } + } + return m +} + +// add any Git specification to materials +func materials(pro *objects.PipelineRunObject) []slsa.ProvenanceMaterial { + var mats []slsa.ProvenanceMaterial + var commit, url string + // search spec.params + for _, p := range pro.Spec.Params { + if p.Name == attest.CommitParam { + commit = p.Value.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Value.StringVal + } + } + + // search status.PipelineSpec.params + if pro.Status.PipelineSpec != nil { + for _, p := range pro.Status.PipelineSpec.Params { + if p.Default == nil { + continue + } + if p.Name == attest.CommitParam { + commit = p.Default.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Default.StringVal + } + } + } + + // search status.PipelineRunResults + for _, r := range pro.Status.PipelineResults { + if r.Name == attest.CommitParam { + commit = r.Value.StringVal + } + if r.Name == attest.URLParam { + url = r.Value.StringVal + } + } + url = attest.SPDXGit(url, "") + mats = append(mats, slsa.ProvenanceMaterial{ + URI: url, + Digest: map[string]string{"sha1": commit}, + }) + return mats +} + +// Following tkn cli's behavior +// https://github.com/tektoncd/cli/blob/6afbb0f0dbc7186898568f0d4a0436b8b2994d99/pkg/formatted/k8s.go#L55 +func getStatus(conditions []apis.Condition) string { + var status string + if len(conditions) > 0 { + switch conditions[0].Status { + case corev1.ConditionFalse: + status = "Failed" + case corev1.ConditionTrue: + status = "Succeeded" + case corev1.ConditionUnknown: + status = "Running" // Should never happen + } + } + return status +} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/buildconfig.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/buildconfig.go similarity index 57% rename from vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/buildconfig.go rename to vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/buildconfig.go index 81d2ce114b..10eb31ebe9 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/buildconfig.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/buildconfig.go @@ -14,18 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package intotoite6 +package taskrun import ( - "strings" - + "github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) // BuildConfig is the custom Chains format to fill out the // "buildConfig" section of the slsa-provenance predicate type BuildConfig struct { - Steps []Step `json:"steps"` + Steps []attest.StepAttestation `json:"steps"` } // Step corresponds to one step in the TaskRun @@ -36,39 +36,22 @@ type Step struct { Annotations map[string]string `json:"annotations"` } -func buildConfig(tr *v1beta1.TaskRun) BuildConfig { - steps := []Step{} - for _, step := range tr.Status.Steps { - s := Step{} - c := container(step, tr) - // get the entrypoint - entrypoint := strings.Join(c.Command, " ") - if c.Script != "" { - entrypoint = c.Script - } - s.EntryPoint = entrypoint - s.Arguments = c.Args - - // env comprises of: - env := map[string]interface{}{} - env["image"] = step.ImageID - env["container"] = step.Name - s.Environment = env - - // append to all of the steps - steps = append(steps, s) +func buildConfig(tro *objects.TaskRunObject) BuildConfig { + attestations := []attest.StepAttestation{} + for _, stepState := range tro.Status.Steps { + step := stepFromTaskRun(stepState.Name, tro) + attestations = append(attestations, attest.Step(step, &stepState)) } - return BuildConfig{Steps: steps} + return BuildConfig{Steps: attestations} } -func container(stepState v1beta1.StepState, tr *v1beta1.TaskRun) v1beta1.Step { - name := stepState.Name - if tr.Status.TaskSpec != nil { - for _, s := range tr.Status.TaskSpec.Steps { +func stepFromTaskRun(name string, tro *objects.TaskRunObject) *v1beta1.Step { + if tro.Status.TaskSpec != nil { + for _, s := range tro.Status.TaskSpec.Steps { if s.Name == name { - return s + return &s } } } - return v1beta1.Step{} + return &v1beta1.Step{} } diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/taskrun.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/taskrun.go new file mode 100644 index 0000000000..6f4f71caa0 --- /dev/null +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/intotoite6/taskrun/taskrun.go @@ -0,0 +1,180 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package taskrun + +import ( + "fmt" + + intoto "github.com/in-toto/in-toto-golang/in_toto" + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/tektoncd/chains/pkg/chains/formats/intotoite6/attest" + "github.com/tektoncd/chains/pkg/chains/formats/intotoite6/extract" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + "go.uber.org/zap" +) + +const ( + TektonID = "https://tekton.dev/attestations/chains@v2" +) + +func GenerateAttestation(builderID string, tro *objects.TaskRunObject, logger *zap.SugaredLogger) (interface{}, error) { + subjects := extract.SubjectDigests(tro, logger) + + tr := tro.GetObject().(*v1beta1.TaskRun) + + att := intoto.ProvenanceStatement{ + StatementHeader: intoto.StatementHeader{ + Type: intoto.StatementInTotoV01, + PredicateType: slsa.PredicateSLSAProvenance, + Subject: subjects, + }, + Predicate: slsa.ProvenancePredicate{ + Builder: slsa.ProvenanceBuilder{ + ID: builderID, + }, + BuildType: fmt.Sprintf("%s/%s", tr.GetGroupVersionKind().GroupVersion().String(), tr.GetGroupVersionKind().Kind), + Invocation: invocation(tro), + BuildConfig: buildConfig(tro), + Metadata: metadata(tro), + Materials: materials(tro), + }, + } + return att, nil +} + +// invocation describes the event that kicked off the build +// we currently don't set ConfigSource because we don't know +// which material the Task definition came from +func invocation(tro *objects.TaskRunObject) slsa.ProvenanceInvocation { + var paramSpecs []v1beta1.ParamSpec + if ts := tro.Status.TaskSpec; ts != nil { + paramSpecs = ts.Params + } + return attest.Invocation(tro.Spec.Params, paramSpecs) +} + +func metadata(tro *objects.TaskRunObject) *slsa.ProvenanceMetadata { + m := &slsa.ProvenanceMetadata{} + if tro.Status.StartTime != nil { + m.BuildStartedOn = &tro.Status.StartTime.Time + } + if tro.Status.CompletionTime != nil { + m.BuildFinishedOn = &tro.Status.CompletionTime.Time + } + for label, value := range tro.Labels { + if label == attest.ChainsReproducibleAnnotation && value == "true" { + m.Reproducible = true + } + } + return m +} + +// add any Git specification to materials +func materials(tro *objects.TaskRunObject) []slsa.ProvenanceMaterial { + var mats []slsa.ProvenanceMaterial + gitCommit, gitURL := gitInfo(tro) + + // Store git rev as Materials and Recipe.Material + if gitCommit != "" && gitURL != "" { + mats = append(mats, slsa.ProvenanceMaterial{ + URI: gitURL, + Digest: map[string]string{"sha1": gitCommit}, + }) + return mats + } + + if tro.Spec.Resources == nil { + return mats + } + + // check for a Git PipelineResource + for _, input := range tro.Spec.Resources.Inputs { + if input.ResourceSpec == nil || input.ResourceSpec.Type != v1alpha1.PipelineResourceTypeGit { + continue + } + + m := slsa.ProvenanceMaterial{ + Digest: slsa.DigestSet{}, + } + + for _, rr := range tro.Status.ResourcesResult { + if rr.ResourceName != input.Name { + continue + } + if rr.Key == "url" { + m.URI = attest.SPDXGit(rr.Value, "") + } else if rr.Key == "commit" { + m.Digest["sha1"] = rr.Value + } + } + + var url string + var revision string + for _, param := range input.ResourceSpec.Params { + if param.Name == "url" { + url = param.Value + } + if param.Name == "revision" { + revision = param.Value + } + } + m.URI = attest.SPDXGit(url, revision) + mats = append(mats, m) + } + return mats +} + +// gitInfo scans over the input parameters and looks for parameters +// 
with specified names. +func gitInfo(tro *objects.TaskRunObject) (commit string, url string) { + // Scan for git params to use for materials + if tro.Status.TaskSpec != nil { + for _, p := range tro.Status.TaskSpec.Params { + if p.Default == nil { + continue + } + if p.Name == attest.CommitParam { + commit = p.Default.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Default.StringVal + } + } + } + + for _, p := range tro.Spec.Params { + if p.Name == attest.CommitParam { + commit = p.Value.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Value.StringVal + } + } + + for _, r := range tro.Status.TaskRunResults { + if r.Name == attest.CommitParam { + commit = r.Value.StringVal + } + if r.Name == attest.URLParam { + url = r.Value.StringVal + } + } + + url = attest.SPDXGit(url, "") + return +} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/formats/tekton/tekton.go b/vendor/github.com/tektoncd/chains/pkg/chains/formats/tekton/tekton.go index b47393dc36..9b7903cc8c 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/formats/tekton/tekton.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/formats/tekton/tekton.go @@ -17,8 +17,7 @@ import ( "fmt" "github.com/tektoncd/chains/pkg/chains/formats" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/chains/pkg/chains/objects" ) // Tekton is a formatter that just captures the TaskRun Status with no modifications. @@ -31,14 +30,14 @@ func NewFormatter() (formats.Payloader, error) { // CreatePayload implements the Payloader interface. func (i *Tekton) CreatePayload(obj interface{}) (interface{}, error) { - switch v := obj.(type) { - case *v1beta1.TaskRun: + case *objects.TaskRunObject: + return v.Status, nil + case *objects.PipelineRunObject: return v.Status, nil default: return nil, fmt.Errorf("unsupported type %s", v) } - } func (i *Tekton) Type() formats.PayloadType { diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/objects/objects.go b/vendor/github.com/tektoncd/chains/pkg/chains/objects/objects.go new file mode 100644 index 0000000000..ca9199ecca --- /dev/null +++ b/vendor/github.com/tektoncd/chains/pkg/chains/objects/objects.go @@ -0,0 +1,181 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package objects + +import ( + "context" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// Label added to TaskRuns identifying the associated pipeline Task +const PipelineTaskLabel = "tekton.dev/pipelineTask" + +// Object is used as a base object of all Kubernetes objects +// ref: https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.4/pkg/client#Object +type Object interface { + // Metadata associated to all Kubernetes objects + metav1.Object + // Runtime identifying data + runtime.Object +} + +// Result is a generic key value store containing the results +// of Tekton operations. (eg. PipelineRun and TaskRun results) +type Result struct { + Name string + Type v1beta1.ResultsType + Value v1beta1.ArrayOrString +} + +// Tekton object is an extended Kubernetes object with operations specific +// to Tekton objects. +type TektonObject interface { + Object + GetKind() string + GetObject() interface{} + GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) + Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error + GetResults() []Result + GetServiceAccountName() string +} + +// TaskRunObject extends v1beta1.TaskRun with additional functions. +type TaskRunObject struct { + *v1beta1.TaskRun +} + +func NewTaskRunObject(tr *v1beta1.TaskRun) *TaskRunObject { + return &TaskRunObject{ + tr, + } +} + +// Get the TaskRun kind +func (tro *TaskRunObject) GetKind() string { + return tro.GetObjectKind().GroupVersionKind().Kind +} + +// Get the latest annotations on the TaskRun +func (tro *TaskRunObject) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { + tr, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Get(ctx, tro.Name, v1.GetOptions{}) + return tr.Annotations, err +} + +// Get the base TaskRun object +func (tro *TaskRunObject) GetObject() interface{} { + return tro.TaskRun +} + +// Patch the original TaskRun object +func (tro *TaskRunObject) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error { + _, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Patch( + ctx, tro.Name, types.MergePatchType, patchBytes, v1.PatchOptions{}) + return err +} + +// Get the TaskRun results +func (tro *TaskRunObject) GetResults() []Result { + res := []Result{} + for _, key := range tro.Status.TaskRunResults { + res = append(res, Result{ + Name: key.Name, + Value: key.Value, + }) + } + return res +} + +// Get the ServiceAccount declared in the TaskRun +func (tro *TaskRunObject) GetServiceAccountName() string { + return tro.Spec.ServiceAccountName +} + +// PipelineRunObject extends v1beta1.PipelineRun with additional functions. 
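+//
+// A minimal usage sketch, assuming a *v1beta1.PipelineRun pr, a context ctx,
+// and a Tekton clientset cs are in scope ("chains.tekton.dev/signed" is the
+// annotation Chains uses to mark already-signed objects):
+//
+//	obj := objects.NewPipelineRunObject(pr)
+//	anns, err := obj.GetLatestAnnotations(ctx, cs) // re-reads from the API server
+//	if err == nil && anns["chains.tekton.dev/signed"] == "true" {
+//		// already signed; nothing to do
+//	}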
+type PipelineRunObject struct {
+	// The base PipelineRun
+	*v1beta1.PipelineRun
+	// TaskRuns that were a part of this PipelineRun
+	taskRuns []*v1beta1.TaskRun
+}
+
+func NewPipelineRunObject(pr *v1beta1.PipelineRun) *PipelineRunObject {
+	return &PipelineRunObject{
+		PipelineRun: pr,
+	}
+}
+
+// Get the PipelineRun kind
+func (pro *PipelineRunObject) GetKind() string {
+	return pro.GetObjectKind().GroupVersionKind().Kind
+}
+
+// Request the current annotations on the PipelineRun object
+func (pro *PipelineRunObject) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) {
+	pr, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Get(ctx, pro.Name, v1.GetOptions{})
+	return pr.Annotations, err
+}
+
+// Get the base PipelineRun
+func (pro *PipelineRunObject) GetObject() interface{} {
+	return pro.PipelineRun
+}
+
+// Patch the original PipelineRun object
+func (pro *PipelineRunObject) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error {
+	_, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Patch(
+		ctx, pro.Name, types.MergePatchType, patchBytes, v1.PatchOptions{})
+	return err
+}
+
+// Get the resolved PipelineRun results
+func (pro *PipelineRunObject) GetResults() []Result {
+	res := []Result{}
+	for _, key := range pro.Status.PipelineResults {
+		res = append(res, Result{
+			Name: key.Name,
+			Value: v1beta1.ArrayOrString{
+				Type:      v1beta1.ParamTypeString,
+				StringVal: key.Value.StringVal,
+			},
+		})
+	}
+	return res
+}
+
+// Get the ServiceAccount declared in the PipelineRun
+func (pro *PipelineRunObject) GetServiceAccountName() string {
+	return pro.Spec.ServiceAccountName
+}
+
+// Append TaskRuns to this PipelineRun
+func (pro *PipelineRunObject) AppendTaskRun(tr *v1beta1.TaskRun) {
+	pro.taskRuns = append(pro.taskRuns, tr)
+}
+
+// Get the associated TaskRun via the Task name
+func (pro *PipelineRunObject) GetTaskRunFromTask(taskName string) *v1beta1.TaskRun {
+	for _, tr := range pro.taskRuns {
+		val, ok := tr.Labels[PipelineTaskLabel]
+		if ok && val == taskName {
+			return tr
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/rekor.go b/vendor/github.com/tektoncd/chains/pkg/chains/rekor.go
index e53b0a9016..3976b006b4 100644
--- a/vendor/github.com/tektoncd/chains/pkg/chains/rekor.go
+++ b/vendor/github.com/tektoncd/chains/pkg/chains/rekor.go
@@ -22,9 +22,9 @@ import (
 	"github.com/sigstore/rekor/pkg/generated/client"
 	"github.com/sigstore/rekor/pkg/generated/models"
 	"github.com/sigstore/sigstore/pkg/cryptoutils"
+	"github.com/tektoncd/chains/pkg/chains/objects"
 	"github.com/tektoncd/chains/pkg/chains/signing"
 	"github.com/tektoncd/chains/pkg/config"
-	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
 	"go.uber.org/zap"
 )
 
@@ -80,7 +80,7 @@ var getRekor = func(url string, l *zap.SugaredLogger) (rekorClient, error) {
 	}, nil
 }
 
-func shouldUploadTlog(cfg config.Config, tr *v1beta1.TaskRun) bool {
+func shouldUploadTlog(cfg config.Config, obj objects.TektonObject) bool {
 	// if transparency isn't enabled, return false
 	if !cfg.Transparency.Enabled {
 		return false
@@ -91,9 +91,11 @@ func shouldUploadTlog(cfg config.Config, tr *v1beta1.TaskRun) bool {
 	}
 
 	// Already uploaded, don't do it again
-	if _, ok := tr.Annotations[ChainsTransparencyAnnotation]; ok {
+	if _, ok := obj.GetAnnotations()[ChainsTransparencyAnnotation]; ok {
 		return false
 	}
+
 	// verify the annotation
-	return tr.Annotations[RekorAnnotation] == "true"
+	ann := obj.GetAnnotations()[RekorAnnotation]
+	return ann == "true"
 }
diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/signing.go b/vendor/github.com/tektoncd/chains/pkg/chains/signing.go
index da75d5bba5..1ff3d9ac6a 100644
--- a/vendor/github.com/tektoncd/chains/pkg/chains/signing.go
+++ b/vendor/github.com/tektoncd/chains/pkg/chains/signing.go
@@ -25,6 +25,7 @@ import (
 	"github.com/tektoncd/chains/pkg/chains/formats/intotoite6"
 	"github.com/tektoncd/chains/pkg/chains/formats/simple"
 	"github.com/tektoncd/chains/pkg/chains/formats/tekton"
+	"github.com/tektoncd/chains/pkg/chains/objects"
 	"github.com/tektoncd/chains/pkg/chains/signing"
 	"github.com/tektoncd/chains/pkg/chains/signing/kms"
 	"github.com/tektoncd/chains/pkg/chains/signing/x509"
@@ -37,10 +38,10 @@
 )
 
 type Signer interface {
-	SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) error
+	Sign(ctx context.Context, obj objects.TektonObject) error
 }
 
-type TaskRunSigner struct {
+type ObjectSigner struct {
 	// Formatters: format payload
 	// The keys are the names of different formatters {tekton, in-toto, simplesigning}. The first two are for TaskRun artifact, and simplesigning is for OCI artifact.
 	// The values are actual `Payloader` interfaces that can generate payload in different format from taskrun.
@@ -56,7 +57,11 @@ type TaskRunSigner struct {
 func allSigners(ctx context.Context, sp string, cfg config.Config, l *zap.SugaredLogger) map[string]signing.Signer {
 	all := map[string]signing.Signer{}
-	neededSigners := map[string]struct{}{cfg.Artifacts.OCI.Signer: {}, cfg.Artifacts.TaskRuns.Signer: {}}
+	neededSigners := map[string]struct{}{
+		cfg.Artifacts.OCI.Signer:          {},
+		cfg.Artifacts.TaskRuns.Signer:     {},
+		cfg.Artifacts.PipelineRuns.Signer: {},
+	}
 
 	for _, s := range signing.AllSigners {
 		if _, ok := neededSigners[s]; !ok {
@@ -114,18 +119,35 @@ func AllFormatters(cfg config.Config, l *zap.SugaredLogger) map[formats.PayloadType]formats.Payloader {
 	return all
 }
 
-// SignTaskRun signs a TaskRun, and marks it as signed.
-func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) error {
+// TODO: Hook this up to config.
+func getSignableTypes(obj objects.TektonObject, logger *zap.SugaredLogger) ([]artifacts.Signable, error) {
+	switch v := obj.GetObject().(type) {
+	case *v1beta1.TaskRun:
+		return []artifacts.Signable{
+			&artifacts.TaskRunArtifact{Logger: logger},
+			&artifacts.OCIArtifact{Logger: logger},
+		}, nil
+	case *v1beta1.PipelineRun:
+		return []artifacts.Signable{
+			&artifacts.PipelineRunArtifact{Logger: logger},
+		}, nil
+	default:
+		return nil, fmt.Errorf("unsupported type of object to be signed: %s", v)
+	}
+}
+
+// Sign signs TaskRun and PipelineRun objects, and generates attestations for each.
+// It follows the process of: extract payload, sign payload, store payload and signature.
+func (o *ObjectSigner) Sign(ctx context.Context, tektonObj objects.TektonObject) error {
 	cfg := *config.FromContext(ctx)
 	logger := logging.FromContext(ctx)
 
-	// TODO: Hook this up to config.
- enabledSignableTypes := []artifacts.Signable{ - &artifacts.TaskRunArtifact{Logger: logger}, - &artifacts.OCIArtifact{Logger: logger}, + signableTypes, err := getSignableTypes(tektonObj, logger) + if err != nil { + return err } - signers := allSigners(ctx, ts.SecretPath, cfg, logger) + signers := allSigners(ctx, o.SecretPath, cfg, logger) rekorClient, err := getRekor(cfg.Transparency.URL, logger) if err != nil { @@ -134,22 +156,22 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e var merr *multierror.Error extraAnnotations := map[string]string{} - for _, signableType := range enabledSignableTypes { + for _, signableType := range signableTypes { if !signableType.Enabled(cfg) { continue } payloadFormat := signableType.PayloadFormat(cfg) // Find the right payload format and format the object - payloader, ok := ts.Formatters[payloadFormat] + payloader, ok := o.Formatters[payloadFormat] if !ok { - logger.Warnf("Format %s configured for TaskRun: %v %s was not found", payloadFormat, tr, signableType.Type()) + logger.Warnf("Format %s configured for %s: %v was not found", payloadFormat, tektonObj.GetKind(), signableType.Type()) continue } // Extract all the "things" to be signed. // We might have a few of each type (several binaries, or images) - objects := signableType.ExtractObjects(tr) + objects := signableType.ExtractObjects(tektonObj) // Go through each object one at a time. for _, obj := range objects { @@ -159,7 +181,7 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e logger.Error(err) continue } - logger.Infof("Created payload of type %s for TaskRun %s/%s", string(payloadFormat), tr.Namespace, tr.Name) + logger.Infof("Created payload of type %s for %s %s/%s", string(payloadFormat), tektonObj.GetKind(), tektonObj.GetNamespace(), tektonObj.GetName()) // Sign it! signerType := signableType.Signer(cfg) @@ -193,20 +215,20 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e // Now store those! 
for _, backend := range signableType.StorageBackend(cfg).List() { - b := ts.Backends[backend] + b := o.Backends[backend] storageOpts := config.StorageOpts{ Key: signableType.Key(obj), Cert: signer.Cert(), Chain: signer.Chain(), PayloadFormat: payloadFormat, } - if err := b.StorePayload(ctx, tr, rawPayload, string(signature), storageOpts); err != nil { + if err := b.StorePayload(ctx, tektonObj, rawPayload, string(signature), storageOpts); err != nil { logger.Error(err) merr = multierror.Append(merr, err) } } - if shouldUploadTlog(cfg, tr) { + if shouldUploadTlog(cfg, tektonObj) { entry, err := rekorClient.UploadTlog(ctx, signer, signature, rawPayload, signer.Cert(), string(payloadFormat)) if err != nil { merr = multierror.Append(merr, err) @@ -218,7 +240,7 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e } } if merr.ErrorOrNil() != nil { - if err := HandleRetry(ctx, tr, ts.Pipelineclientset, extraAnnotations); err != nil { + if err := HandleRetry(ctx, tektonObj, o.Pipelineclientset, extraAnnotations); err != nil { merr = multierror.Append(merr, err) } return merr @@ -226,12 +248,12 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e } // Now mark the TaskRun as signed - return MarkSigned(ctx, tr, ts.Pipelineclientset, extraAnnotations) + return MarkSigned(ctx, tektonObj, o.Pipelineclientset, extraAnnotations) } -func HandleRetry(ctx context.Context, tr *v1beta1.TaskRun, ps versioned.Interface, annotations map[string]string) error { - if RetryAvailable(tr) { - return AddRetry(ctx, tr, ps, annotations) +func HandleRetry(ctx context.Context, obj objects.TektonObject, ps versioned.Interface, annotations map[string]string) error { + if RetryAvailable(obj) { + return AddRetry(ctx, obj, ps, annotations) } - return MarkFailed(ctx, tr, ps, annotations) + return MarkFailed(ctx, obj, ps, annotations) } diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go index 9eda28e765..81a20c73c3 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/docdb/docdb.go @@ -18,8 +18,8 @@ import ( "encoding/base64" "encoding/json" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" "gocloud.dev/docstore" _ "gocloud.dev/docstore/awsdynamodb" @@ -62,7 +62,7 @@ func NewStorageBackend(ctx context.Context, logger *zap.SugaredLogger, cfg confi } // StorePayload implements the Payloader interface. -func (b *Backend) StorePayload(ctx context.Context, _ *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error { +func (b *Backend) StorePayload(ctx context.Context, _ objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { var obj interface{} if err := json.Unmarshal(rawPayload, &obj); err != nil { return err @@ -88,7 +88,7 @@ func (b *Backend) Type() string { return StorageTypeDocDB } -func (b *Backend) RetrieveSignatures(ctx context.Context, _ *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { +func (b *Backend) RetrieveSignatures(ctx context.Context, _ objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { // Retrieve the document. 
documents, err := b.retrieveDocuments(ctx, opts) if err != nil { @@ -107,7 +107,7 @@ func (b *Backend) RetrieveSignatures(ctx context.Context, _ *v1beta1.TaskRun, op return m, nil } -func (b *Backend) RetrievePayloads(ctx context.Context, _ *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { +func (b *Backend) RetrievePayloads(ctx context.Context, _ objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { documents, err := b.retrieveDocuments(ctx, opts) if err != nil { return nil, err diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/gcs/gcs.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/gcs/gcs.go index de70eb6a7e..e7d4b35511 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/gcs/gcs.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/gcs/gcs.go @@ -21,6 +21,7 @@ import ( "cloud.google.com/go/storage" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" @@ -60,7 +61,9 @@ func NewStorageBackend(ctx context.Context, logger *zap.SugaredLogger, cfg confi } // StorePayload implements the storage.Backend interface. -func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error { +func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { + // TODO: Handle unsupported type gracefully + tr := obj.GetObject().(*v1beta1.TaskRun) // We need multiple objects: the signature and the payload. We want to make these unique to the UID, but easy to find based on the // name/namespace as well. // $bucket/taskrun-$namespace-$name/$key.signature @@ -141,7 +144,9 @@ func (r *reader) GetReader(ctx context.Context, object string) (io.ReadCloser, e return r.client.Bucket(r.bucket).Object(object).NewReader(ctx) } -func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { +func (b *Backend) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + // TODO: Handle unsupported type gracefully + tr := obj.GetObject().(*v1beta1.TaskRun) object := sigName(tr, opts) signature, err := b.retrieveObject(ctx, object) if err != nil { @@ -153,7 +158,9 @@ func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, o return m, nil } -func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { +func (b *Backend) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { + // TODO: Handle unsupported type gracefully + tr := obj.GetObject().(*v1beta1.TaskRun) object := payloadName(tr, opts) m := make(map[string]string) payload, err := b.retrieveObject(ctx, object) diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/grafeas/grafeas.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/grafeas/grafeas.go index 610c5f8381..30db76733a 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/grafeas/grafeas.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/grafeas/grafeas.go @@ -28,6 +28,7 @@ import ( "github.com/sigstore/cosign/pkg/types" "github.com/tektoncd/chains/pkg/artifacts" "github.com/tektoncd/chains/pkg/chains/formats" + 
"github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" @@ -87,7 +88,9 @@ func NewStorageBackend(ctx context.Context, logger *zap.SugaredLogger, cfg confi } // StorePayload implements the storage.Backend interface. -func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error { +func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { + // TODO: Gracefully handle unexpected type + tr := obj.GetObject().(*v1beta1.TaskRun) // We only support simplesigning for OCI images, and in-toto for taskrun. if opts.PayloadFormat != formats.PayloadTypeInTotoIte6 && opts.PayloadFormat != formats.PayloadTypeSimpleSigning { return errors.New("Grafeas storage backend only supports simplesigning and intoto payload format.") @@ -126,7 +129,9 @@ func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayl } // Retrieve payloads from grafeas server and store it in a map -func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { +func (b *Backend) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { + // TODO: Gracefully handle unexpected type + tr := obj.GetObject().(*v1beta1.TaskRun) // initialize an empty map for result result := make(map[string]string) @@ -150,7 +155,9 @@ func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opt } // Retrieve signatures from grafeas server and store it in a map -func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { +func (b *Backend) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + // TODO: Gracefully handle unexpected type + tr := obj.GetObject().(*v1beta1.TaskRun) // initialize an empty map for result result := make(map[string][]string) @@ -409,7 +416,8 @@ func (b *Backend) retrieveSingleOCIURI(tr *v1beta1.TaskRun, opts config.StorageO func (b *Backend) retrieveAllArtifactIdentifiers(tr *v1beta1.TaskRun) []string { result := []string{} // for image artifacts - images := artifacts.ExtractOCIImagesFromResults(tr, b.logger) + trObj := objects.NewTaskRunObject(tr) + images := artifacts.ExtractOCIImagesFromResults(trObj, b.logger) for _, image := range images { ref, ok := image.(name.Digest) if !ok { @@ -419,7 +427,7 @@ func (b *Backend) retrieveAllArtifactIdentifiers(tr *v1beta1.TaskRun) []string { } // for other signable artifacts - artifacts := artifacts.ExtractSignableTargetFromResults(tr, b.logger) + artifacts := artifacts.ExtractSignableTargetFromResults(trObj, b.logger) for _, a := range artifacts { result = append(result, a.FullRef()) } diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/oci/oci.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/oci/oci.go index b036647350..2afed99873 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/oci/oci.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/oci/oci.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/tektoncd/chains/pkg/chains/formats" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/chains/pkg/chains/objects" 
"github.com/in-toto/in-toto-golang/in_toto" "github.com/secure-systems-lab/go-securesystemslib/dsse" @@ -49,7 +49,7 @@ type Backend struct { logger *zap.SugaredLogger cfg config.Config client kubernetes.Interface - getAuthenticator func(ctx context.Context, tr *v1beta1.TaskRun, client kubernetes.Interface) (remote.Option, error) + getAuthenticator func(ctx context.Context, obj objects.TektonObject, client kubernetes.Interface) (remote.Option, error) } // NewStorageBackend returns a new OCI StorageBackend that stores signatures in an OCI registry @@ -58,9 +58,9 @@ func NewStorageBackend(ctx context.Context, logger *zap.SugaredLogger, client ku logger: logger, cfg: cfg, client: client, - getAuthenticator: func(ctx context.Context, tr *v1beta1.TaskRun, client kubernetes.Interface) (remote.Option, error) { + getAuthenticator: func(ctx context.Context, obj objects.TektonObject, client kubernetes.Interface) (remote.Option, error) { kc, err := k8schain.New(ctx, client, - k8schain.Options{Namespace: tr.Namespace, ServiceAccountName: tr.Spec.ServiceAccountName}) + k8schain.Options{Namespace: obj.GetNamespace(), ServiceAccountName: obj.GetServiceAccountName()}) if err != nil { return nil, err } @@ -70,13 +70,13 @@ func NewStorageBackend(ctx context.Context, logger *zap.SugaredLogger, client ku } // StorePayload implements the storage.Backend interface. -func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, storageOpts config.StorageOpts) error { - auth, err := b.getAuthenticator(ctx, tr, b.client) +func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, storageOpts config.StorageOpts) error { + auth, err := b.getAuthenticator(ctx, obj, b.client) if err != nil { return err } - b.logger.Infof("Storing payload on TaskRun %s/%s", tr.Namespace, tr.Name) + b.logger.Infof("Storing payload on %s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) if storageOpts.PayloadFormat == formats.PayloadTypeSimpleSigning { format := simple.SimpleContainerImage{} @@ -97,8 +97,7 @@ func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayl // that is not intended to produce an image, e.g. git-clone. if len(attestation.Subject) == 0 { b.logger.Infof( - "No image subject to attest for TaskRun %s/%s. Skipping upload to registry", - tr.Namespace, tr.Name) + "No image subject to attest for %s/%s/%s. 
Skipping upload to registry", obj.GetKind(), obj.GetNamespace(), obj.GetName()) return nil } @@ -207,8 +206,8 @@ func (b *Backend) Type() string { return StorageBackendOCI } -func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { - images, err := b.RetrieveArtifact(ctx, tr, opts) +func (b *Backend) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + images, err := b.RetrieveArtifact(ctx, obj, opts) if err != nil { return nil, err } @@ -235,9 +234,9 @@ func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, o return m, nil } -func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { +func (b *Backend) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { var err error - images, err := b.RetrieveArtifact(ctx, tr, opts) + images, err := b.RetrieveArtifact(ctx, obj, opts) if err != nil { return nil, err } @@ -279,9 +278,9 @@ func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opt return m, nil } -func (b *Backend) RetrieveArtifact(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]oci.SignedImage, error) { +func (b *Backend) RetrieveArtifact(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]oci.SignedImage, error) { // Given the TaskRun, retrieve the OCI images. - images := artifacts.ExtractOCIImagesFromResults(tr, b.logger) + images := artifacts.ExtractOCIImagesFromResults(obj, b.logger) m := make(map[string]oci.SignedImage) for _, image := range images { diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/pubsub/pubsub.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/pubsub/pubsub.go index 1103b1f590..59875f6a89 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/pubsub/pubsub.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/pubsub/pubsub.go @@ -18,8 +18,8 @@ import ( "encoding/base64" "fmt" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" "gocloud.dev/pubsub/kafkapubsub" @@ -52,8 +52,8 @@ func (b *Backend) Type() string { return StorageBackendPubSub } -func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error { - b.logger.Infof("Storing payload on TaskRun %s/%s", tr.Namespace, tr.Name) +func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { + b.logger.Infof("Storing payload on Object %s/%s", obj.GetNamespace(), obj.GetName()) // Construct a *pubsub.Topic. 
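+	// (Illustrative note: with the kafka backend this is expected to resolve a
+	// kafkapubsub:// topic via gocloud.dev, per the imports above; the signed
+	// payload is then published as a message below.)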
topic, err := b.NewTopic() @@ -81,11 +81,11 @@ func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayl return nil } -func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { +func (b *Backend) RetrievePayloads(ctx context.Context, _ objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { return nil, fmt.Errorf("not implemented for this storage backend: %s", b.Type()) } -func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { +func (b *Backend) RetrieveSignatures(ctx context.Context, _ objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { return nil, fmt.Errorf("not implemented for this storage backend: %s", b.Type()) } diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/storage.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/storage.go index 98507d675e..25c6846e75 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/storage.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/storage.go @@ -16,6 +16,7 @@ package storage import ( "context" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/chains/storage/docdb" "github.com/tektoncd/chains/pkg/chains/storage/gcs" "github.com/tektoncd/chains/pkg/chains/storage/grafeas" @@ -23,7 +24,6 @@ import ( "github.com/tektoncd/chains/pkg/chains/storage/pubsub" "github.com/tektoncd/chains/pkg/chains/storage/tekton" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "go.uber.org/zap" "k8s.io/client-go/kubernetes" @@ -31,11 +31,11 @@ import ( // Backend is an interface to store a chains Payload type Backend interface { - StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error + StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error // RetrievePayloads maps [ref]:[payload] for a TaskRun - RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) + RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) // RetrieveSignatures maps [ref]:[list of signatures] for a TaskRun - RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) + RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) // Type is the string representation of the backend Type() string } @@ -50,6 +50,9 @@ func InitializeBackends(ctx context.Context, ps versioned.Interface, kc kubernet if cfg.Artifacts.OCI.Enabled() { configuredBackends = append(configuredBackends, cfg.Artifacts.OCI.StorageBackend.List()...) } + if cfg.Artifacts.PipelineRuns.Enabled() { + configuredBackends = append(configuredBackends, cfg.Artifacts.PipelineRuns.StorageBackend.List()...) + } // Now only initialize and return the configured ones. 
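+	// (For example, with artifacts.taskrun.storage=tekton and
+	// artifacts.oci.storage=oci, a plausible configuration, only the
+	// "tekton" and "oci" backends would be constructed below.)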
backends := map[string]Backend{} diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/storage/tekton/tekton.go b/vendor/github.com/tektoncd/chains/pkg/chains/storage/tekton/tekton.go index 151afc916c..e586986b43 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/storage/tekton/tekton.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/storage/tekton/tekton.go @@ -17,14 +17,13 @@ import ( "context" "encoding/base64" "fmt" + + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/chains/pkg/patch" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "go.uber.org/zap" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" ) const ( @@ -51,8 +50,8 @@ func NewStorageBackend(ps versioned.Interface, logger *zap.SugaredLogger) *Backe } // StorePayload implements the Payloader interface. -func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayload []byte, signature string, opts config.StorageOpts) error { - b.logger.Infof("Storing payload on TaskRun %s/%s", tr.Namespace, tr.Name) +func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { + b.logger.Infof("Storing payload on %s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) // Use patch instead of update to prevent race conditions. patchBytes, err := patch.GetAnnotationsPatch(map[string]string{ @@ -65,9 +64,10 @@ func (b *Backend) StorePayload(ctx context.Context, tr *v1beta1.TaskRun, rawPayl if err != nil { return err } - if _, err := b.pipelineclientset.TektonV1beta1().TaskRuns(tr.Namespace).Patch( - ctx, tr.Name, types.MergePatchType, patchBytes, v1.PatchOptions{}); err != nil { - return err + + patchErr := obj.Patch(ctx, b.pipelineclientset, patchBytes) + if patchErr != nil { + return patchErr } return nil } @@ -77,29 +77,27 @@ func (b *Backend) Type() string { } // retrieveAnnotationValue retrieve the value of an annotation and base64 decode it if needed. -func (b *Backend) retrieveAnnotationValue(ctx context.Context, tr *v1beta1.TaskRun, annotationKey string, decode bool) (string, error) { - // Retrieve the TaskRun. - b.logger.Infof("Retrieving annotation %q on TaskRun %s/%s", annotationKey, tr.Namespace, tr.Name) - tr, err := b.pipelineclientset.TektonV1beta1().TaskRuns(tr.Namespace).Get(ctx, tr.Name, v1.GetOptions{}) - if err != nil { - return "", fmt.Errorf("error retrieving taskrun: %s", err) - } +func (b *Backend) retrieveAnnotationValue(ctx context.Context, obj objects.TektonObject, annotationKey string, decode bool) (string, error) { + b.logger.Infof("Retrieving annotation %q on %s/%s/%s", annotationKey, obj.GetKind(), obj.GetNamespace(), obj.GetName()) - // Retrieve the annotation. var annotationValue string - rawAnnotationValue, exists := tr.Annotations[annotationKey] + annotations, err := obj.GetLatestAnnotations(ctx, b.pipelineclientset) + if err != nil { + return "", fmt.Errorf("error retrieving the annotation value for the key %q: %s", annotationKey, err) + } + val, ok := annotations[annotationKey] // Ensure it exists. - if exists { + if ok { // Decode it if needed. 
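+		// (The annotation value is stored base64-encoded, so retrieval paths
+		// such as RetrieveSignatures call this helper with decode=true.)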
if decode { - decodedAnnotation, err := base64.StdEncoding.DecodeString(rawAnnotationValue) + decodedAnnotation, err := base64.StdEncoding.DecodeString(val) if err != nil { return "", fmt.Errorf("error decoding the annotation value for the key %q: %s", annotationKey, err) } annotationValue = string(decodedAnnotation) } else { - annotationValue = rawAnnotationValue + annotationValue = val } } @@ -107,24 +105,23 @@ func (b *Backend) retrieveAnnotationValue(ctx context.Context, tr *v1beta1.TaskR } // RetrieveSignature retrieve the signature stored in the taskrun. -func (b *Backend) RetrieveSignatures(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string][]string, error) { - b.logger.Infof("Retrieving signature on TaskRun %s/%s", tr.Namespace, tr.Name) +func (b *Backend) RetrieveSignatures(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string][]string, error) { + b.logger.Infof("Retrieving signature on %s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) signatureAnnotation := sigName(opts) - signature, err := b.retrieveAnnotationValue(ctx, tr, signatureAnnotation, true) + signature, err := b.retrieveAnnotationValue(ctx, obj, signatureAnnotation, true) if err != nil { return nil, err } - m := make(map[string][]string) m[signatureAnnotation] = []string{signature} return m, nil } // RetrievePayload retrieve the payload stored in the taskrun. -func (b *Backend) RetrievePayloads(ctx context.Context, tr *v1beta1.TaskRun, opts config.StorageOpts) (map[string]string, error) { - b.logger.Infof("Retrieving payload on TaskRun %s/%s", tr.Namespace, tr.Name) +func (b *Backend) RetrievePayloads(ctx context.Context, obj objects.TektonObject, opts config.StorageOpts) (map[string]string, error) { + b.logger.Infof("Retrieving payload on %s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName()) payloadAnnotation := payloadName(opts) - payload, err := b.retrieveAnnotationValue(ctx, tr, payloadAnnotation, true) + payload, err := b.retrieveAnnotationValue(ctx, obj, payloadAnnotation, true) if err != nil { return nil, err } diff --git a/vendor/github.com/tektoncd/chains/pkg/chains/verifier.go b/vendor/github.com/tektoncd/chains/pkg/chains/verifier.go index fdeac4dd67..ebb488a789 100644 --- a/vendor/github.com/tektoncd/chains/pkg/chains/verifier.go +++ b/vendor/github.com/tektoncd/chains/pkg/chains/verifier.go @@ -18,6 +18,7 @@ import ( "strings" "github.com/tektoncd/chains/pkg/artifacts" + "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -48,6 +49,8 @@ func (tv *TaskRunVerifier) VerifyTaskRun(ctx context.Context, tr *v1beta1.TaskRu &artifacts.OCIArtifact{Logger: logger}, } + trObj := objects.NewTaskRunObject(tr) + // Storage allBackends, err := storage.InitializeBackends(ctx, tv.Pipelineclientset, tv.KubeClient, logger, cfg) if err != nil { @@ -69,11 +72,11 @@ func (tv *TaskRunVerifier) VerifyTaskRun(ctx context.Context, tr *v1beta1.TaskRu for _, backend := range signableType.StorageBackend(cfg).List() { b := allBackends[backend] - signatures, err := b.RetrieveSignatures(ctx, tr, config.StorageOpts{}) + signatures, err := b.RetrieveSignatures(ctx, trObj, config.StorageOpts{}) if err != nil { return err } - payload, err := b.RetrievePayloads(ctx, tr, config.StorageOpts{}) + payload, err := b.RetrievePayloads(ctx, trObj, config.StorageOpts{}) if err != nil { return err } diff --git 
a/vendor/github.com/tektoncd/chains/pkg/config/config.go b/vendor/github.com/tektoncd/chains/pkg/config/config.go index 03b3d92f53..35d318e843 100644 --- a/vendor/github.com/tektoncd/chains/pkg/config/config.go +++ b/vendor/github.com/tektoncd/chains/pkg/config/config.go @@ -36,8 +36,9 @@ type Config struct { // ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type type ArtifactConfigs struct { - OCI Artifact - TaskRuns Artifact + OCI Artifact + PipelineRuns Artifact + TaskRuns Artifact } // Artifact contains the configuration for how to sign/store/format the signatures for a single artifact @@ -146,6 +147,10 @@ const ( taskrunStorageKey = "artifacts.taskrun.storage" taskrunSignerKey = "artifacts.taskrun.signer" + pipelinerunFormatKey = "artifacts.pipelinerun.format" + pipelinerunStorageKey = "artifacts.pipelinerun.storage" + pipelinerunSignerKey = "artifacts.pipelinerun.signer" + ociFormatKey = "artifacts.oci.format" ociStorageKey = "artifacts.oci.storage" ociSignerKey = "artifacts.oci.signer" @@ -207,6 +212,11 @@ func defaultConfig() *Config { StorageBackend: sets.NewString("tekton"), Signer: "x509", }, + PipelineRuns: Artifact{ + Format: "tekton", + StorageBackend: sets.NewString("tekton"), + Signer: "x509", + }, OCI: Artifact{ Format: "simplesigning", StorageBackend: sets.NewString("oci"), @@ -244,6 +254,11 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { asStringSet(taskrunStorageKey, &cfg.Artifacts.TaskRuns.StorageBackend, sets.NewString("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")), asString(taskrunSignerKey, &cfg.Artifacts.TaskRuns.Signer, "x509", "kms"), + // PipelineRuns + asString(pipelinerunFormatKey, &cfg.Artifacts.PipelineRuns.Format, "tekton", "in-toto", "tekton-provenance"), + asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.NewString("tekton", "oci")), + asString(pipelinerunSignerKey, &cfg.Artifacts.PipelineRuns.Signer, "x509", "kms"), + // OCI asString(ociFormatKey, &cfg.Artifacts.OCI.Format, "simplesigning"), asStringSet(ociStorageKey, &cfg.Artifacts.OCI.StorageBackend, sets.NewString("tekton", "oci", "gcs", "docdb", "grafeas", "kafka")), diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go index 43471fd574..5b369909ad 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go @@ -50,7 +50,7 @@ const ( // when it isn't specified in configmap DefaultPipelinerunLevel = PipelinerunLevelAtPipeline // PipelinerunLevelAtPipelinerun specify that aggregation will be done at - // pipelienrun level + // pipelinerun level PipelinerunLevelAtPipelinerun = "pipelinerun" // PipelinerunLevelAtPipeline specify that aggregation will be done at // pipeline level @@ -132,8 +132,8 @@ func newMetricsFromMap(cfgMap map[string]string) (*Metrics, error) { if durationTaskrun, ok := cfgMap[metricsDurationTaskrunType]; ok { tc.DurationTaskrunType = durationTaskrun } - if durationPipelienrun, ok := cfgMap[metricsDurationPipelinerunType]; ok { - tc.DurationPipelinerunType = durationPipelienrun + if durationPipelinerun, ok := cfgMap[metricsDurationPipelinerunType]; ok { + tc.DurationPipelinerunType = durationPipelinerun } return &tc, nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/doc.go new file mode 100644 index 
0000000000..caa8b55b92 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/feature_flags.go new file mode 100644 index 0000000000..c43b72461c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/feature_flags.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "fmt" + "os" + "strconv" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // DefaultEnableGitResolver is the default value for "enable-git-resolver". + DefaultEnableGitResolver = false + // DefaultEnableHubResolver is the default value for "enable-hub-resolver". + DefaultEnableHubResolver = false + // DefaultEnableBundlesResolver is the default value for "enable-bundles-resolver". + DefaultEnableBundlesResolver = false + // DefaultEnableClusterResolver is the default value for "enable-cluster-resolver". + DefaultEnableClusterResolver = false + + // EnableGitResolver is the flag used to enable the git remote resolver + EnableGitResolver = "enable-git-resolver" + // EnableHubResolver is the flag used to enable the hub remote resolver + EnableHubResolver = "enable-hub-resolver" + // EnableBundlesResolver is the flag used to enable the bundle remote resolver + EnableBundlesResolver = "enable-bundles-resolver" + // EnableClusterResolver is the flag used to enable the cluster remote resolver + EnableClusterResolver = "enable-cluster-resolver" +) + +// FeatureFlags holds the features configurations +// +k8s:deepcopy-gen=true +type FeatureFlags struct { + EnableGitResolver bool + EnableHubResolver bool + EnableBundleResolver bool + EnableClusterResolver bool +} + +// GetFeatureFlagsConfigName returns the name of the configmap containing all +// feature flags. 
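+// The default can be overridden through the environment, e.g. with a
+// hypothetical ConfigMap name:
+//
+//	CONFIG_RESOLVERS_FEATURE_FLAGS_NAME=my-resolver-flags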
+func GetFeatureFlagsConfigName() string { + if e := os.Getenv("CONFIG_RESOLVERS_FEATURE_FLAGS_NAME"); e != "" { + return e + } + return "resolvers-feature-flags" +} + +// NewFeatureFlagsFromMap returns a Config given a map corresponding to a ConfigMap +func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { + setFeature := func(key string, defaultValue bool, feature *bool) error { + if cfg, ok := cfgMap[key]; ok { + value, err := strconv.ParseBool(cfg) + if err != nil { + return fmt.Errorf("failed parsing feature flags config %q: %v", cfg, err) + } + *feature = value + return nil + } + *feature = defaultValue + return nil + } + + tc := FeatureFlags{} + if err := setFeature(EnableGitResolver, DefaultEnableGitResolver, &tc.EnableGitResolver); err != nil { + return nil, err + } + if err := setFeature(EnableHubResolver, DefaultEnableHubResolver, &tc.EnableHubResolver); err != nil { + return nil, err + } + if err := setFeature(EnableBundlesResolver, DefaultEnableBundlesResolver, &tc.EnableBundleResolver); err != nil { + return nil, err + } + if err := setFeature(EnableClusterResolver, DefaultEnableClusterResolver, &tc.EnableClusterResolver); err != nil { + return nil, err + } + return &tc, nil +} + +// NewFeatureFlagsFromConfigMap returns a Config for the given configmap +func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, error) { + return NewFeatureFlagsFromMap(config.Data) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/store.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/store.go new file mode 100644 index 0000000000..9d48ba1b29 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/store.go @@ -0,0 +1,102 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resolver + +import ( + "context" + "fmt" + + "knative.dev/pkg/configmap" +) + +type cfgKey struct{} + +// Config holds the collection of configurations that we attach to contexts. +// +k8s:deepcopy-gen=false +type Config struct { + FeatureFlags *FeatureFlags +} + +// ResolversNamespace takes the pipelines namespace and appends "-resolvers" to it. +func ResolversNamespace(baseNS string) string { + return fmt.Sprintf("%s-resolvers", baseNS) +} + +// FromContext extracts a Config from the provided context. +func FromContext(ctx context.Context) *Config { + x, ok := ctx.Value(cfgKey{}).(*Config) + if ok { + return x + } + return nil +} + +// FromContextOrDefaults is like FromContext, but when no Config is attached it +// returns a Config populated with the defaults for each of the Config fields. +func FromContextOrDefaults(ctx context.Context) *Config { + if cfg := FromContext(ctx); cfg != nil { + return cfg + } + featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{}) + return &Config{ + FeatureFlags: featureFlags, + } +} + +// ToContext attaches the provided Config to the provided context, returning the +// new context with the Config attached. 
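+//
+// A minimal round-trip sketch, assuming flags built with NewFeatureFlagsFromMap:
+//
+//	ctx = ToContext(ctx, &Config{FeatureFlags: flags})
+//	cfg := FromContext(ctx) // returns the attached *Config, or nil if absent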
+func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// Store is a typed wrapper around configmap.Untyped store to handle our configmaps. +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +// NewStore creates a new store of Configs and optionally calls functions when ConfigMaps are updated. +func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "features", + logger, + configmap.Constructors{ + GetFeatureFlagsConfigName(): NewFeatureFlagsFromConfigMap, + }, + onAfterStore..., + ), + } + + return store +} + +// ToContext attaches the current Config state to the provided context. +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +// Load creates a Config from the current config state of the Store. +func (s *Store) Load() *Config { + featureFlags := s.UntypedLoad(GetFeatureFlagsConfigName()) + if featureFlags == nil { + featureFlags, _ = NewFeatureFlagsFromMap(map[string]string{}) + } + return &Config{ + FeatureFlags: featureFlags.(*FeatureFlags).DeepCopy(), + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/zz_generated.deepcopy.go new file mode 100644 index 0000000000..aa89b95f16 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/resolver/zz_generated.deepcopy.go @@ -0,0 +1,38 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package resolver + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureFlags) DeepCopyInto(out *FeatureFlags) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureFlags. 
+func (in *FeatureFlags) DeepCopy() *FeatureFlags { + if in == nil { + return nil + } + out := new(FeatureFlags) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go index bd457b8ede..8245f6026c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -32,32 +32,58 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA return map[string]common.OpenAPIDefinition{ "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference": schema_pkg_apis_pipeline_v1_ChildStatusReference(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask": schema_pkg_apis_pipeline_v1_EmbeddedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix": schema_pkg_apis_pipeline_v1_Matrix(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param": schema_pkg_apis_pipeline_v1_Param(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec": schema_pkg_apis_pipeline_v1_ParamSpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue": schema_pkg_apis_pipeline_v1_ParamValue(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Pipeline": schema_pkg_apis_pipeline_v1_Pipeline(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineList": schema_pkg_apis_pipeline_v1_PipelineList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef": schema_pkg_apis_pipeline_v1_PipelineRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult": schema_pkg_apis_pipeline_v1_PipelineResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun": schema_pkg_apis_pipeline_v1_PipelineRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunList": schema_pkg_apis_pipeline_v1_PipelineRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult": schema_pkg_apis_pipeline_v1_PipelineRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1_PipelineRunRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec": schema_pkg_apis_pipeline_v1_PipelineRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus": schema_pkg_apis_pipeline_v1_PipelineRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1_PipelineRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunTaskRunStatus": schema_pkg_apis_pipeline_v1_PipelineRunTaskRunStatus(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec": schema_pkg_apis_pipeline_v1_PipelineSpec(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask": schema_pkg_apis_pipeline_v1_PipelineTask(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskParam": schema_pkg_apis_pipeline_v1_PipelineTaskParam(ref), + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRun": schema_pkg_apis_pipeline_v1_PipelineTaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec": schema_pkg_apis_pipeline_v1_PipelineTaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate": schema_pkg_apis_pipeline_v1_PipelineTaskRunTemplate(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec": schema_pkg_apis_pipeline_v1_PropertySpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam": schema_pkg_apis_pipeline_v1_ResolverParam(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef": schema_pkg_apis_pipeline_v1_ResolverRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResultRef": schema_pkg_apis_pipeline_v1_ResultRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar": schema_pkg_apis_pipeline_v1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState": schema_pkg_apis_pipeline_v1_SidecarState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask": schema_pkg_apis_pipeline_v1_SkippedTask(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step": schema_pkg_apis_pipeline_v1_Step(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig": schema_pkg_apis_pipeline_v1_StepOutputConfig(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState": schema_pkg_apis_pipeline_v1_StepState(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate": schema_pkg_apis_pipeline_v1_StepTemplate(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task": schema_pkg_apis_pipeline_v1_Task(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList": schema_pkg_apis_pipeline_v1_TaskList(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef": schema_pkg_apis_pipeline_v1_TaskRef(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult": schema_pkg_apis_pipeline_v1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun": schema_pkg_apis_pipeline_v1_TaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug": schema_pkg_apis_pipeline_v1_TaskRunDebug(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunInputs": schema_pkg_apis_pipeline_v1_TaskRunInputs(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunList": schema_pkg_apis_pipeline_v1_TaskRunList(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult": schema_pkg_apis_pipeline_v1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1_TaskRunSidecarOverride(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec": schema_pkg_apis_pipeline_v1_TaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus": schema_pkg_apis_pipeline_v1_TaskRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1_TaskRunStepOverride(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec": schema_pkg_apis_pipeline_v1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields": 
schema_pkg_apis_pipeline_v1_TimeoutFields(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression": schema_pkg_apis_pipeline_v1_WhenExpression(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding": schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref), @@ -330,6 +356,66 @@ func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common. } } +func schema_pkg_apis_pipeline_v1_ChildStatusReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the TaskRun or Run this is referencing.", + Type: []string{"string"}, + Format: "", + }, + }, + "pipelineTaskName": { + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskName is the name of the PipelineTask this is referencing.", + Type: []string{"string"}, + Format: "", + }, + }, + "whenExpressions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"}, + } +} + func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -497,6 +583,40 @@ func schema_pkg_apis_pipeline_v1_EmbeddedTask(ref common.ReferenceCallback) comm } } +func schema_pkg_apis_pipeline_v1_Matrix(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Matrix is used to fan out Tasks in a Pipeline", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. 
The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, + } +} + func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -739,6 +859,33 @@ func schema_pkg_apis_pipeline_v1_PipelineList(ref common.ReferenceCallback) comm } } +func schema_pkg_apis_pipeline_v1_PipelineRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineRef can be used to refer to a specific instance of a Pipeline.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "API version of the referent", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_pipeline_v1_PipelineResult(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -785,110 +932,88 @@ func schema_pkg_apis_pipeline_v1_PipelineResult(ref common.ReferenceCallback) co } } -func schema_pkg_apis_pipeline_v1_PipelineSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineRun(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineSpec defines the desired state of Pipeline.", + Description: "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "description": { + "kind": { SchemaProps: spec.SchemaProps{ - Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.", + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", Type: []string{"string"}, Format: "", }, }, - "tasks": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Tasks declares the graph of Tasks that execute when this Pipeline is run.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"), - }, - }, - }, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "params": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "metadata": { SchemaProps: spec.SchemaProps{ - Description: "Params declares a list of input parameters that must be supplied when this Pipeline is run.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, - "workspaces": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec"), }, + }, + "status": { SchemaProps: spec.SchemaProps{ - Description: "Workspaces declares a set of named workspaces that are expected to be provided by a PipelineRun.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"), - }, - }, - }, + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus"), }, }, - "results": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineRunList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineRunList contains a list of PipelineRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", }, + }, + "apiVersion": { SchemaProps: spec.SchemaProps{ - Description: "Results are values that this pipeline can output once run", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult"), - }, - }, - }, + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", }, }, - "finally": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, + }, + "items": { SchemaProps: spec.SchemaProps{ - Description: "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline", - Type: []string{"array"}, + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun"), }, }, }, @@ -898,77 +1023,105 @@ func schema_pkg_apis_pipeline_v1_PipelineSpec(ref common.ReferenceCallback) comm }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.", + Description: "PipelineRunResult used to describe the results of a pipeline", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "Name is the name of this task within the context of a Pipeline. 
Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.", + Description: "Name is the result's name as declared by the Pipeline", + Default: "", Type: []string{"string"}, Format: "", }, }, - "taskRef": { + "value": { SchemaProps: spec.SchemaProps{ - Description: "TaskRef is a reference to a task definition.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef"), + Description: "Value is the result returned from the execution of this PipelineRun", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"), }, }, - "taskSpec": { + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineRunRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineRunRunStatus contains the name of the PipelineTask for this Run and the Run's Status", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pipelineTaskName": { SchemaProps: spec.SchemaProps{ - Description: "TaskSpec is a specification of a task", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask"), + Description: "PipelineTaskName is the name of the PipelineTask.", + Type: []string{"string"}, + Format: "", }, }, - "when": { + "status": { SchemaProps: spec.SchemaProps{ - Description: "When is a list of when expressions that need to be true for the task to run", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), - }, - }, - }, - }, - }, - "retries": { - SchemaProps: spec.SchemaProps{ - Description: "Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False", - Type: []string{"integer"}, - Format: "int32", + Description: "Status is the RunStatus for the corresponding Run", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1.RunStatus"), }, }, - "runAfter": { + "whenExpressions": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "RunAfter is the list of PipelineTask names that should be executed before this Task executes. 
(Used to force a specific ordering in graph execution.)", + Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), }, }, }, }, }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1.RunStatus"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineRunSpec defines the desired state of PipelineRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pipelineRef": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef"), + }, + }, + "pipelineSpec": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"), + }, + }, "params": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ @@ -976,7 +1129,7 @@ func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) comm }, }, SchemaProps: spec.SchemaProps{ - Description: "Parameters declares parameters passed to this task.", + Description: "Params is a list of parameter names and values.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -988,75 +1141,101 @@ func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) comm }, }, }, - "matrix": { + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Used for cancelling a pipelinerun (and maybe more later on)", + Type: []string{"string"}, + Format: "", + }, + }, + "timeouts": { + SchemaProps: spec.SchemaProps{ + Description: "Time after which the Pipeline times out. 
Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields"), + }, + }, + "taskRunTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "TaskRunTemplate represent template of taskrun", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate"), + }, + }, + "workspaces": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Matrix declares parameters used to fan out this task.", + Description: "Workspaces holds a set of workspace bindings that must match names with those declared in the pipeline.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"), }, }, }, }, }, - "workspaces": { + "taskRunSpecs": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Workspaces maps workspaces from the pipeline spec to the workspaces declared in the Task.", + Description: "TaskRunSpecs holds a set of runtime specs", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec"), }, }, }, }, }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskRunTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TimeoutFields", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"}, } } -func schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask", + Description: "PipelineRunStatus defines the observed state of PipelineRun", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "labels": { + "observedGeneration": { SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Conditions the latest available observations of a resource's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("knative.dev/pkg/apis.Condition"), }, }, }, @@ -1064,7 +1243,8 @@ func schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref common.ReferenceCallba }, "annotations": { SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, + Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. 
This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + Type: []string{"object"}, AdditionalProperties: &spec.SchemaOrBool{ Allows: true, Schema: &spec.Schema{ @@ -1077,793 +1257,2271 @@ func schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref common.ReferenceCallba }, }, }, - }, - }, - }, - } -} - -func schema_pkg_apis_pipeline_v1_PipelineTaskParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PipelineTaskParam is used to provide arbitrary string parameters to a Task.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { + "startTime": { SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Description: "StartTime is the time the PipelineRun is actually started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "value": { + "completionTime": { SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Description: "CompletionTime is the time the PipelineRun completed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - }, - Required: []string{"name", "value"}, - }, - }, - } -} - -func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of a workspace to be provided by a PipelineRun.", - Default: "", - Type: []string{"string"}, - Format: "", + "results": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - "description": { SchemaProps: spec.SchemaProps{ - Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", - Type: []string{"string"}, - Format: "", + Description: "Results are the list of results written out by the pipeline task's containers", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult"), + }, + }, + }, }, }, - "optional": { + "pipelineSpec": { SchemaProps: spec.SchemaProps{ - Description: "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", - Type: []string{"boolean"}, - Format: "", + Description: "PipelineRunSpec contains the exact spec used to instantiate the run", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"), }, }, - }, - Required: []string{"name"}, - }, - }, - } -} - -func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PropertySpec defines the struct for object keys", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "type": { + "skippedTasks": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "list of tasks that were skipped due to when expressions evaluating to false", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask"), + }, + }, + }, }, }, - }, - }, - }, - } -} - -func schema_pkg_apis_pipeline_v1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResolverParam is a single parameter passed to a resolver.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { + "childReferences": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the parameter that will be passed to the resolver.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference"), + }, + }, + }, }, }, - "value": { + "finallyStartTime": { SchemaProps: spec.SchemaProps{ - Description: "Value is the string value of the parameter that will be passed to the resolver.", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, }, - Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, } } -func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote 
location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + Description: "PipelineRunStatusFields holds the fields of PipelineRunStatus' status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "resolver": { + "startTime": { SchemaProps: spec.SchemaProps{ - Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", - Type: []string{"string"}, - Format: "", + Description: "StartTime is the time the PipelineRun is actually started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "resource": { + "completionTime": { + SchemaProps: spec.SchemaProps{ + Description: "CompletionTime is the time the PipelineRun completed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "results": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Description: "Results are the list of results written out by the pipeline task's containers", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult"), }, }, }, }, }, - }, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"}, - } -} - -func schema_pkg_apis_pipeline_v1_ResultRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResultRef is a type that represents a reference to a task run result", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "pipelineTask": { + "pipelineSpec": { SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Description: "PipelineRunSpec contains the exact spec used to instantiate the run", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec"), }, }, - "result": { - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + "skippedTasks": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "list of tasks that were skipped due to when expressions evaluating to false", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask"), + }, + }, + }, }, }, - "resultsIndex": { + "childReferences": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Default: 0, - Type: []string{"integer"}, - Format: "int32", + Description: "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.", + Type: 
[]string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference"), + }, + }, + }, }, }, - "property": { + "finallyStartTime": { SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, }, - Required: []string{"pipelineTask", "result", "resultsIndex", "property"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ChildStatusReference", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SkippedTask", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } -func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_PipelineRunTaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + Description: "PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the Sidecar specified as a DNS_LABEL. Each Sidecar in a Task must have a unique name (DNS_LABEL). Cannot be updated.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { + "pipelineTaskName": { SchemaProps: spec.SchemaProps{ - Description: "Image reference name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Description: "PipelineTaskName is the name of the PipelineTask.", Type: []string{"string"}, Format: "", }, }, - "command": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "status": { SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "Status is the TaskRunStatus for the corresponding TaskRun", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"), }, }, - "args": { + "whenExpressions": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), }, }, }, }, }, - "workingDir": { + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineSpec defines the desired state of Pipeline.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "description": { SchemaProps: spec.SchemaProps{ - Description: "Sidecar's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Description: "Description is a user-facing description of the pipeline that may be used to populate a UI.", Type: []string{"string"}, Format: "", }, }, - "ports": { + "tasks": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the Sidecar. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Cannot be updated.", + Description: "Tasks declares the graph of Tasks that execute when this Pipeline is run.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ContainerPort"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"), }, }, }, }, }, - "envFrom": { + "params": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the Sidecar. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Description: "Params declares a list of input parameters that must be supplied when this Pipeline is run.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"), }, }, }, }, }, - "env": { + "workspaces": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the Sidecar. Cannot be updated.", + Description: "Workspaces declares a set of named workspaces that are expected to be provided by a PipelineRun.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"), }, }, }, }, }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this Sidecar. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { + "results": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Volumes to mount into the Sidecar's filesystem. 
Cannot be updated.", + Description: "Results are values that this pipeline can output once run", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult"), }, }, }, }, }, - "volumeDevices": { + "finally": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the Sidecar.", + Description: "Finally declares the list of Tasks that execute just before leaving the Pipeline i.e. either after all Tasks are finished executing successfully or after a failure which would result in ending the Pipeline", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask"), }, }, }, }, }, - "livenessProbe": { + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTask(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTask defines a task in a Pipeline, passing inputs from both Params and from the output of previous tasks.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of Sidecar liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), + Description: "Name is the name of this task within the context of a Pipeline. Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.", + Type: []string{"string"}, + Format: "", }, }, - "readinessProbe": { + "taskRef": { SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of Sidecar service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), + Description: "TaskRef is a reference to a task definition.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef"), }, }, - "startupProbe": { + "taskSpec": { SchemaProps: spec.SchemaProps{ - Description: "StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), + Description: "TaskSpec is a specification of a task", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask"), }, }, - "lifecycle": { + "when": { SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to Sidecar lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), + Description: "When is a list of when expressions that need to be true for the task to run", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), + }, + }, + }, }, }, - "terminationMessagePath": { + "retries": { SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the Sidecar's termination message will be written is mounted into the Sidecar's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", + Description: "Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False", + Type: []string{"integer"}, + Format: "int32", }, }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the Sidecar status message on both success and failure. FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination message file is empty and the Sidecar exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", + "runAfter": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - "imagePullPolicy": { SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", + Description: "RunAfter is the list of PipelineTask names that should be executed before this Task executes. (Used to force a specific ordering in graph execution.)", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the Sidecar should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - "stdin": { SchemaProps: spec.SchemaProps{ - Description: "Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Sidecar will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", + Description: "Parameters declares parameters passed to this task.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + }, + }, + }, }, }, - "stdinOnce": { + "matrix": { SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the Sidecar is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", + Description: "Matrix declares parameters used to fan out this task.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix"), }, }, - "tty": { + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Description: "Workspaces maps workspaces from the pipeline spec to the workspaces declared in the Task.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding"), + }, + }, + }, + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Time after which the TaskRun times out. Defaults to 1 hour. Specified TaskRun timeout should be less than 24h. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Matrix", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTaskMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "labels": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTaskParam(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskParam is used to provide arbitrary string parameters to a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskRun reports the results of running a step in the Task. 
Each task has the potential to succeed or fail (based on the exit code) and produces logs.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pipelineTaskName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "podTemplate": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"), + }, + }, + "stepOverrides": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepOverride"), + }, + }, + }, + }, + }, + "sidecarOverrides": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarOverride"), + }, + }, + }, + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata"), + }, + }, + "computeResources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute resources to use for this TaskRun", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepOverride", "k8s.io/api/core/v1.ResourceRequirements"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineTaskRunTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PipelineTaskRunTemplate is used to specify run specifications for all Task in pipelinerun.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "podTemplate": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"), + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + 
Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of a workspace to be provided by a PipelineRun.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in PipelineRuns. By default this field is false and so declared workspaces are required.", Type: []string{"boolean"}, Format: "", }, }, - "script": { + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PropertySpec defines the struct for object keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resolver": { + SchemaProps: spec.SchemaProps{ + Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + Type: []string{"string"}, + Format: "", + }, + }, + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params contains the parameters used to identify the referenced Tekton resource. 
Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, + } +} + +func schema_pkg_apis_pipeline_v1_ResultRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResultRef is a type that represents a reference to a task run result", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pipelineTask": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "result": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "resultsIndex": { + SchemaProps: spec.SchemaProps{ + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + "property": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"pipelineTask", "result", "resultsIndex", "property"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the Sidecar specified as a DNS_LABEL. Each Sidecar in a Task must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Image reference name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Sidecar's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Sidecar's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the Sidecar. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the Sidecar. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the Sidecar. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this Sidecar. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes to mount into the Sidecar's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the Sidecar.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of Sidecar liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of Sidecar service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod the Sidecar is running in has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to Sidecar lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the Sidecar's termination message will be written is mounted into the Sidecar's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the Sidecar status message on both success and failure. FallbackToLogsOnError will use the last chunk of Sidecar log output if the termination message file is empty and the Sidecar exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the Sidecar should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this Sidecar should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the Sidecar will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on Sidecar start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the Sidecar is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this Sidecar should allocate a TTY for itself, also requires 'stdin' to be true.
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Type: []string{"string"}, + Format: "", + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_SidecarState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SidecarState reports the results of running a sidecar in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a waiting container", + Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), + }, + }, + "running": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a running container", + Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), + }, + }, + "terminated": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a terminated container", + Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "imageID": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, + } +} + +func schema_pkg_apis_pipeline_v1_SkippedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. 
This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the Pipeline Task name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Reason is the cause of the PipelineTask being skipped.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "whenExpressions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"), + }, + }, + }, + }, + }, + }, + Required: []string{"name", "reason"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WhenExpression"}, + } +} + +func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Step runs a subcomponent of a Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the Step specified as a DNS_LABEL. Each Step in a Task must have a unique name.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the Step. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes to mount into the Step's filesystem. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the Step.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + "onError": { + SchemaProps: spec.SchemaProps{ + Description: "OnError defines the exiting behavior of a container on error; it can be set to [ continue | stopAndFail ]", + Type: []string{"string"}, + Format: "", + }, + }, + "stdoutConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stdout stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + "stderrConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stderr stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepOutputConfig stores configuration for a step output stream.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to duplicate stdout stream to on container's local filesystem.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_StepState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepState reports the results of running a step in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a waiting container", + Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), + }, + }, + "running": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a running container", + Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), + }, + }, + "terminated": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a terminated container", + Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "imageID": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, + } +} + +func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return
common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate is a template for a Step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Image reference name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the Step. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes to mount into the Step's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the Step.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds the desired state of the Task from the client", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskList contains a list of Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRef can be used to refer to a specific instance of a task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "TaskKind indicates the kind of the task, namespaced or cluster scoped.", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "API version of the referent", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskResult is used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in future work.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human-readable description of the result", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRun(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRun represents a single execution of a Task.
TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunDebug(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunDebug defines the breakpoint config for a particular TaskRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "breakpoint": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunInputs(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunInputs holds the input values that this task was invoked with.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Description: "TaskRunList contains a list of TaskRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunResult is used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { SchemaProps: spec.SchemaProps{ - Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in future work.", Type: []string{"string"}, Format: "", }, }, - "workspaces": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "value": { SchemaProps: spec.SchemaProps{ - Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to.
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), - }, - }, - }, + Description: "Value is the given value of the result", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"), }, }, }, - Required: []string{"name"}, + Required: []string{"name", "value"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"}, } } -func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_TaskRunSidecarOverride(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Step runs a subcomponent of a Task", + Description: "TaskRunSidecarOverride is used to override the values of a Sidecar in the corresponding Task.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "Name of the Step specified as a DNS_LABEL. Each Step in a Task must have a unique name.", + Description: "The name of the Sidecar to override.", Default: "", Type: []string{"string"}, Format: "", }, }, - "image": { + "resources": { SchemaProps: spec.SchemaProps{ - Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", - Type: []string{"string"}, - Format: "", + Description: "The resource requirements to apply to the Sidecar.", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, - "command": { + }, + Required: []string{"name", "resources"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceRequirements"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunSpec defines the desired state of TaskRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "debug": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug"), + }, + }, + "params": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\".
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param"), }, }, }, }, }, - "args": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", }, + }, + "taskRef": { SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, + Description: "no more than one of the TaskRef and TaskSpec may be specified.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef"), }, }, - "workingDir": { + "taskSpec": { SchemaProps: spec.SchemaProps{ - Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Used for cancelling a taskrun (and maybe more later on)", Type: []string{"string"}, Format: "", }, }, - "envFrom": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "statusMessage": { SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, + Description: "Status message for cancellation.", + Type: []string{"string"}, + Format: "", }, }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, + "timeout": { SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the Step. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, + Description: "Time after which the build times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), }, }, - "resources": { + "podTemplate": { SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + Description: "PodTemplate holds pod specific configuration", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template"), }, }, - "volumeMounts": { + "workspaces": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Volumes to mount into the Step's filesystem. Cannot be updated.", + Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding"), }, }, }, }, }, - "volumeDevices": { + "stepOverrides": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the Step.", + Description: "Overrides to apply to Steps in this TaskRun. If a field is specified in both a Step and a StepOverride, the value from the StepOverride will be used. This field is only supported when the alpha feature gate is enabled.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepOverride"), }, }, }, }, }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "script": { - SchemaProps: spec.SchemaProps{ - Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", - Type: []string{"string"}, - Format: "", - }, - }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout is the time after which the step times out. Defaults to never. Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), - }, - }, - "workspaces": { + "sidecarOverrides": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Description: "Overrides to apply to Sidecars in this TaskRun. If a field is specified in both a Sidecar and a SidecarOverride, the value from the SidecarOverride will be used. 
This field is only supported when the alpha feature gate is enabled.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarOverride"), }, }, }, }, }, - "onError": { - SchemaProps: spec.SchemaProps{ - Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ]", - Type: []string{"string"}, - Format: "", - }, - }, - "stdoutConfig": { + "computeResources": { SchemaProps: spec.SchemaProps{ - Description: "Stores configuration for the stdout stream of the step.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), - }, - }, - "stderrConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Stores configuration for the stderr stream of the step.", - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + Description: "Compute resources to use for this TaskRun", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, }, - Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, - } -} - -func schema_pkg_apis_pipeline_v1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "StepOutputConfig stores configuration for a step output stream.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path to duplicate stdout stream to on container's local filesystem.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } -func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_TaskRunStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "StepTemplate is a template for a Step", + Description: "TaskRunStatus defines the observed state of TaskRun", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "image": { + "observedGeneration": { SchemaProps: spec.SchemaProps{ - Description: "Image reference name. 
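The TaskRunSpec definition above maps one-to-one onto the Go struct in this package. A hedged sketch of a client populating it — the Go field names (StepOverrides, SidecarOverrides, ComputeResources) are inferred from the JSON property names above, and the overrides require the alpha feature gate per their descriptions:

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

var trSpec = v1.TaskRunSpec{
	TaskRef: &v1.TaskRef{Name: "build"},
	StepOverrides: []v1.TaskRunStepOverride{{
		Name: "compile", // must match a Step name in the referenced Task
		Resources: corev1.ResourceRequirements{
			Requests: corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
		},
	}},
	SidecarOverrides: []v1.TaskRunSidecarOverride{{
		Name: "proxy",
		Resources: corev1.ResourceRequirements{
			Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("250m")},
		},
	}},
}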
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", + Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + Type: []string{"integer"}, + Format: "int64", }, }, - "command": { + "conditions": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", }, }, SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Description: "Conditions the latest available observations of a resource's current state.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("knative.dev/pkg/apis.Condition"), }, }, }, }, }, - "args": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, + "annotations": { SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the Step's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ + Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: "", @@ -1874,336 +3532,263 @@ func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) comm }, }, }, - "workingDir": { + "podName": { SchemaProps: spec.SchemaProps{ - Description: "Step's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", + Description: "PodName is the name of the pod responsible for executing this task's steps.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "envFrom": { + "startTime": { + SchemaProps: spec.SchemaProps{ + Description: "StartTime is the time the build is actually started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "completionTime": { + SchemaProps: spec.SchemaProps{ + Description: "CompletionTime is the time the build completed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "steps": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the Step. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the Step is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Description: "Steps describes the state of each build step container.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState"), }, }, }, }, }, - "env": { + "retriesStatus": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the Step. Cannot be updated.", + Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.EnvVar"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"), }, }, }, }, }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this Step. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { + "results": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Volumes to mount into the Step's filesystem. 
Cannot be updated.", + Description: "Results are the list of results written out by the task's containers", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeMount"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult"), }, }, }, }, }, - "volumeDevices": { + "sidecars": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", + "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the Step.", + Description: "The list has one entry per sidecar in the manifest. Each entry is represents the imageid of the corresponding sidecar.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState"), }, }, }, }, }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext defines the security options the Step should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { + "taskSpec": { SchemaProps: spec.SchemaProps{ - Description: "Spec holds the desired state of the Task from the client", - Default: map[string]interface{}{}, + Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.", Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), }, }, }, + Required: []string{"podName"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "knative.dev/pkg/apis.Condition"}, } } -func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_TaskRunStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskList contains a list of Task", + Description: "TaskRunStatusFields holds the fields of TaskRun's status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { + "podName": { SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Description: "PodName is the name of the pod responsible for executing this task's steps.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "apiVersion": { + "startTime": { SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
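Because TaskRunStatus embeds Knative's duck-typed Status (the conditions property above), consumers read completion through the Succeeded condition rather than poking at raw fields. A small usage sketch, assuming tr is a *v1.TaskRun fetched with a generated client:

import (
	"fmt"

	"knative.dev/pkg/apis"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func reportTaskRun(tr *v1.TaskRun) {
	cond := tr.Status.GetCondition(apis.ConditionSucceeded)
	switch {
	case cond == nil || cond.IsUnknown():
		fmt.Println("still running on pod", tr.Status.PodName)
	case cond.IsTrue():
		// TaskRunResult values, per the results schema above.
		for _, r := range tr.Status.Results {
			fmt.Printf("result %s = %s\n", r.Name, r.Value.StringVal)
		}
	default:
		fmt.Println("failed:", cond.Reason, cond.Message)
	}
}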
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", + Description: "StartTime is the time the build is actually started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "metadata": { + "completionTime": { SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Description: "CompletionTime is the time the build completed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, - "items": { + "steps": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, + Description: "Steps describes the state of each build step container.", + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState"), }, }, }, }, }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_pipeline_v1_TaskRef(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TaskRef can be used to refer to a specific instance of a task.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Type: []string{"string"}, - Format: "", + "retriesStatus": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - "kind": { SchemaProps: spec.SchemaProps{ - Description: "TaskKind indicates the kind of the task, namespaced or cluster scoped.", - Type: []string{"string"}, - Format: "", + Description: "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. 
All TaskRunStatus stored in RetriesStatus will have no date within the RetriesStatus as is redundant.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus"), + }, + }, + }, }, }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "API version of the referent", - Type: []string{"string"}, - Format: "", + "results": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - }, - }, - }, - } -} - -func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TaskResult used to describe the results of a task", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { SchemaProps: spec.SchemaProps{ - Description: "Name the given name", - Default: "", - Type: []string{"string"}, - Format: "", + Description: "Results are the list of results written out by the task's containers", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult"), + }, + }, + }, }, }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", - Type: []string{"string"}, - Format: "", + "sidecars": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, }, - }, - "properties": { SchemaProps: spec.SchemaProps{ - Description: "Properties is the JSON Schema properties to support key-value pairs results.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, + Description: "The list has one entry per sidecar in the manifest. 
Each entry is represents the imageid of the corresponding sidecar.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState"), }, }, }, }, }, - "description": { + "taskSpec": { SchemaProps: spec.SchemaProps{ - Description: "Description is a human-readable description of the result", - Type: []string{"string"}, - Format: "", + Description: "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), }, }, }, - Required: []string{"name"}, + Required: []string{"podName"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.SidecarState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepState", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunStatus", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } -func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_pipeline_v1_TaskRunStepOverride(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRunResult used to describe the results of a task", + Description: "TaskRunStepOverride is used to override the values of a Step in the corresponding Task.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { SchemaProps: spec.SchemaProps{ - Description: "Name the given name", + Description: "The name of the Step to override.", Default: "", Type: []string{"string"}, Format: "", }, }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { + "resources": { SchemaProps: spec.SchemaProps{ - Description: "Value the given value of the result", + Description: "The resource requirements to apply to the Step.", Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"), + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), }, }, }, - Required: []string{"name", "value"}, + Required: []string{"name", "resources"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamValue"}, + "k8s.io/api/core/v1.ResourceRequirements"}, } } @@ -2349,6 +3934,39 @@ func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.O } } +func schema_pkg_apis_pipeline_v1_TimeoutFields(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TimeoutFields allows granular specification of pipeline, task, and finally timeouts", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pipeline": { + SchemaProps: spec.SchemaProps{ + Description: "Pipeline sets the maximum allowed duration for execution of the entire pipeline. 
The sum of individual timeouts for tasks and finally must not exceed this value.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "tasks": { + SchemaProps: spec.SchemaProps{ + Description: "Tasks sets the maximum allowed duration of this pipeline's tasks", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "finally": { + SchemaProps: spec.SchemaProps{ + Description: "Finally sets the maximum allowed duration of this pipeline's finally", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + func schema_pkg_apis_pipeline_v1_WhenExpression(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2451,12 +4069,24 @@ func schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), }, }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "Projected represents a projected volume that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go index 075e35a6f0..7a64329765 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -320,19 +320,23 @@ func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefi return errs } -func validateParametersInTaskMatrix(matrix []Param) (errs *apis.FieldError) { - for _, param := range matrix { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) +func validateParametersInTaskMatrix(matrix *Matrix) (errs *apis.FieldError) { + if matrix != nil { + for _, param := range matrix.Params { + if param.Value.Type != ParamTypeArray { + errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) + } } } return errs } -func validateParameterInOneOfMatrixOrParams(matrix []Param, params []Param) (errs *apis.FieldError) { +func validateParameterInOneOfMatrixOrParams(matrix *Matrix, params []Param) (errs *apis.FieldError) { matrixParameterNames := sets.NewString() - for _, param := range matrix { - matrixParameterNames.Insert(param.Name) + if matrix != nil { + for _, param := range matrix.Params { + matrixParameterNames.Insert(param.Name) 
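Circling back to the WorkspaceBinding additions earlier in this hunk: projected and csi join the existing volume sources for workspaces. A sketch of a projected binding, with Go field names inferred from the JSON properties above:

import (
	corev1 "k8s.io/api/core/v1"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

// Merge a ConfigMap (and potentially secrets, downward API, etc.) into one
// read-only workspace via a projected volume.
var ws = v1.WorkspaceBinding{
	Name: "trusted-certs",
	Projected: &corev1.ProjectedVolumeSource{
		Sources: []corev1.VolumeProjection{{
			ConfigMap: &corev1.ConfigMapProjection{
				LocalObjectReference: corev1.LocalObjectReference{Name: "ca-bundle"},
			},
		}},
	},
}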
+ } } for _, param := range params { if matrixParameterNames.Has(param.Name) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go index 716d38549e..9062358aa1 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_types.go @@ -189,8 +189,7 @@ type PipelineTask struct { // Matrix declares parameters used to fan out this task. // +optional - // +listType=atomic - Matrix []Param `json:"matrix,omitempty"` + Matrix *Matrix `json:"matrix,omitempty"` // Workspaces maps workspaces from the pipeline spec to the workspaces // declared in the Task. @@ -205,6 +204,16 @@ type PipelineTask struct { Timeout *metav1.Duration `json:"timeout,omitempty"` } +// Matrix is used to fan out Tasks in a Pipeline +type Matrix struct { + // Params is a list of parameters used to fan out the pipelineTask + // Params takes only `Parameters` of type `"array"` + // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. + // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. + // +listType=atomic + Params []Param `json:"params,omitempty"` +} + // validateRefOrSpec validates at least one of taskRef or taskSpec is specified func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { // can't have both taskRef and taskSpec at the same time @@ -256,16 +265,21 @@ func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) if pt.TaskRef.Resolver != "" { errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) } - if len(pt.TaskRef.Resource) > 0 { - errs = errs.Also(apis.ErrDisallowedFields("taskref.resource")) + if len(pt.TaskRef.Params) > 0 { + errs = errs.Also(apis.ErrDisallowedFields("taskref.params")) } } } return errs } +// IsMatrixed return whether pipeline task is matrixed +func (pt *PipelineTask) IsMatrixed() bool { + return pt.Matrix != nil && len(pt.Matrix.Params) > 0 +} + func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { - if len(pt.Matrix) != 0 { + if pt.IsMatrixed() { // This is an alpha feature and will fail validation if it's used in a pipeline spec // when the enable-api-fields feature gate is anything but "alpha". errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) @@ -310,11 +324,11 @@ func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { // GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. func (pt *PipelineTask) GetMatrixCombinationsCount() int { - if len(pt.Matrix) == 0 { + if !pt.IsMatrixed() { return 0 } count := 1 - for _, param := range pt.Matrix { + for _, param := range pt.Matrix.Params { count *= len(param.Value.ArrayVal) } return count @@ -468,29 +482,22 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { return } -// Deps returns all other PipelineTask dependencies of this PipelineTask, based on ordering +// Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering func (pt PipelineTask) Deps() []string { - deps := []string{} - - deps = append(deps, pt.orderingDeps()...) 
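The matrix rework above replaces the flat []Param with a *Matrix wrapper, and IsMatrixed/GetMatrixCombinationsCount stay nil-safe against the new pointer. A worked sketch of the fan-out arithmetic — two platforms times three versions yields six combinations:

import (
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func matrixFanOut() {
	pt := v1.PipelineTask{
		Name:    "build",
		TaskRef: &v1.TaskRef{Name: "golang-build"},
		Matrix: &v1.Matrix{Params: []v1.Param{
			{Name: "platform", Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"linux/amd64", "linux/arm64"}}},
			{Name: "version", Value: v1.ParamValue{Type: v1.ParamTypeArray, ArrayVal: []string{"1.17", "1.18", "1.19"}}},
		}},
	}
	fmt.Println(pt.IsMatrixed())                 // true
	fmt.Println(pt.GetMatrixCombinationsCount()) // 6, i.e. 2 * 3
}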
+ // hold the list of dependencies in a set to avoid duplicates + deps := sets.NewString() - uniqueDeps := sets.NewString() - for _, w := range deps { - if uniqueDeps.Has(w) { - continue - } - uniqueDeps.Insert(w) + // add any new dependents from result references - resource dependency + for _, ref := range PipelineTaskResultRefs(&pt) { + deps.Insert(ref.PipelineTask) } - return uniqueDeps.List() -} - -func (pt PipelineTask) orderingDeps() []string { - orderingDeps := []string{} + // add any new dependents from runAfter - order dependency for _, runAfter := range pt.RunAfter { - orderingDeps = append(orderingDeps, runAfter) + deps.Insert(runAfter) } - return orderingDeps + + return deps.List() } // PipelineTaskList is a list of PipelineTasks diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go index 8e5fd40b2c..216d424cf7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go @@ -53,17 +53,16 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(ValidatePipelineTasks(ctx, ps.Tasks, ps.Finally)) // Validate the pipeline task graph errs = errs.Also(validateGraph(ps.Tasks)) - errs = errs.Also(validateParamResults(ps.Tasks)) // The parameter variables should be valid - errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks")) - errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally")) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally")) errs = errs.Also(validatePipelineContextVariables(ps.Tasks).ViaField("tasks")) errs = errs.Also(validatePipelineContextVariables(ps.Finally).ViaField("finally")) errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. 
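The rewritten Deps() just above now unions resource dependencies (result references) with ordering dependencies (runAfter) in a single string set, so a task named in both ways is reported once, in sorted order. A sketch:

import (
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func showDeps() {
	pt := v1.PipelineTask{
		Name:     "deploy",
		RunAfter: []string{"build"},
		Params: []v1.Param{{
			Name:  "image",
			Value: v1.ParamValue{Type: v1.ParamTypeString, StringVal: "$(tasks.build.results.image)"},
		}},
	}
	// "build" is both an ordering dependency (runAfter) and a resource
	// dependency (result reference); the set collapses the duplicate.
	fmt.Println(pt.Deps()) // [build]
}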
errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -106,7 +105,10 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) // validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in // the pipeline -func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { +func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { + if config.ValidateParameterVariablesAndWorkspaces(ctx) == false { + return nil + } workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -118,10 +120,10 @@ func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []P return errs } -// validatePipelineParameterVariables validates parameters with those specified by each pipeline task, +// ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches // with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { parameterNames := sets.NewString() arrayParameterNames := sets.NewString() objectParameterNameKeys := map[string][]string{} @@ -145,13 +147,18 @@ func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTas } } } - return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + } + return errs } func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + if task.IsMatrixed() { + errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + } errs = errs.Also(task.When.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, 
objectParamNameKeys).ViaIndex(idx)) } return errs @@ -171,7 +178,11 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { ) var paramValues []string for _, task := range tasks { - for _, param := range append(task.Params, task.Matrix...) { + var matrixParams []Param + if task.IsMatrixed() { + matrixParams = task.Matrix.Params + } + for _, param := range append(task.Params, matrixParams...) { paramValues = append(paramValues, param.Value.StringVal) paramValues = append(paramValues, param.Value.ArrayVal...) } @@ -220,26 +231,6 @@ func validatePipelineContextVariablesInParamValues(paramValues []string, prefix return errs } -// validateParamResults ensures that task result variables are properly configured -func validateParamResults(tasks []PipelineTask) (errs *apis.FieldError) { - for idx, task := range tasks { - for _, param := range task.Params { - expressions, ok := GetVarSubstitutionExpressionsForParam(param) - if ok { - if LooksLikeContainsResultRefs(expressions) { - expressions = filter(expressions, looksLikeResultRef) - resultRefs := NewResultRefs(expressions) - if len(expressions) != len(resultRefs) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs), - "value").ViaFieldKey("params", param.Name).ViaFieldIndex("tasks", idx)) - } - } - } - } - } - return errs -} - func filter(arr []string, cond func(string) bool) []string { result := []string{} for i := range arr { @@ -377,11 +368,11 @@ func validateWhenExpressions(tasks []PipelineTask, finalTasks []PipelineTask) (e // validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency // cycle or that they rely on values from Tasks that ran previously. -func validateGraph(tasks []PipelineTask) *apis.FieldError { +func validateGraph(tasks []PipelineTask) (errs *apis.FieldError) { if _, err := dag.Build(PipelineTaskList(tasks), PipelineTaskList(tasks).Deps()); err != nil { - return apis.ErrInvalidValue(err.Error(), "tasks") + errs = errs.Also(apis.ErrInvalidValue(err.Error(), "tasks")) } - return nil + return errs } func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.FieldError) { @@ -394,7 +385,7 @@ func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.Field func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, finally []PipelineTask) (errs *apis.FieldError) { matrixedPipelineTasks := sets.String{} for _, pt := range tasks { - if len(pt.Matrix) != 0 { + if pt.IsMatrixed() { matrixedPipelineTasks.Insert(pt.Name) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_types.go new file mode 100644 index 0000000000..631c5646d3 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_types.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// PipelineRef can be used to refer to a specific instance of a Pipeline. +type PipelineRef struct { + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name,omitempty"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // ResolverRef allows referencing a Pipeline in a remote location + // like a git repo. This field is only supported when the alpha + // feature gate is enabled. + // +optional + ResolverRef `json:",omitempty"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go new file mode 100644 index 0000000000..8fd971828c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelineref_validation.go @@ -0,0 +1,44 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" + "knative.dev/pkg/apis" +) + +// Validate ensures that a supplied PipelineRef field is populated +// correctly. No errors are returned for a nil PipelineRef. +func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { + if ref == nil { + return + } + + switch { + case ref.Resolver != "": + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + } + case ref.Name == "": + errs = errs.Also(apis.ErrMissingField("name")) + } + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_conversion.go new file mode 100644 index 0000000000..2bb626dda8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_conversion.go @@ -0,0 +1,42 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
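PipelineRef.Validate above makes name and resolver mutually exclusive and requires at least one of them, with resolver additionally gated on enable-api-fields=alpha. A sketch of the three outcomes under a default configuration, assuming ResolverRef's Resolver field accepts a plain string value:

import (
	"context"
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func refOutcomes() {
	ctx := context.Background()

	fmt.Println((&v1.PipelineRef{Name: "release"}).Validate(ctx)) // <nil>
	fmt.Println((&v1.PipelineRef{}).Validate(ctx))                // missing field(s): name

	both := &v1.PipelineRef{Name: "release", ResolverRef: v1.ResolverRef{Resolver: "git"}}
	fmt.Println(both.Validate(ctx))
	// expected exactly one, got both: name, resolver
	// (plus the alpha feature-gate error while enable-api-fields is "stable")
}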
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +var _ apis.Convertible = (*PipelineRun)(nil) + +// ConvertTo implements apis.Convertible +func (pr *PipelineRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (pr *PipelineRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go new file mode 100644 index 0000000000..d386021959 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_defaults.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" +) + +var _ apis.Defaultable = (*PipelineRun)(nil) + +// SetDefaults implements apis.Defaultable +func (pr *PipelineRun) SetDefaults(ctx context.Context) { + pr.Spec.SetDefaults(ctx) +} + +// SetDefaults implements apis.Defaultable +func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + + if prs.Timeouts != nil && prs.Timeouts.Pipeline == nil { + prs.Timeouts.Pipeline = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + } + + defaultSA := cfg.Defaults.DefaultServiceAccount + if prs.TaskRunTemplate.ServiceAccountName == "" && defaultSA != "" { + prs.TaskRunTemplate.ServiceAccountName = defaultSA + } + + defaultPodTemplate := cfg.Defaults.DefaultPodTemplate + prs.TaskRunTemplate.PodTemplate = pod.MergePodTemplateWithDefault(prs.TaskRunTemplate.PodTemplate, defaultPodTemplate) + + if prs.PipelineSpec != nil { + prs.PipelineSpec.SetDefaults(ctx) + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go new file mode 100644 index 0000000000..6cc8482e8e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_types.go @@ -0,0 +1,573 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/tektoncd/pipeline/pkg/apis/config" + apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + runv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/clock" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" +) + +// +genclient +// +genreconciler:krshapedlogic=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PipelineRun represents a single execution of a Pipeline. PipelineRuns are how +// the graph of Tasks declared in a Pipeline are executed; they specify inputs +// to Pipelines such as parameter values and capture operational aspects of the +// Tasks execution such as service account and tolerations. Creating a +// PipelineRun creates TaskRuns for Tasks in the referenced Pipeline. +// +// +k8s:openapi-gen=true +type PipelineRun struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec PipelineRunSpec `json:"spec,omitempty"` + // +optional + Status PipelineRunStatus `json:"status,omitempty"` +} + +// GetName Returns the name of the PipelineRun +func (pr *PipelineRun) GetName() string { + return pr.ObjectMeta.GetName() +} + +// GetStatusCondition returns the task run status as a ConditionAccessor +func (pr *PipelineRun) GetStatusCondition() apis.ConditionAccessor { + return &pr.Status +} + +// GetGroupVersionKind implements kmeta.OwnerRefable. +func (*PipelineRun) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind(pipeline.PipelineRunControllerName) +} + +// IsDone returns true if the PipelineRun's status indicates that it is done. 
+func (pr *PipelineRun) IsDone() bool {
+	return !pr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
+}
+
+// HasStarted function checks whether the pipelinerun has a valid start time set in its status
+func (pr *PipelineRun) HasStarted() bool {
+	return pr.Status.StartTime != nil && !pr.Status.StartTime.IsZero()
+}
+
+// IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state
+func (pr *PipelineRun) IsCancelled() bool {
+	return pr.Spec.Status == PipelineRunSpecStatusCancelled
+}
+
+// IsGracefullyCancelled returns true if the PipelineRun's spec status is set to CancelledRunFinally state
+func (pr *PipelineRun) IsGracefullyCancelled() bool {
+	return pr.Spec.Status == PipelineRunSpecStatusCancelledRunFinally
+}
+
+// IsGracefullyStopped returns true if the PipelineRun's spec status is set to StoppedRunFinally state
+func (pr *PipelineRun) IsGracefullyStopped() bool {
+	return pr.Spec.Status == PipelineRunSpecStatusStoppedRunFinally
+}
+
+// PipelineTimeout returns the applicable timeout for the PipelineRun
+func (pr *PipelineRun) PipelineTimeout(ctx context.Context) time.Duration {
+	if pr.Spec.Timeouts != nil && pr.Spec.Timeouts.Pipeline != nil {
+		return pr.Spec.Timeouts.Pipeline.Duration
+	}
+	return time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
+}
+
+// TasksTimeout returns the tasks timeout for the PipelineRun, if set,
+// or the tasks timeout computed from the Pipeline and Finally timeouts, if those are set.
+func (pr *PipelineRun) TasksTimeout() *metav1.Duration {
+	t := pr.Spec.Timeouts
+	if t == nil {
+		return nil
+	}
+	if t.Tasks != nil {
+		return t.Tasks
+	}
+	if t.Pipeline != nil && t.Finally != nil {
+		if t.Pipeline.Duration == apisconfig.NoTimeoutDuration || t.Finally.Duration == apisconfig.NoTimeoutDuration {
+			return nil
+		}
+		return &metav1.Duration{Duration: (t.Pipeline.Duration - t.Finally.Duration)}
+	}
+	return nil
+}
+
+// FinallyTimeout returns the finally timeout for the PipelineRun, if set,
+// or the finally timeout computed from the Pipeline and Tasks timeouts, if those are set.

+func (pr *PipelineRun) FinallyTimeout() *metav1.Duration { + t := pr.Spec.Timeouts + if t == nil { + return nil + } + if t.Finally != nil { + return t.Finally + } + if t.Pipeline != nil && t.Tasks != nil { + if t.Pipeline.Duration == apisconfig.NoTimeoutDuration || t.Tasks.Duration == apisconfig.NoTimeoutDuration { + return nil + } + return &metav1.Duration{Duration: (t.Pipeline.Duration - t.Tasks.Duration)} + } + return nil +} + +// IsPending returns true if the PipelineRun's spec status is set to Pending state +func (pr *PipelineRun) IsPending() bool { + return pr.Spec.Status == PipelineRunSpecStatusPending +} + +// GetNamespacedName returns a k8s namespaced name that identifies this PipelineRun +func (pr *PipelineRun) GetNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: pr.Namespace, Name: pr.Name} +} + +// HasTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout +func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.PipelineTimeout(ctx) + startTime := pr.Status.StartTime + + if !startTime.IsZero() { + if timeout == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout { + return true + } + } + return false +} + +// HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks +func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.TasksTimeout() + startTime := pr.Status.StartTime + + if !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + +// HasFinallyTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Finally, based on status.FinallyStartTime +func (pr *PipelineRun) HasFinallyTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.FinallyTimeout() + startTime := pr.Status.FinallyStartTime + + if startTime != nil && !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + +// HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is +// used for creating PersistentVolumeClaims with an OwnerReference for each run +func (pr *PipelineRun) HasVolumeClaimTemplate() bool { + for _, ws := range pr.Spec.Workspaces { + if ws.VolumeClaimTemplate != nil { + return true + } + } + return false +} + +// PipelineRunSpec defines the desired state of PipelineRun +type PipelineRunSpec struct { + // +optional + PipelineRef *PipelineRef `json:"pipelineRef,omitempty"` + // +optional + PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"` + // Params is a list of parameter names and values. + // +listType=atomic + Params []Param `json:"params,omitempty"` + + // Used for cancelling a pipelinerun (and maybe more later on) + // +optional + Status PipelineRunSpecStatus `json:"status,omitempty"` + // Time after which the Pipeline times out. 
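Taken together, the predicates above give callers a compact state machine over spec.status, and TasksTimeout/FinallyTimeout derive the missing budget from timeouts.pipeline. A sketch, with pr assumed to be a fetched *v1.PipelineRun and field names taken from the PipelineRunSpec in this file:

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func inspect(ctx context.Context, pr *v1.PipelineRun) {
	switch {
	case pr.IsPending():
		fmt.Println("held back: spec.status is PipelineRunPending")
	case pr.IsGracefullyCancelled() || pr.IsGracefullyStopped():
		fmt.Println("winding down; finally tasks may still run")
	case pr.IsDone():
		fmt.Println("finished")
	default:
		fmt.Println("running; overall timeout:", pr.PipelineTimeout(ctx))
	}

	// Worked example of the timeout arithmetic: with only pipeline and
	// finally set, TasksTimeout returns pipeline - finally = 45m.
	pr.Spec.Timeouts = &v1.TimeoutFields{
		Pipeline: &metav1.Duration{Duration: 60 * time.Minute},
		Finally:  &metav1.Duration{Duration: 15 * time.Minute},
	}
	fmt.Println(pr.TasksTimeout()) // 45m0s
}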
+// PipelineRunSpec defines the desired state of PipelineRun
+type PipelineRunSpec struct {
+	// +optional
+	PipelineRef *PipelineRef `json:"pipelineRef,omitempty"`
+	// +optional
+	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
+	// Params is a list of parameter names and values.
+	// +listType=atomic
+	Params []Param `json:"params,omitempty"`
+
+	// Used for cancelling a pipelinerun (and maybe more later on)
+	// +optional
+	Status PipelineRunSpecStatus `json:"status,omitempty"`
+	// Time after which the Pipeline times out.
+	// Currently three keys are accepted in the map:
+	// pipeline, tasks and finally,
+	// with Timeouts.pipeline >= Timeouts.tasks + Timeouts.finally
+	// +optional
+	Timeouts *TimeoutFields `json:"timeouts,omitempty"`
+
+	// TaskRunTemplate represents the template of the TaskRuns created for this PipelineRun
+	// +optional
+	TaskRunTemplate PipelineTaskRunTemplate `json:"taskRunTemplate,omitempty"`
+
+	// Workspaces holds a set of workspace bindings that must match names
+	// with those declared in the pipeline.
+	// +optional
+	// +listType=atomic
+	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
+	// TaskRunSpecs holds a set of runtime specs
+	// +optional
+	// +listType=atomic
+	TaskRunSpecs []PipelineTaskRunSpec `json:"taskRunSpecs,omitempty"`
+}
+
+// TimeoutFields allows granular specification of pipeline, task, and finally timeouts
+type TimeoutFields struct {
+	// Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value.
+	Pipeline *metav1.Duration `json:"pipeline,omitempty"`
+	// Tasks sets the maximum allowed duration of this pipeline's tasks
+	Tasks *metav1.Duration `json:"tasks,omitempty"`
+	// Finally sets the maximum allowed duration of this pipeline's finally
+	Finally *metav1.Duration `json:"finally,omitempty"`
+}
+
+// PipelineRunSpecStatus defines the pipelinerun spec status the user can provide
+type PipelineRunSpecStatus string
+
+const (
+	// PipelineRunSpecStatusCancelled indicates that the user wants to cancel the PipelineRun,
+	// if not already cancelled or terminated
+	PipelineRunSpecStatusCancelled = "Cancelled"
+
+	// PipelineRunSpecStatusCancelledRunFinally indicates that the user wants to cancel the pipeline run,
+	// if not already cancelled or terminated, but ensure finally is run normally
+	PipelineRunSpecStatusCancelledRunFinally = "CancelledRunFinally"
+
+	// PipelineRunSpecStatusStoppedRunFinally indicates that the user wants to stop the pipeline run,
+	// wait for already running tasks to be completed and run finally
+	// if not already cancelled or terminated
+	PipelineRunSpecStatusStoppedRunFinally = "StoppedRunFinally"
+
+	// PipelineRunSpecStatusPending indicates that the user wants to postpone starting a PipelineRun
+	// until some condition is met
+	PipelineRunSpecStatusPending = "PipelineRunPending"
+)
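The four spec statuses map onto the Is* accessors defined earlier. An illustrative sketch (again outside the diff, assuming the pipelinev1 alias) of how the graceful-cancellation state is distinguished from a hard cancel:

    package main

    import (
    	"fmt"

    	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
    )

    func main() {
    	var pr pipelinev1.PipelineRun
    	// Request a graceful cancellation: finally tasks still run.
    	pr.Spec.Status = pipelinev1.PipelineRunSpecStatusCancelledRunFinally
    	fmt.Println(pr.IsGracefullyCancelled()) // true
    	fmt.Println(pr.IsCancelled())           // false: "Cancelled" is a distinct state
    }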
+// PipelineRunStatus defines the observed state of PipelineRun
+type PipelineRunStatus struct {
+	duckv1beta1.Status `json:",inline"`
+
+	// PipelineRunStatusFields inlines the status fields.
+	PipelineRunStatusFields `json:",inline"`
+}
+
+// PipelineRunReason represents a reason for the pipeline run "Succeeded" condition
+type PipelineRunReason string
+
+const (
+	// PipelineRunReasonStarted is the reason set when the PipelineRun has just started
+	PipelineRunReasonStarted PipelineRunReason = "Started"
+	// PipelineRunReasonRunning is the reason set when the PipelineRun is running
+	PipelineRunReasonRunning PipelineRunReason = "Running"
+	// PipelineRunReasonSuccessful is the reason set when the PipelineRun completed successfully
+	PipelineRunReasonSuccessful PipelineRunReason = "Succeeded"
+	// PipelineRunReasonCompleted is the reason set when the PipelineRun completed successfully with one or more skipped Tasks
+	PipelineRunReasonCompleted PipelineRunReason = "Completed"
+	// PipelineRunReasonFailed is the reason set when the PipelineRun completed with a failure
+	PipelineRunReasonFailed PipelineRunReason = "Failed"
+	// PipelineRunReasonCancelled is the reason set when the PipelineRun was cancelled by the user
+	// This reason may be found with a corev1.ConditionFalse status, if the cancellation was processed successfully
+	// This reason may be found with a corev1.ConditionUnknown status, if the cancellation is being processed or failed
+	PipelineRunReasonCancelled PipelineRunReason = "Cancelled"
+	// PipelineRunReasonPending is the reason set when the PipelineRun is in the pending state
+	PipelineRunReasonPending PipelineRunReason = "PipelineRunPending"
+	// PipelineRunReasonTimedOut is the reason set when the PipelineRun has timed out
+	PipelineRunReasonTimedOut PipelineRunReason = "PipelineRunTimeout"
+	// PipelineRunReasonStopping indicates that no new Tasks will be scheduled by the controller, and the
+	// pipeline will stop once all running tasks complete their work
+	PipelineRunReasonStopping PipelineRunReason = "PipelineRunStopping"
+	// PipelineRunReasonCancelledRunningFinally indicates that the pipeline has been gracefully cancelled
+	// and no new Tasks will be scheduled by the controller, but final tasks are now running
+	PipelineRunReasonCancelledRunningFinally PipelineRunReason = "CancelledRunningFinally"
+	// PipelineRunReasonStoppedRunningFinally indicates that the pipeline has been gracefully stopped
+	// and no new Tasks will be scheduled by the controller, but final tasks are now running
+	PipelineRunReasonStoppedRunningFinally PipelineRunReason = "StoppedRunningFinally"
+)
+
+func (t PipelineRunReason) String() string {
+	return string(t)
+}
+
+var pipelineRunCondSet = apis.NewBatchConditionSet()
+
+// GetCondition returns the Condition matching the given type.
+func (pr *PipelineRunStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+	return pipelineRunCondSet.Manage(pr).GetCondition(t)
+}
+
+// InitializeConditions will set all conditions in pipelineRunCondSet to unknown for the PipelineRun
+// and set the started time to the current time
+func (pr *PipelineRunStatus) InitializeConditions(c clock.PassiveClock) {
+	started := false
+	if pr.StartTime.IsZero() {
+		pr.StartTime = &metav1.Time{Time: c.Now()}
+		started = true
+	}
+	conditionManager := pipelineRunCondSet.Manage(pr)
+	conditionManager.InitializeConditions()
+	// Ensure the started reason is set for the "Succeeded" condition
+	if started {
+		initialCondition := conditionManager.GetCondition(apis.ConditionSucceeded)
+		initialCondition.Reason = PipelineRunReasonStarted.String()
+		conditionManager.SetCondition(*initialCondition)
+	}
+}
+
+// SetCondition sets the condition, unsetting previous conditions with the same
+// type as necessary.
+func (pr *PipelineRunStatus) SetCondition(newCond *apis.Condition) {
+	if newCond != nil {
+		pipelineRunCondSet.Manage(pr).SetCondition(*newCond)
+	}
+}
+
+// MarkSucceeded changes the Succeeded condition to True with the provided reason and message.
+func (pr *PipelineRunStatus) MarkSucceeded(reason, messageFormat string, messageA ...interface{}) {
+	pipelineRunCondSet.Manage(pr).MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...)
+	succeeded := pr.GetCondition(apis.ConditionSucceeded)
+	pr.CompletionTime = &succeeded.LastTransitionTime.Inner
+}
+
+// MarkFailed changes the Succeeded condition to False with the provided reason and message.
+func (pr *PipelineRunStatus) MarkFailed(reason, messageFormat string, messageA ...interface{}) {
+	pipelineRunCondSet.Manage(pr).MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...)
+	succeeded := pr.GetCondition(apis.ConditionSucceeded)
+	pr.CompletionTime = &succeeded.LastTransitionTime.Inner
+}
+
+// MarkRunning changes the Succeeded condition to Unknown with the provided reason and message.
+func (pr *PipelineRunStatus) MarkRunning(reason, messageFormat string, messageA ...interface{}) {
+	pipelineRunCondSet.Manage(pr).MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...)
+}
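The condition lifecycle goes Initialize (Unknown/"Started") to MarkRunning to one of MarkSucceeded/MarkFailed, which also stamps CompletionTime. A minimal sketch of that flow, assuming the pipelinev1 alias and the real clock from k8s.io/utils/clock; illustrative only:

    package main

    import (
    	"fmt"

    	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
    	"k8s.io/utils/clock"
    	"knative.dev/pkg/apis"
    )

    func main() {
    	pr := pipelinev1.PipelineRun{}
    	// Sets Succeeded to Unknown with reason "Started" and records StartTime.
    	pr.Status.InitializeConditions(clock.RealClock{})
    	fmt.Println(pr.IsDone()) // false

    	// Flipping Succeeded to True also sets CompletionTime.
    	pr.Status.MarkSucceeded(pipelinev1.PipelineRunReasonSuccessful.String(), "all Tasks completed")
    	cond := pr.Status.GetCondition(apis.ConditionSucceeded)
    	fmt.Println(pr.IsDone(), cond.Reason) // true Succeeded
    }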
+// ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.
+type ChildStatusReference struct {
+	runtime.TypeMeta `json:",inline"`
+	// Name is the name of the TaskRun or Run this is referencing.
+	Name string `json:"name,omitempty"`
+	// PipelineTaskName is the name of the PipelineTask this is referencing.
+	PipelineTaskName string `json:"pipelineTaskName,omitempty"`
+
+	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
+	// +optional
+	// +listType=atomic
+	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
+}
+
+// PipelineRunStatusFields holds the fields of PipelineRunStatus' status.
+// This is defined separately and inlined so that other types can readily
+// consume these fields via duck typing.
+type PipelineRunStatusFields struct {
+	// StartTime is the time the PipelineRun is actually started.
+	// +optional
+	StartTime *metav1.Time `json:"startTime,omitempty"`
+
+	// CompletionTime is the time the PipelineRun completed.
+	// +optional
+	CompletionTime *metav1.Time `json:"completionTime,omitempty"`
+
+	// Results are the list of results written out by the pipeline task's containers
+	// +optional
+	// +listType=atomic
+	Results []PipelineRunResult `json:"results,omitempty"`
+
+	// PipelineSpec contains the exact spec used to instantiate the run
+	PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"`
+
+	// list of tasks that were skipped due to when expressions evaluating to false
+	// +optional
+	// +listType=atomic
+	SkippedTasks []SkippedTask `json:"skippedTasks,omitempty"`
+
+	// list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.
+	// +optional
+	// +listType=atomic
+	ChildReferences []ChildStatusReference `json:"childReferences,omitempty"`
+
+	// FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.
+	// +optional
+	FinallyStartTime *metav1.Time `json:"finallyStartTime,omitempty"`
+}
+
+// SkippedTask is used to describe the Tasks that were skipped due to their When Expressions
+// evaluating to False. This is a struct because we are looking into including more details
+// about the When Expressions that caused this Task to be skipped.
+type SkippedTask struct {
+	// Name is the Pipeline Task name
+	Name string `json:"name"`
+	// Reason is the cause of the PipelineTask being skipped.
+	Reason SkippingReason `json:"reason"`
+	// WhenExpressions is the list of checks guarding the execution of the PipelineTask
+	// +optional
+	// +listType=atomic
+	WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"`
+}
+
+// SkippingReason explains why a PipelineTask was skipped.
+type SkippingReason string
+
+const (
+	// WhenExpressionsSkip means the task was skipped due to at least one of its when expressions evaluating to false
+	WhenExpressionsSkip SkippingReason = "When Expressions evaluated to false"
+	// ParentTasksSkip means the task was skipped because its parent was skipped
+	ParentTasksSkip SkippingReason = "Parent Tasks were skipped"
+	// StoppingSkip means the task was skipped because the pipeline run is stopping
+	StoppingSkip SkippingReason = "PipelineRun was stopping"
+	// GracefullyCancelledSkip means the task was skipped because the pipeline run has been gracefully cancelled
+	GracefullyCancelledSkip SkippingReason = "PipelineRun was gracefully cancelled"
+	// GracefullyStoppedSkip means the task was skipped because the pipeline run has been gracefully stopped
+	GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped"
+	// MissingResultsSkip means the task was skipped because it's missing necessary results
+	MissingResultsSkip SkippingReason = "Results were missing"
+	// PipelineTimedOutSkip means the task was skipped because the PipelineRun has passed its overall timeout.
+	PipelineTimedOutSkip SkippingReason = "PipelineRun timeout has been reached"
+	// TasksTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Tasks.
+	TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached"
+	// FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally.
+ FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached" + // None means the task was not skipped + None SkippingReason = "None" +) + +// PipelineRunResult used to describe the results of a pipeline +type PipelineRunResult struct { + // Name is the result's name as declared by the Pipeline + Name string `json:"name"` + + // Value is the result returned from the execution of this PipelineRun + Value ResultValue `json:"value"` +} + +// PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status +type PipelineRunTaskRunStatus struct { + // PipelineTaskName is the name of the PipelineTask. + PipelineTaskName string `json:"pipelineTaskName,omitempty"` + // Status is the TaskRunStatus for the corresponding TaskRun + // +optional + Status *TaskRunStatus `json:"status,omitempty"` + // WhenExpressions is the list of checks guarding the execution of the PipelineTask + // +optional + // +listType=atomic + WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` +} + +// PipelineRunRunStatus contains the name of the PipelineTask for this Run and the Run's Status +type PipelineRunRunStatus struct { + // PipelineTaskName is the name of the PipelineTask. + PipelineTaskName string `json:"pipelineTaskName,omitempty"` + // Status is the RunStatus for the corresponding Run + // +optional + Status *runv1alpha1.RunStatus `json:"status,omitempty"` + // WhenExpressions is the list of checks guarding the execution of the PipelineTask + // +optional + // +listType=atomic + WhenExpressions []WhenExpression `json:"whenExpressions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PipelineRunList contains a list of PipelineRun +type PipelineRunList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []PipelineRun `json:"items,omitempty"` +} + +// PipelineTaskRun reports the results of running a step in the Task. Each +// task has the potential to succeed or fail (based on the exit code) +// and produces logs. +type PipelineTaskRun struct { + Name string `json:"name,omitempty"` +} + +// PipelineTaskRunSpec can be used to configure specific +// specs for a concrete Task +type PipelineTaskRunSpec struct { + PipelineTaskName string `json:"pipelineTaskName,omitempty"` + ServiceAccountName string `json:"serviceAccountName,omitempty"` + PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"` + // +listType=atomic + StepOverrides []TaskRunStepOverride `json:"stepOverrides,omitempty"` + // +listType=atomic + SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"` + + // +optional + Metadata *PipelineTaskMetadata `json:"metadata,omitempty"` + + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` +} + +// GetTaskRunSpec returns the task specific spec for a given +// PipelineTask if configured, otherwise it returns the PipelineRun's default. 
+func (pr *PipelineRun) GetTaskRunSpec(pipelineTaskName string) PipelineTaskRunSpec {
+	s := PipelineTaskRunSpec{
+		PipelineTaskName:   pipelineTaskName,
+		ServiceAccountName: pr.Spec.TaskRunTemplate.ServiceAccountName,
+		PodTemplate:        pr.Spec.TaskRunTemplate.PodTemplate,
+	}
+	for _, task := range pr.Spec.TaskRunSpecs {
+		if task.PipelineTaskName == pipelineTaskName {
+			if task.PodTemplate != nil {
+				s.PodTemplate = task.PodTemplate
+			}
+			if task.ServiceAccountName != "" {
+				s.ServiceAccountName = task.ServiceAccountName
+			}
+			s.StepOverrides = task.StepOverrides
+			s.SidecarOverrides = task.SidecarOverrides
+			s.Metadata = task.Metadata
+			s.ComputeResources = task.ComputeResources
+		}
+	}
+	return s
+}
+
+// PipelineTaskRunTemplate is used to specify run specifications for all Tasks in a PipelineRun.
+type PipelineTaskRunTemplate struct {
+	// +optional
+	PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"`
+	// +optional
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+}
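GetTaskRunSpec resolves per-task entries in taskRunSpecs against the run-wide taskRunTemplate. A short sketch of that fallback behavior (illustrative; the task and service-account names are invented):

    package main

    import (
    	"fmt"

    	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
    )

    func main() {
    	pr := pipelinev1.PipelineRun{
    		Spec: pipelinev1.PipelineRunSpec{
    			TaskRunTemplate: pipelinev1.PipelineTaskRunTemplate{
    				ServiceAccountName: "default-sa",
    			},
    			TaskRunSpecs: []pipelinev1.PipelineTaskRunSpec{
    				{PipelineTaskName: "build", ServiceAccountName: "builder-sa"},
    			},
    		},
    	}
    	// "build" gets its per-task override; anything else falls back to the template.
    	fmt.Println(pr.GetTaskRunSpec("build").ServiceAccountName) // builder-sa
    	fmt.Println(pr.GetTaskRunSpec("test").ServiceAccountName)  // default-sa
    }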
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go
new file mode 100644
index 0000000000..bc43c7d74b
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipelinerun_validation.go
@@ -0,0 +1,291 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/config"
+	"github.com/tektoncd/pipeline/pkg/apis/validate"
+	"github.com/tektoncd/pipeline/pkg/apis/version"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/apis"
+)
+
+var _ apis.Validatable = (*PipelineRun)(nil)
+
+// Validate pipelinerun
+func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError {
+	if apis.IsInDelete(ctx) {
+		return nil
+	}
+
+	errs := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata")
+
+	if pr.IsPending() && pr.HasStarted() {
+		errs = errs.Also(apis.ErrInvalidValue("PipelineRun cannot be Pending after it is started", "spec.status"))
+	}
+
+	return errs.Also(pr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec"))
+}
+
+// Validate pipelinerun spec
+func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
+	// Must have exactly one of pipelineRef and pipelineSpec.
+	if ps.PipelineRef == nil && ps.PipelineSpec == nil {
+		errs = errs.Also(apis.ErrMissingOneOf("pipelineRef", "pipelineSpec"))
+	}
+	if ps.PipelineRef != nil && ps.PipelineSpec != nil {
+		errs = errs.Also(apis.ErrMultipleOneOf("pipelineRef", "pipelineSpec"))
+	}
+
+	// Validate PipelineRef if it's present
+	if ps.PipelineRef != nil {
+		errs = errs.Also(ps.PipelineRef.Validate(ctx).ViaField("pipelineRef"))
+	}
+
+	// Validate PipelineSpec if it's present
+	if ps.PipelineSpec != nil {
+		ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true)
+		errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec"))
+	}
+
+	// Validate PipelineRun parameters
+	errs = errs.Also(ps.validatePipelineRunParameters(ctx))
+
+	// Validate propagated parameters
+	errs = errs.Also(ps.validateInlineParameters(ctx))
+
+	if ps.Timeouts != nil {
+		// tasks timeout should be a valid duration of at least 0.
+		errs = errs.Also(validateTimeoutDuration("tasks", ps.Timeouts.Tasks))
+
+		// finally timeout should be a valid duration of at least 0.
+		errs = errs.Also(validateTimeoutDuration("finally", ps.Timeouts.Finally))
+
+		// pipeline timeout should be a valid duration of at least 0.
+		errs = errs.Also(validateTimeoutDuration("pipeline", ps.Timeouts.Pipeline))
+
+		if ps.Timeouts.Pipeline != nil {
+			errs = errs.Also(ps.validatePipelineTimeout(ps.Timeouts.Pipeline.Duration, "should be <= pipeline duration"))
+		} else {
+			defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute
+			errs = errs.Also(ps.validatePipelineTimeout(defaultTimeout, "should be <= default timeout duration"))
+		}
+	}
+
+	errs = errs.Also(validateSpecStatus(ps.Status))
+
+	if ps.Workspaces != nil {
+		wsNames := make(map[string]int)
+		for idx, ws := range ps.Workspaces {
+			errs = errs.Also(ws.Validate(ctx).ViaFieldIndex("workspaces", idx))
+			if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists {
+				errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), "name").ViaFieldIndex("workspaces", idx))
+			}
+			wsNames[ws.Name] = idx
+		}
+	}
+	for idx, trs := range ps.TaskRunSpecs {
+		errs = errs.Also(validateTaskRunSpec(ctx, trs).ViaIndex(idx).ViaField("taskRunSpecs"))
+	}
+
+	return errs
+}
+
+func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) {
+	if len(ps.Params) == 0 {
+		return errs
+	}
+
+	// Validate parameter types and uniqueness
+	errs = errs.Also(ValidateParameters(ctx, ps.Params).ViaField("params"))
+
+	// Validate that task results aren't used in param values
+	for _, param := range ps.Params {
+		expressions, ok := GetVarSubstitutionExpressionsForParam(param)
+		if ok {
+			if LooksLikeContainsResultRefs(expressions) {
+				expressions = filter(expressions, looksLikeResultRef)
+				resultRefs := NewResultRefs(expressions)
+				if len(resultRefs) > 0 {
+					errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("cannot use result expressions in %v as PipelineRun parameter values", expressions),
+						"value").ViaFieldKey("params", param.Name))
+				}
+			}
+		}
+	}
+
+	return errs
+}
+
+// validateInlineParameters validates parameters that are defined inline.
+// This is crucial for propagated parameters since the parameters could
+// be defined under pipelineRun and then called directly in the task steps.
+// In this case, parameters cannot be validated by the underlying pipelineSpec
+// or taskSpec since they may not have the parameters declared because of propagation.
+func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) {
+	if ps.PipelineSpec == nil {
+		return errs
+	}
+	var paramSpec []ParamSpec
+	for _, p := range ps.Params {
+		pSpec := ParamSpec{
+			Name:    p.Name,
+			Default: &p.Value,
+		}
+		paramSpec = append(paramSpec, pSpec)
+	}
+	paramSpec = appendParamSpec(paramSpec, ps.PipelineSpec.Params)
+	for _, pt := range ps.PipelineSpec.Tasks {
+		paramSpec = appendParam(paramSpec, pt.Params)
+		if pt.TaskSpec != nil && pt.TaskSpec.Params != nil {
+			paramSpec = appendParamSpec(paramSpec, pt.TaskSpec.Params)
+		}
+	}
+	if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil {
+		for _, pt := range ps.PipelineSpec.Tasks {
+			if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil {
+				errs = errs.Also(ValidateParameterVariables(
+					config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec))
+			}
+		}
+	}
+	return errs
+}
+
+func appendParamSpec(paramSpec []ParamSpec, params []ParamSpec) []ParamSpec {
+	for _, p := range params {
+		skip := false
+		for _, ps := range paramSpec {
+			if ps.Name == p.Name {
+				skip = true
+				break
+			}
+		}
+		if !skip {
+			paramSpec = append(paramSpec, p)
+		}
+	}
+	return paramSpec
+}
+
+func appendParam(paramSpec []ParamSpec, params []Param) []ParamSpec {
+	for _, p := range params {
+		skip := false
+		for _, ps := range paramSpec {
+			if ps.Name == p.Name {
+				skip = true
+				break
+			}
+		}
+		if !skip {
+			pSpec := ParamSpec{
+				Name:    p.Name,
+				Default: &p.Value,
+			}
+			paramSpec = append(paramSpec, pSpec)
+		}
+	}
+	return paramSpec
+}
+
+func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError {
+	switch status {
+	case "":
+		return nil
+	case PipelineRunSpecStatusPending:
+		return nil
+	case PipelineRunSpecStatusCancelled,
+		PipelineRunSpecStatusCancelledRunFinally,
+		PipelineRunSpecStatusStoppedRunFinally:
+		return nil
+	}
+
+	return apis.ErrInvalidValue(fmt.Sprintf("%s should be %s, %s, %s or %s", status,
+		PipelineRunSpecStatusCancelled,
+		PipelineRunSpecStatusCancelledRunFinally,
+		PipelineRunSpecStatusStoppedRunFinally,
+		PipelineRunSpecStatusPending), "status")
+}
+
+func validateTimeoutDuration(field string, d *metav1.Duration) (errs *apis.FieldError) {
+	if d != nil && d.Duration < 0 {
+		fieldPath := fmt.Sprintf("timeouts.%s", field)
+		return errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", d.Duration.String()), fieldPath))
+	}
+	return nil
+}
+
+func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorMsg string) (errs *apis.FieldError) {
+	if ps.Timeouts.Tasks != nil {
+		tasksTimeoutErr := false
+		tasksTimeoutStr := ps.Timeouts.Tasks.Duration.String()
+		if ps.Timeouts.Tasks.Duration > timeout && timeout != config.NoTimeoutDuration {
+			tasksTimeoutErr = true
+		}
+		if ps.Timeouts.Tasks.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration {
+			tasksTimeoutErr = true
+			tasksTimeoutStr += " (no timeout)"
+		}
+		if tasksTimeoutErr {
+			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s %s", tasksTimeoutStr, errorMsg), "timeouts.tasks"))
+		}
+	}
+
+	if ps.Timeouts.Finally != nil {
+		finallyTimeoutErr := false
+		finallyTimeoutStr := ps.Timeouts.Finally.Duration.String()
+		if ps.Timeouts.Finally.Duration > timeout && timeout != config.NoTimeoutDuration {
+			finallyTimeoutErr = true
+		}
+		if ps.Timeouts.Finally.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration {
+			finallyTimeoutErr = true
+			finallyTimeoutStr += " (no timeout)"
+		}
+		if finallyTimeoutErr {
+			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s %s", finallyTimeoutStr, errorMsg), "timeouts.finally"))
+		}
+	}
+
+	if ps.Timeouts.Tasks != nil && ps.Timeouts.Finally != nil {
+		if ps.Timeouts.Tasks.Duration+ps.Timeouts.Finally.Duration > timeout {
+			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s + %s %s", ps.Timeouts.Tasks.Duration.String(), ps.Timeouts.Finally.Duration.String(), errorMsg), "timeouts.tasks"))
+			errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s + %s %s", ps.Timeouts.Tasks.Duration.String(), ps.Timeouts.Finally.Duration.String(), errorMsg), "timeouts.finally"))
+		}
+	}
+	return errs
+}
+
+func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec) (errs *apis.FieldError) {
+	if trs.StepOverrides != nil {
+		errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides"))
+		errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides"))
+	}
+	if trs.SidecarOverrides != nil {
+		errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides"))
+		errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides"))
+	}
+	if trs.ComputeResources != nil {
+		errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources"))
+		errs = errs.Also(validateTaskRunComputeResources(trs.ComputeResources, trs.StepOverrides))
+	}
+	return errs
+}
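The webhook enforces the `pipeline >= tasks + finally` budget described earlier. A sketch of tripping that check (illustrative, outside the diff; the pipeline name is invented):

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	spec := pipelinev1.PipelineRunSpec{
    		PipelineRef: &pipelinev1.PipelineRef{Name: "build-and-test"},
    		Timeouts: &pipelinev1.TimeoutFields{
    			Pipeline: &metav1.Duration{Duration: time.Hour},
    			Tasks:    &metav1.Duration{Duration: 45 * time.Minute},
    			Finally:  &metav1.Duration{Duration: 30 * time.Minute},
    		},
    	}
    	// 45m + 30m exceeds the 1h pipeline timeout, so Validate reports
    	// field errors on timeouts.tasks and timeouts.finally.
    	fmt.Println(spec.Validate(context.Background()))
    }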
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go
index bb547b2a0f..140a3ffb7a 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go
@@ -28,21 +28,11 @@ type ResolverRef struct {
 	// resolution of the referenced Tekton resource, such as "git".
 	// +optional
 	Resolver ResolverName `json:"resolver,omitempty"`
-	// Resource contains the parameters used to identify the
+	// Params contains the parameters used to identify the
 	// referenced Tekton resource. Example entries might include
 	// "repo" or "path" but the set of params ultimately depends on
 	// the chosen resolver.
 	// +optional
 	// +listType=atomic
-	Resource []ResolverParam `json:"resource,omitempty"`
-}
-
-// ResolverParam is a single parameter passed to a resolver.
-type ResolverParam struct {
-	// Name is the name of the parameter that will be passed to the
-	// resolver.
-	Name string `json:"name"`
-	// Value is the string value of the parameter that will be
-	// passed to the resolver.
-	Value string `json:"value"`
+	Params []Param `json:"params,omitempty"`
 }
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go
index 1c026e1a37..8fcc1b9f5a 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resultref.go
@@ -39,6 +39,8 @@ const (
 	objectResultExpressionFormat = "tasks.<taskName>.results.<objectResultName>.<individualAttribute>"
 	// ResultTaskPart Constant used to define the "tasks" part of a pipeline result reference
 	ResultTaskPart = "tasks"
+	// ResultFinallyPart Constant used to define the "finally" part of a task result reference
+	ResultFinallyPart = "finally"
 	// ResultResultPart Constant used to define the "results" part of a pipeline result reference
 	ResultResultPart = "results"
 	// TODO(#2462) use one regex across all substitutions
@@ -92,7 +94,8 @@ func LooksLikeContainsResultRefs(expressions []string) bool {
 // looksLikeResultRef attempts to check if the given string looks like it contains any
 // result references. Returns true if it does, false otherwise
 func looksLikeResultRef(expression string) bool {
-	return strings.HasPrefix(expression, "task") && strings.Contains(expression, ".result")
+	subExpressions := strings.Split(expression, ".")
+	return len(subExpressions) >= 4 && (subExpressions[0] == ResultTaskPart || subExpressions[0] == ResultFinallyPart) && subExpressions[2] == ResultResultPart
 }
 
 // GetVarSubstitutionExpressionsForParam extracts all the value between "$(" and ")"" for a parameter
@@ -161,24 +164,22 @@ func stripVarSubExpression(expression string) string {
 // - Output: "", "", 0, "", error
 // TODO: may use regex for each type to handle possible reference formats
 func parseExpression(substitutionExpression string) (string, string, int, string, error) {
-	subExpressions := strings.Split(substitutionExpression, ".")
-
-	// For string result: tasks.<taskName>.results.<resultName>
-	// For array result: tasks.<taskName>.results.<resultName>[index]
-	if len(subExpressions) == 4 && subExpressions[0] == ResultTaskPart && subExpressions[2] == ResultResultPart {
-		resultName, stringIdx := ParseResultName(subExpressions[3])
-		if stringIdx != "" {
-			intIdx, _ := strconv.Atoi(stringIdx)
-			return subExpressions[1], resultName, intIdx, "", nil
+	if looksLikeResultRef(substitutionExpression) {
+		subExpressions := strings.Split(substitutionExpression, ".")
+		// For string result: tasks.<taskName>.results.<resultName>
+		// For array result: tasks.<taskName>.results.<resultName>[index]
+		if len(subExpressions) == 4 {
+			resultName, stringIdx := ParseResultName(subExpressions[3])
+			if stringIdx != "" {
+				intIdx, _ := strconv.Atoi(stringIdx)
+				return subExpressions[1], resultName, intIdx, "", nil
+			}
+			return subExpressions[1], resultName, 0, "", nil
+		} else if len(subExpressions) == 5 {
+			// For object type result: tasks.<taskName>.results.<resultName>.<objectKey>
+			return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil
 		}
-		return subExpressions[1], resultName, 0, "", nil
 	}
-
-	// For object type result: tasks.<taskName>.results.<resultName>.<objectKey>
-	if len(subExpressions) == 5 && subExpressions[0] == ResultTaskPart && subExpressions[2] == ResultResultPart {
-		return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil
-	}
-
 	return "", "", 0, "", fmt.Errorf("Must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat)
 }
@@ -199,7 +200,11 @@ func ParseResultName(resultName string) (string, string) {
 // in a PipelineTask and returns a list of any references that are found.
 func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef {
 	refs := []*ResultRef{}
-	for _, p := range append(pt.Params, pt.Matrix...) {
+	var matrixParams []Param
+	if pt.IsMatrixed() {
+		matrixParams = pt.Matrix.Params
+	}
+	for _, p := range append(pt.Params, matrixParams...) {
 		expressions, _ := GetVarSubstitutionExpressionsForParam(p)
 		refs = append(refs, NewResultRefs(expressions)...)
} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json index 4180dc28a3..b48f98ce5e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json @@ -140,6 +140,35 @@ } } }, + "v1.ChildStatusReference": { + "description": "ChildStatusReference is used to point to the statuses of individual TaskRuns and Runs within this PipelineRun.", + "type": "object", + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "description": "Name is the name of the TaskRun or Run this is referencing.", + "type": "string" + }, + "pipelineTaskName": { + "description": "PipelineTaskName is the name of the PipelineTask this is referencing.", + "type": "string" + }, + "whenExpressions": { + "description": "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WhenExpression" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1.EmbeddedTask": { "description": "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.", "type": "object", @@ -223,6 +252,21 @@ } } }, + "v1.Matrix": { + "description": "Matrix is used to fan out Tasks in a Pipeline", + "type": "object", + "properties": { + "params": { + "description": "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Param" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1.Param": { "description": "Param declares an ParamValues to use for the parameter called name.", "type": "object", @@ -362,6 +406,20 @@ } } }, + "v1.PipelineRef": { + "description": "PipelineRef can be used to refer to a specific instance of a Pipeline.", + "type": "object", + "properties": { + "apiVersion": { + "description": "API version of the referent", + "type": "string" + }, + "name": { + "description": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "type": "string" + } + } + }, "v1.PipelineResult": { "description": "PipelineResult used to describe the results of a pipeline", "type": "object", @@ -391,6 +449,296 @@ } } }, + "v1.PipelineRun": { + "description": "PipelineRun represents a single execution of a Pipeline. PipelineRuns are how the graph of Tasks declared in a Pipeline are executed; they specify inputs to Pipelines such as parameter values and capture operational aspects of the Tasks execution such as service account and tolerations. Creating a PipelineRun creates TaskRuns for Tasks in the referenced Pipeline.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "default": {}, + "$ref": "#/definitions/v1.PipelineRunSpec" + }, + "status": { + "default": {}, + "$ref": "#/definitions/v1.PipelineRunStatus" + } + } + }, + "v1.PipelineRunList": { + "description": "PipelineRunList contains a list of PipelineRun", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.PipelineRun" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, + "v1.PipelineRunResult": { + "description": "PipelineRunResult used to describe the results of a pipeline", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name is the result's name as declared by the Pipeline", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the result returned from the execution of this PipelineRun", + "default": {}, + "$ref": "#/definitions/v1.ParamValue" + } + } + }, + "v1.PipelineRunRunStatus": { + "description": "PipelineRunRunStatus contains the name of the PipelineTask for this Run and the Run's Status", + "type": "object", + "properties": { + "pipelineTaskName": { + "description": "PipelineTaskName is the name of the PipelineTask.", + "type": "string" + }, + "status": { + "description": "Status is the RunStatus for the corresponding Run", + "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.apis.run.v1alpha1.RunStatus" + }, + "whenExpressions": { + "description": "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WhenExpression" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.PipelineRunSpec": { + "description": "PipelineRunSpec defines the desired state of PipelineRun", + "type": "object", + "properties": { + "params": { + "description": "Params is a list of parameter names and values.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Param" + }, + "x-kubernetes-list-type": "atomic" + }, + "pipelineRef": { + "$ref": "#/definitions/v1.PipelineRef" + }, + "pipelineSpec": { + "$ref": "#/definitions/v1.PipelineSpec" + }, + "status": { + "description": "Used for cancelling a pipelinerun (and maybe more 
later on)", + "type": "string" + }, + "taskRunSpecs": { + "description": "TaskRunSpecs holds a set of runtime specs", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.PipelineTaskRunSpec" + }, + "x-kubernetes-list-type": "atomic" + }, + "taskRunTemplate": { + "description": "TaskRunTemplate represent template of taskrun", + "default": {}, + "$ref": "#/definitions/v1.PipelineTaskRunTemplate" + }, + "timeouts": { + "description": "Time after which the Pipeline times out. Currently three keys are accepted in the map pipeline, tasks and finally with Timeouts.pipeline \u003e= Timeouts.tasks + Timeouts.finally", + "$ref": "#/definitions/v1.TimeoutFields" + }, + "workspaces": { + "description": "Workspaces holds a set of workspace bindings that must match names with those declared in the pipeline.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceBinding" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.PipelineRunStatus": { + "description": "PipelineRunStatus defines the observed state of PipelineRun", + "type": "object", + "properties": { + "annotations": { + "description": "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "childReferences": { + "description": "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ChildStatusReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "completionTime": { + "description": "CompletionTime is the time the PipelineRun completed.", + "$ref": "#/definitions/v1.Time" + }, + "conditions": { + "description": "Conditions the latest available observations of a resource's current state.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/knative.Condition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, + "observedGeneration": { + "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + "type": "integer", + "format": "int64" + }, + "pipelineSpec": { + "description": "PipelineRunSpec contains the exact spec used to instantiate the run", + "$ref": "#/definitions/v1.PipelineSpec" + }, + "results": { + "description": "Results are the list of results written out by the pipeline task's containers", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.PipelineRunResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "skippedTasks": { + "description": "list of tasks that were skipped due to when expressions evaluating to false", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.SkippedTask" + }, + "x-kubernetes-list-type": "atomic" + }, + "startTime": { + "description": "StartTime is the time the PipelineRun is actually started.", + "$ref": "#/definitions/v1.Time" + } + } + }, + "v1.PipelineRunStatusFields": { + "description": "PipelineRunStatusFields holds the fields of PipelineRunStatus' status. 
This is defined separately and inlined so that other types can readily consume these fields via duck typing.", + "type": "object", + "properties": { + "childReferences": { + "description": "list of TaskRun and Run names, PipelineTask names, and API versions/kinds for children of this PipelineRun.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ChildStatusReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "completionTime": { + "description": "CompletionTime is the time the PipelineRun completed.", + "$ref": "#/definitions/v1.Time" + }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, + "pipelineSpec": { + "description": "PipelineRunSpec contains the exact spec used to instantiate the run", + "$ref": "#/definitions/v1.PipelineSpec" + }, + "results": { + "description": "Results are the list of results written out by the pipeline task's containers", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.PipelineRunResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "skippedTasks": { + "description": "list of tasks that were skipped due to when expressions evaluating to false", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.SkippedTask" + }, + "x-kubernetes-list-type": "atomic" + }, + "startTime": { + "description": "StartTime is the time the PipelineRun is actually started.", + "$ref": "#/definitions/v1.Time" + } + } + }, + "v1.PipelineRunTaskRunStatus": { + "description": "PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status", + "type": "object", + "properties": { + "pipelineTaskName": { + "description": "PipelineTaskName is the name of the PipelineTask.", + "type": "string" + }, + "status": { + "description": "Status is the TaskRunStatus for the corresponding TaskRun", + "$ref": "#/definitions/v1.TaskRunStatus" + }, + "whenExpressions": { + "description": "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WhenExpression" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1.PipelineSpec": { "description": "PipelineSpec defines the desired state of Pipeline.", "type": "object", @@ -452,12 +800,7 @@ "properties": { "matrix": { "description": "Matrix declares parameters used to fan out this task.", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1.Param" - }, - "x-kubernetes-list-type": "atomic" + "$ref": "#/definitions/v1.Matrix" }, "name": { "description": "Name is the name of this task within the context of a Pipeline. Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.", @@ -555,54 +898,93 @@ } } }, - "v1.PipelineWorkspaceDeclaration": { - "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "v1.PipelineTaskRun": { + "description": "PipelineTaskRun reports the results of running a step in the Task. 
Each task has the potential to succeed or fail (based on the exit code) and produces logs.", "type": "object", - "required": [ - "name" - ], "properties": { - "description": { - "description": "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", - "type": "string" - }, "name": { - "description": "Name is the name of a workspace to be provided by a PipelineRun.", - "type": "string", - "default": "" - }, - "optional": { - "description": "Optional marks a Workspace as not being required in PipelineRuns. By default this field is false and so declared workspaces are required.", - "type": "boolean" + "type": "string" } } }, - "v1.PropertySpec": { - "description": "PropertySpec defines the struct for object keys", + "v1.PipelineTaskRunSpec": { + "description": "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task", "type": "object", "properties": { - "type": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "metadata": { + "$ref": "#/definitions/v1.PipelineTaskMetadata" + }, + "pipelineTaskName": { + "type": "string" + }, + "podTemplate": { + "$ref": "#/definitions/pod.Template" + }, + "serviceAccountName": { + "type": "string" + }, + "sidecarOverrides": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunSidecarOverride" + }, + "x-kubernetes-list-type": "atomic" + }, + "stepOverrides": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunStepOverride" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.PipelineTaskRunTemplate": { + "description": "PipelineTaskRunTemplate is used to specify run specifications for all Task in pipelinerun.", + "type": "object", + "properties": { + "podTemplate": { + "$ref": "#/definitions/pod.Template" + }, + "serviceAccountName": { "type": "string" } } }, - "v1.ResolverParam": { - "description": "ResolverParam is a single parameter passed to a resolver.", + "v1.PipelineWorkspaceDeclaration": { + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", "type": "object", "required": [ - "name", - "value" + "name" ], "properties": { + "description": { + "description": "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + "type": "string" + }, "name": { - "description": "Name is the name of the parameter that will be passed to the resolver.", + "description": "Name is the name of a workspace to be provided by a PipelineRun.", "type": "string", "default": "" }, - "value": { - "description": "Value is the string value of the parameter that will be passed to the resolver.", - "type": "string", - "default": "" + "optional": { + "description": "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + "type": "boolean" + } + } + }, + "v1.PropertySpec": { + "description": "PropertySpec defines the struct for object keys", + "type": "object", + "properties": { + "type": { + "type": "string" } } }, @@ -610,18 +992,18 @@ "description": "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", "type": "object", "properties": { - "resolver": { - "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", - "type": "string" - }, - "resource": { - "description": "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + "params": { + "description": "Params contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1.ResolverParam" + "$ref": "#/definitions/v1.Param" }, "x-kubernetes-list-type": "atomic" + }, + "resolver": { + "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + "type": "string" } } }, @@ -813,6 +1195,62 @@ } } }, + "v1.SidecarState": { + "description": "SidecarState reports the results of running a sidecar in a Task.", + "type": "object", + "properties": { + "container": { + "type": "string" + }, + "imageID": { + "type": "string" + }, + "name": { + "type": "string" + }, + "running": { + "description": "Details about a running container", + "$ref": "#/definitions/v1.ContainerStateRunning" + }, + "terminated": { + "description": "Details about a terminated container", + "$ref": "#/definitions/v1.ContainerStateTerminated" + }, + "waiting": { + "description": "Details about a waiting container", + "$ref": "#/definitions/v1.ContainerStateWaiting" + } + } + }, + "v1.SkippedTask": { + "description": "SkippedTask is used to describe the Tasks that were skipped due to their When Expressions evaluating to False. 
This is a struct because we are looking into including more details about the When Expressions that caused this Task to be skipped.", + "type": "object", + "required": [ + "name", + "reason" + ], + "properties": { + "name": { + "description": "Name is the Pipeline Task name", + "type": "string", + "default": "" + }, + "reason": { + "description": "Reason is the cause of the PipelineTask being skipped.", + "type": "string", + "default": "" + }, + "whenExpressions": { + "description": "WhenExpressions is the list of checks guarding the execution of the PipelineTask", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WhenExpression" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1.Step": { "description": "Step runs a subcomponent of a Task", "type": "object", @@ -947,6 +1385,33 @@ } } }, + "v1.StepState": { + "description": "StepState reports the results of running a step in a Task.", + "type": "object", + "properties": { + "container": { + "type": "string" + }, + "imageID": { + "type": "string" + }, + "name": { + "type": "string" + }, + "running": { + "description": "Details about a running container", + "$ref": "#/definitions/v1.ContainerStateRunning" + }, + "terminated": { + "description": "Details about a terminated container", + "$ref": "#/definitions/v1.ContainerStateTerminated" + }, + "waiting": { + "description": "Details about a waiting container", + "$ref": "#/definitions/v1.ContainerStateWaiting" + } + } + }, "v1.StepTemplate": { "description": "StepTemplate is a template for a Step", "type": "object", @@ -1133,6 +1598,88 @@ } } }, + "v1.TaskRun": { + "description": "TaskRun represents a single execution of a Task. TaskRuns are how the steps specified in a Task are executed; they specify the parameters and resources used to run the steps in a Task.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunSpec" + }, + "status": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunStatus" + } + } + }, + "v1.TaskRunDebug": { + "description": "TaskRunDebug defines the breakpoint config for a particular TaskRun", + "type": "object", + "properties": { + "breakpoint": { + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.TaskRunInputs": { + "description": "TaskRunInputs holds the input values that this task was invoked with.", + "type": "object", + "properties": { + "params": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Param" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.TaskRunList": { + "description": "TaskRunList contains a list of TaskRun", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRun" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, "v1.TaskRunResult": { "description": "TaskRunResult used to describe the results of a task", "type": "object", @@ -1157,6 +1704,268 @@ } } }, + "v1.TaskRunSidecarOverride": { + "description": "TaskRunSidecarOverride is used to override the values of a Sidecar in the corresponding Task.", + "type": "object", + "required": [ + "name", + "resources" + ], + "properties": { + "name": { + "description": "The name of the Sidecar to override.", + "type": "string", + "default": "" + }, + "resources": { + "description": "The resource requirements to apply to the Sidecar.", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + } + } + }, + "v1.TaskRunSpec": { + "description": "TaskRunSpec defines the desired state of TaskRun", + "type": "object", + "properties": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "debug": { + "$ref": "#/definitions/v1.TaskRunDebug" + }, + "params": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Param" + }, + "x-kubernetes-list-type": "atomic" + }, + "podTemplate": { + "description": "PodTemplate holds pod specific configuration", + "$ref": "#/definitions/pod.Template" + }, + "serviceAccountName": { + "type": "string", + "default": "" + }, + "sidecarOverrides": { + "description": "Overrides to apply to Sidecars in this TaskRun. If a field is specified in both a Sidecar and a SidecarOverride, the value from the SidecarOverride will be used. 
This field is only supported when the alpha feature gate is enabled.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunSidecarOverride" + }, + "x-kubernetes-list-type": "atomic" + }, + "status": { + "description": "Used for cancelling a taskrun (and maybe more later on)", + "type": "string" + }, + "statusMessage": { + "description": "Status message for cancellation.", + "type": "string" + }, + "stepOverrides": { + "description": "Overrides to apply to Steps in this TaskRun. If a field is specified in both a Step and a StepOverride, the value from the StepOverride will be used. This field is only supported when the alpha feature gate is enabled.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunStepOverride" + }, + "x-kubernetes-list-type": "atomic" + }, + "taskRef": { + "description": "no more than one of the TaskRef and TaskSpec may be specified.", + "$ref": "#/definitions/v1.TaskRef" + }, + "taskSpec": { + "$ref": "#/definitions/v1.TaskSpec" + }, + "timeout": { + "description": "Time after which the build times out. Defaults to 1 hour. Specified build timeout should be less than 24h. Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "$ref": "#/definitions/v1.Duration" + }, + "workspaces": { + "description": "Workspaces is a list of WorkspaceBindings from volumes to workspaces.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceBinding" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.TaskRunStatus": { + "description": "TaskRunStatus defines the observed state of TaskRun", + "type": "object", + "required": [ + "podName" + ], + "properties": { + "annotations": { + "description": "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "completionTime": { + "description": "CompletionTime is the time the build completed.", + "$ref": "#/definitions/v1.Time" + }, + "conditions": { + "description": "Conditions the latest available observations of a resource's current state.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/knative.Condition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "observedGeneration": { + "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + "type": "integer", + "format": "int64" + }, + "podName": { + "description": "PodName is the name of the pod responsible for executing this task's steps.", + "type": "string", + "default": "" + }, + "results": { + "description": "Results are the list of results written out by the task's containers", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "retriesStatus": { + "description": "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. 
All TaskRunStatus entries stored in RetriesStatus will have no data within their own RetriesStatus, as it would be redundant.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunStatus" + }, + "x-kubernetes-list-type": "atomic" + }, + "sidecars": { + "description": "The list has one entry per sidecar in the manifest. Each entry represents the image ID of the corresponding sidecar.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.SidecarState" + }, + "x-kubernetes-list-type": "atomic" + }, + "startTime": { + "description": "StartTime is the time the build is actually started.", + "$ref": "#/definitions/v1.Time" + }, + "steps": { + "description": "Steps describes the state of each build step container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.StepState" + }, + "x-kubernetes-list-type": "atomic" + }, + "taskSpec": { + "description": "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.", + "$ref": "#/definitions/v1.TaskSpec" + } + } + }, + "v1.TaskRunStatusFields": { + "description": "TaskRunStatusFields holds the fields of TaskRun's status. This is defined separately and inlined so that other types can readily consume these fields via duck typing.", + "type": "object", + "required": [ + "podName" + ], + "properties": { + "completionTime": { + "description": "CompletionTime is the time the build completed.", + "$ref": "#/definitions/v1.Time" + }, + "podName": { + "description": "PodName is the name of the pod responsible for executing this task's steps.", + "type": "string", + "default": "" + }, + "results": { + "description": "Results are the list of results written out by the task's containers", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "retriesStatus": { + "description": "RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. All TaskRunStatus entries stored in RetriesStatus will have no data within their own RetriesStatus, as it would be redundant.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskRunStatus" + }, + "x-kubernetes-list-type": "atomic" + }, + "sidecars": { + "description": "The list has one entry per sidecar in the manifest. Each entry represents the image ID of the corresponding sidecar.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.SidecarState" + }, + "x-kubernetes-list-type": "atomic" + }, + "startTime": { + "description": "StartTime is the time the build is actually started.", + "$ref": "#/definitions/v1.Time" + }, + "steps": { + "description": "Steps describes the state of each build step container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.StepState" + }, + "x-kubernetes-list-type": "atomic" + }, + "taskSpec": { + "description": "TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun.", + "$ref": "#/definitions/v1.TaskSpec" + } + } + }, + "v1.TaskRunStepOverride": { + "description": "TaskRunStepOverride is used to override the values of a Step in the corresponding Task.", + "type": "object", + "required": [ + "name", + "resources" + ], + "properties": { + "name": { + "description": "The name of the Step to override.", + "type": "string", + "default": "" + }, + "resources": { + "description": "The resource requirements to apply to the Step.", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + } + } + }, "v1.TaskSpec": { "description": "TaskSpec defines the desired state of Task.", "type": "object", @@ -1225,6 +2034,24 @@ } } }, + "v1.TimeoutFields": { + "description": "TimeoutFields allows granular specification of pipeline, task, and finally timeouts", + "type": "object", + "properties": { + "finally": { + "description": "Finally sets the maximum allowed duration of this pipeline's finally", + "$ref": "#/definitions/v1.Duration" + }, + "pipeline": { + "description": "Pipeline sets the maximum allowed duration for execution of the entire pipeline. The sum of individual timeouts for tasks and finally must not exceed this value.", + "$ref": "#/definitions/v1.Duration" + }, + "tasks": { + "description": "Tasks sets the maximum allowed duration of this pipeline's tasks", + "$ref": "#/definitions/v1.Duration" + } + } + }, "v1.WhenExpression": { "description": "WhenExpression allows a PipelineTask to declare expressions to be evaluated before the Task is run to determine whether the Task should be executed or skipped", "type": "object", @@ -1266,6 +2093,10 @@ "description": "ConfigMap represents a configMap that should populate this workspace.", "$ref": "#/definitions/v1.ConfigMapVolumeSource" }, + "csi": { + "description": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + "$ref": "#/definitions/v1.CSIVolumeSource" + }, "emptyDir": { "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", "$ref": "#/definitions/v1.EmptyDirVolumeSource" }, @@ -1279,6 +2110,10 @@ "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
Either this OR EmptyDir can be used.", "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" }, + "projected": { + "description": "Projected represents a projected volume that should populate this workspace.", + "$ref": "#/definitions/v1.ProjectedVolumeSource" + }, "secret": { "description": "Secret represents a secret that should populate this workspace.", "$ref": "#/definitions/v1.SecretVolumeSource" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go index 96dafd32b3..3406ea2108 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -27,7 +27,6 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "github.com/tektoncd/pipeline/pkg/apis/version" - "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/substitution" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -278,13 +277,13 @@ func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis // when the enable-api-fields feature gate is not "alpha". errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) } - errs = errs.Also(p.ValidateType()) + errs = errs.Also(p.ValidateType(ctx)) } return errs } // ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type -func (p ParamSpec) ValidateType() *apis.FieldError { +func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // Ensure param has a valid type. validType := false for _, allowedType := range AllParamTypes { @@ -309,17 +308,20 @@ func (p ParamSpec) ValidateType() *apis.FieldError { } // Check object type and its PropertySpec type - return p.ValidateObjectType() + return p.ValidateObjectType(ctx) } // ValidateObjectType checks that object type parameter does not miss the // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) -func (p ParamSpec) ValidateObjectType() *apis.FieldError { +func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { if p.Type == ParamTypeObject && p.Properties == nil { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + // If we are not skipping validation checks due to propagated params, + // then the properties field is required. 
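+		// Illustrative sketch (not upstream code): with propagated object params, a
+		// spec such as
+		//
+		//	ParamSpec{Name: "gitrepo", Type: ParamTypeObject, Properties: nil}
+		//
+		// is tolerated here and validated later against the values the TaskRun
+		// actually propagates.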
+ if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } } - invalidKeys := []string{} for key, propertySpec := range p.Properties { if propertySpec.Type != ParamTypeString { @@ -364,10 +366,9 @@ func ValidateParameterVariables(ctx context.Context, steps []Step, params []Para errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) } - errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) - errs = errs.Also(validateObjectDefault(objectParamSpecs)) - return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) + return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { @@ -404,45 +405,6 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectDefault validates the keys of all the object params within a -// slice of ParamSpecs are provided in default iff the default section is provided. -func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { - for _, p := range objectParams { - errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) - } - return errs -} - -// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. -func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ParamValue) (errs *apis.FieldError) { - if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { - return nil - } - - neededKeys := []string{} - providedKeys := []string{} - - // collect all needed keys - for key := range properties { - neededKeys = append(neededKeys, key) - } - - // collect all provided keys - for key := range propertiesProvider.ObjectVal { - providedKeys = append(providedKeys, key) - } - - missings := list.DiffLeft(neededKeys, providedKeys) - if len(missings) != 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), - Paths: []string{"properties", "default"}, - } - } - - return nil -} - // validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings // i.e. 
param.objectParam, param.objectParam[*] func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go index 245d1f5eb0..87f8bb4c54 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskref_validation.go @@ -18,6 +18,7 @@ package v1 import ( "context" + "fmt" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/version" @@ -31,22 +32,37 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - switch { - case ref.Resolver != "": - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + if ref.Resolver != "" || ref.Params != nil { + if ref.Resolver != "" { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + } } - case ref.Resource != nil: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) + if ref.Params != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "params", config.AlphaAPIFields).ViaField("params")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "params")) + } + if ref.Resolver == "" { + errs = errs.Also(apis.ErrMissingField("resolver")) + } + errs = errs.Also(ValidateParameters(ctx, ref.Params)) + errs = errs.Also(validateResolutionParamTypes(ref.Params).ViaField("params")) } - if ref.Resolver == "" { - errs = errs.Also(apis.ErrMissingField("resolver")) - } - case ref.Name == "": + } else if ref.Name == "" { errs = errs.Also(apis.ErrMissingField("name")) } return } + +func validateResolutionParamTypes(params []Param) (errs *apis.FieldError) { + for i, p := range params { + if p.Value.Type == ParamTypeArray || p.Value.Type == ParamTypeObject { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("remote resolution parameter type must be %s, not %s", + string(ParamTypeString), string(p.Value.Type))).ViaIndex(i)) + } + } + + return errs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_conversion.go new file mode 100644 index 0000000000..102c7cdf56 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_conversion.go @@ -0,0 +1,42 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +var _ apis.Convertible = (*TaskRun)(nil) + +// ConvertTo implements apis.Convertible +func (tr *TaskRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (tr *TaskRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go new file mode 100644 index 0000000000..61932f4668 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_defaults.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" +) + +var _ apis.Defaultable = (*TaskRun)(nil) + +// ManagedByLabelKey is the label key used to mark what is managing this resource +const ManagedByLabelKey = "app.kubernetes.io/managed-by" + +// SetDefaults implements apis.Defaultable +func (tr *TaskRun) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, tr.ObjectMeta) + tr.Spec.SetDefaults(ctx) + + // If the TaskRun doesn't have a managed-by label, apply the default + // specified in the config. 
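+	// Illustrative example (the concrete default value is an assumption, not
+	// pinned by this file): with stock config, a label-less TaskRun ends up with
+	//
+	//	tr.Labels[ManagedByLabelKey] == "tekton-pipelines"
+	//
+	// while a TaskRun that already sets a managed-by label keeps its own value.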
+ cfg := config.FromContextOrDefaults(ctx) + if tr.ObjectMeta.Labels == nil { + tr.ObjectMeta.Labels = map[string]string{} + } + if _, found := tr.ObjectMeta.Labels[ManagedByLabelKey]; !found { + tr.ObjectMeta.Labels[ManagedByLabelKey] = cfg.Defaults.DefaultManagedByLabelValue + } +} + +// SetDefaults implements apis.Defaultable +func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + if trs.TaskRef != nil && trs.TaskRef.Kind == "" { + trs.TaskRef.Kind = NamespacedTaskKind + } + + if trs.Timeout == nil { + trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} + } + + defaultSA := cfg.Defaults.DefaultServiceAccount + if trs.ServiceAccountName == "" && defaultSA != "" { + trs.ServiceAccountName = defaultSA + } + + defaultPodTemplate := cfg.Defaults.DefaultPodTemplate + trs.PodTemplate = pod.MergePodTemplateWithDefault(trs.PodTemplate, defaultPodTemplate) + + // If this taskrun has an embedded task, apply the usual task defaults + if trs.TaskSpec != nil { + trs.TaskSpec.SetDefaults(ctx) + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go new file mode 100644 index 0000000000..d0ba95d91e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_types.go @@ -0,0 +1,419 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/clock" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// TaskRunSpec defines the desired state of TaskRun +type TaskRunSpec struct { + // +optional + Debug *TaskRunDebug `json:"debug,omitempty"` + // +optional + // +listType=atomic + Params []Param `json:"params,omitempty"` + // +optional + ServiceAccountName string `json:"serviceAccountName"` + // no more than one of the TaskRef and TaskSpec may be specified. + // +optional + TaskRef *TaskRef `json:"taskRef,omitempty"` + // +optional + TaskSpec *TaskSpec `json:"taskSpec,omitempty"` + // Used for cancelling a taskrun (and maybe more later on) + // +optional + Status TaskRunSpecStatus `json:"status,omitempty"` + // Status message for cancellation. + // +optional + StatusMessage TaskRunSpecStatusMessage `json:"statusMessage,omitempty"` + // Time after which the build times out. Defaults to 1 hour. + // Specified build timeout should be less than 24h. 
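+	// For example, "1h30m" and "90m" both parse to a ninety-minute timeout.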
+ // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + // PodTemplate holds pod specific configuration + PodTemplate *pod.PodTemplate `json:"podTemplate,omitempty"` + // Workspaces is a list of WorkspaceBindings from volumes to workspaces. + // +optional + // +listType=atomic + Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` + // Overrides to apply to Steps in this TaskRun. + // If a field is specified in both a Step and a StepOverride, + // the value from the StepOverride will be used. + // This field is only supported when the alpha feature gate is enabled. + // +optional + // +listType=atomic + StepOverrides []TaskRunStepOverride `json:"stepOverrides,omitempty"` + // Overrides to apply to Sidecars in this TaskRun. + // If a field is specified in both a Sidecar and a SidecarOverride, + // the value from the SidecarOverride will be used. + // This field is only supported when the alpha feature gate is enabled. + // +optional + // +listType=atomic + SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"` + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` +} + +// TaskRunSpecStatus defines the taskrun spec status the user can provide +type TaskRunSpecStatus string + +const ( + // TaskRunSpecStatusCancelled indicates that the user wants to cancel the task, + // if not already cancelled or terminated + TaskRunSpecStatusCancelled = "TaskRunCancelled" +) + +// TaskRunSpecStatusMessage defines human readable status messages for the TaskRun. +type TaskRunSpecStatusMessage string + +const ( + // TaskRunCancelledByPipelineMsg indicates that the PipelineRun of which this + // TaskRun was a part of has been cancelled. + TaskRunCancelledByPipelineMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has been cancelled." +) + +// TaskRunDebug defines the breakpoint config for a particular TaskRun +type TaskRunDebug struct { + // +optional + // +listType=atomic + Breakpoint []string `json:"breakpoint,omitempty"` +} + +// TaskRunInputs holds the input values that this task was invoked with. +type TaskRunInputs struct { + // +optional + // +listType=atomic + Params []Param `json:"params,omitempty"` +} + +var taskRunCondSet = apis.NewBatchConditionSet() + +// TaskRunStatus defines the observed state of TaskRun +type TaskRunStatus struct { + duckv1.Status `json:",inline"` + + // TaskRunStatusFields inlines the status fields. + TaskRunStatusFields `json:",inline"` +} + +// TaskRunReason is an enum used to store all TaskRun reason for +// the Succeeded condition that are controlled by the TaskRun itself. 
Failure +// reasons that emerge from underlying resources are not included here +type TaskRunReason string + +const ( + // TaskRunReasonStarted is the reason set when the TaskRun has just started + TaskRunReasonStarted TaskRunReason = "Started" + // TaskRunReasonRunning is the reason set when the TaskRun is running + TaskRunReasonRunning TaskRunReason = "Running" + // TaskRunReasonSuccessful is the reason set when the TaskRun completed successfully + TaskRunReasonSuccessful TaskRunReason = "Succeeded" + // TaskRunReasonFailed is the reason set when the TaskRun completed with a failure + TaskRunReasonFailed TaskRunReason = "Failed" + // TaskRunReasonCancelled is the reason set when the Taskrun is cancelled by the user + TaskRunReasonCancelled TaskRunReason = "TaskRunCancelled" + // TaskRunReasonTimedOut is the reason set when the Taskrun has timed out + TaskRunReasonTimedOut TaskRunReason = "TaskRunTimeout" + // TaskRunReasonResolvingTaskRef indicates that the TaskRun is waiting for + // its taskRef to be asynchronously resolved. + TaskRunReasonResolvingTaskRef = "ResolvingTaskRef" + // TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled + TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed" +) + +func (t TaskRunReason) String() string { + return string(t) +} + +// GetStartedReason returns the reason set to the "Succeeded" condition when +// InitializeConditions is invoked +func (trs *TaskRunStatus) GetStartedReason() string { + return TaskRunReasonStarted.String() +} + +// GetRunningReason returns the reason set to the "Succeeded" condition when +// the TaskRun starts running. This is used to indicate that the resource +// has been validated and is starting to perform its job. +func (trs *TaskRunStatus) GetRunningReason() string { + return TaskRunReasonRunning.String() +} + +// MarkResourceOngoing sets the ConditionSucceeded condition to ConditionUnknown +// with the reason and message. +func (trs *TaskRunStatus) MarkResourceOngoing(reason TaskRunReason, message string) { + taskRunCondSet.Manage(trs).SetCondition(apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionUnknown, + Reason: reason.String(), + Message: message, + }) +} + +// MarkResourceFailed sets the ConditionSucceeded condition to ConditionFalse +// based on an error that occurred and a reason +func (trs *TaskRunStatus) MarkResourceFailed(reason TaskRunReason, err error) { + taskRunCondSet.Manage(trs).SetCondition(apis.Condition{ + Type: apis.ConditionSucceeded, + Status: corev1.ConditionFalse, + Reason: reason.String(), + Message: err.Error(), + }) + succeeded := trs.GetCondition(apis.ConditionSucceeded) + trs.CompletionTime = &succeeded.LastTransitionTime.Inner +} + +// TaskRunStatusFields holds the fields of TaskRun's status. This is defined +// separately and inlined so that other types can readily consume these fields +// via duck typing. +type TaskRunStatusFields struct { + // PodName is the name of the pod responsible for executing this task's steps. + PodName string `json:"podName"` + + // StartTime is the time the build is actually started. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // CompletionTime is the time the build completed. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + + // Steps describes the state of each build step container. + // +optional + // +listType=atomic + Steps []StepState `json:"steps,omitempty"` + + // RetriesStatus contains the history of TaskRunStatus in case of a retry in order to keep record of failures. + // All TaskRunStatus entries stored in RetriesStatus will have no data within their own RetriesStatus, as it would be redundant. + // +optional + // +listType=atomic + RetriesStatus []TaskRunStatus `json:"retriesStatus,omitempty"` + + // Results are the list of results written out by the task's containers + // +optional + // +listType=atomic + Results []TaskRunResult `json:"results,omitempty"` + + // The list has one entry per sidecar in the manifest. Each entry + // represents the image ID of the corresponding sidecar. + // +listType=atomic + Sidecars []SidecarState `json:"sidecars,omitempty"` + + // TaskSpec contains the Spec from the dereferenced Task definition used to instantiate this TaskRun. + TaskSpec *TaskSpec `json:"taskSpec,omitempty"` +} + +// TaskRunStepOverride is used to override the values of a Step in the corresponding Task. +type TaskRunStepOverride struct { + // The name of the Step to override. + Name string `json:"name"` + // The resource requirements to apply to the Step. + Resources corev1.ResourceRequirements `json:"resources"` +} + +// TaskRunSidecarOverride is used to override the values of a Sidecar in the corresponding Task. +type TaskRunSidecarOverride struct { + // The name of the Sidecar to override. + Name string `json:"name"` + // The resource requirements to apply to the Sidecar. + Resources corev1.ResourceRequirements `json:"resources"` +} + +// GetGroupVersionKind implements kmeta.OwnerRefable. +func (*TaskRun) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind(pipeline.TaskRunControllerName) +} + +// GetStatusCondition returns the task run status as a ConditionAccessor +func (tr *TaskRun) GetStatusCondition() apis.ConditionAccessor { + return &tr.Status +} + +// GetCondition returns the Condition matching the given type. +func (trs *TaskRunStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return taskRunCondSet.Manage(trs).GetCondition(t) +} + +// InitializeConditions will set all conditions in taskRunCondSet to unknown for the TaskRun +// and set the started time to the current time +func (trs *TaskRunStatus) InitializeConditions() { + started := false + if trs.StartTime.IsZero() { + trs.StartTime = &metav1.Time{Time: time.Now()} + started = true + } + conditionManager := taskRunCondSet.Manage(trs) + conditionManager.InitializeConditions() + // Ensure the started reason is set for the "Succeeded" condition + if started { + initialCondition := conditionManager.GetCondition(apis.ConditionSucceeded) + initialCondition.Reason = TaskRunReasonStarted.String() + conditionManager.SetCondition(*initialCondition) + } +} + +// SetCondition sets the condition, unsetting previous conditions with the same +// type as necessary. +func (trs *TaskRunStatus) SetCondition(newCond *apis.Condition) { + if newCond != nil { + taskRunCondSet.Manage(trs).SetCondition(*newCond) + } +} + +// StepState reports the results of running a step in a Task. +type StepState struct { + corev1.ContainerState `json:",inline"` + Name string `json:"name,omitempty"` + Container string `json:"container,omitempty"` + ImageID string `json:"imageID,omitempty"` +} + +// SidecarState reports the results of running a sidecar in a Task. +type SidecarState struct { + corev1.ContainerState `json:",inline"` + Name string `json:"name,omitempty"` + Container string `json:"container,omitempty"` + ImageID string `json:"imageID,omitempty"` +} + +// +genclient +// +genreconciler:krshapedlogic=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TaskRun represents a single execution of a Task. TaskRuns are how the steps +// specified in a Task are executed; they specify the parameters and resources +// used to run the steps in a Task. +// +// +k8s:openapi-gen=true +type TaskRun struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +optional + Spec TaskRunSpec `json:"spec,omitempty"` + // +optional + Status TaskRunStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TaskRunList contains a list of TaskRun +type TaskRunList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []TaskRun `json:"items"` +} + +// GetPipelineRunPVCName returns the name of the PersistentVolumeClaim of the PipelineRun that owns this TaskRun, if any +func (tr *TaskRun) GetPipelineRunPVCName() string { + if tr == nil { + return "" + } + for _, ref := range tr.GetOwnerReferences() { + if ref.Kind == pipeline.PipelineRunControllerName { + return fmt.Sprintf("%s-pvc", ref.Name) + } + } + return "" +} + +// HasPipelineRunOwnerReference returns true if the TaskRun has an +// owner reference of type PipelineRun +func (tr *TaskRun) HasPipelineRunOwnerReference() bool { + for _, ref := range tr.GetOwnerReferences() { + if ref.Kind == pipeline.PipelineRunControllerName { + return true + } + } + return false +} + +// IsDone returns true if the TaskRun's status indicates that it is done. +func (tr *TaskRun) IsDone() bool { + return !tr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() +} + +// HasStarted checks whether the TaskRun has a valid start time set in its status +func (tr *TaskRun) HasStarted() bool { + return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() +} + +// IsSuccessful returns true if the TaskRun's status indicates that it has succeeded. +func (tr *TaskRun) IsSuccessful() bool { + return tr != nil && tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() +} + +// IsCancelled returns true if the TaskRun's spec status is set to Cancelled state +func (tr *TaskRun) IsCancelled() bool { + return tr.Spec.Status == TaskRunSpecStatusCancelled +} + +// HasTimedOut returns true if the TaskRun runtime is beyond the allowed timeout +func (tr *TaskRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { + if tr.Status.StartTime.IsZero() { + return false + } + timeout := tr.GetTimeout(ctx) + // If timeout is set to 0 or defaulted to 0, there is no timeout. 
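+	// Illustrative sketch: for a TaskRun started at 10:00 with a 1h timeout,
+	// this returns true once the elapsed runtime exceeds one hour, i.e. when
+	//
+	//	c.Since(tr.Status.StartTime.Time) > tr.GetTimeout(ctx)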
+ if timeout == apisconfig.NoTimeoutDuration { + return false + } + runtime := c.Since(tr.Status.StartTime.Time) + return runtime > timeout +} + +// GetTimeout returns the timeout for the TaskRun, or the default if not specified +func (tr *TaskRun) GetTimeout(ctx context.Context) time.Duration { + // Use the platform default if no timeout is set + if tr.Spec.Timeout == nil { + defaultTimeout := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) + return defaultTimeout * time.Minute + } + return tr.Spec.Timeout.Duration +} + +// GetNamespacedName returns a k8s namespaced name that identifies this TaskRun +func (tr *TaskRun) GetNamespacedName() types.NamespacedName { + return types.NamespacedName{Namespace: tr.Namespace, Name: tr.Name} +} + +// HasVolumeClaimTemplate returns true if TaskRun contains volumeClaimTemplates that are +// used for creating PersistentVolumeClaims with an OwnerReference for each run +func (tr *TaskRun) HasVolumeClaimTemplate() bool { + for _, ws := range tr.Spec.Workspaces { + if ws.VolumeClaimTemplate != nil { + return true + } + } + return false +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go new file mode 100644 index 0000000000..2c6d6521b6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go @@ -0,0 +1,286 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "fmt" + "strings" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/apis" +) + +var _ apis.Validatable = (*TaskRun)(nil) + +// Validate taskrun +func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError { + if apis.IsInDelete(ctx) { + return nil + } + errs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata") + return errs.Also(tr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) +} + +// Validate taskrun spec +func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + // Must have exactly one of taskRef and taskSpec. + if ts.TaskRef == nil && ts.TaskSpec == nil { + errs = errs.Also(apis.ErrMissingOneOf("taskRef", "taskSpec")) + } + if ts.TaskRef != nil && ts.TaskSpec != nil { + errs = errs.Also(apis.ErrMultipleOneOf("taskRef", "taskSpec")) + } + // Validate TaskRef if it's present. + if ts.TaskRef != nil { + errs = errs.Also(ts.TaskRef.Validate(ctx).ViaField("taskRef")) + } + // Validate TaskSpec if it's present. + if ts.TaskSpec != nil { + // skip validation of parameter and workspaces variables since we validate them via taskrunspec below. 
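+		// Illustrative note: this lets an inline taskSpec step reference
+		// $(params.foo) even when "foo" is declared only on the TaskRun; the
+		// merged declarations are re-checked by validateInlineParameters below.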
+ ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) + errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) + } + + errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params")) + + // Validate propagated parameters + errs = errs.Also(ts.validateInlineParameters(ctx)) + errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) + if ts.Debug != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) + errs = errs.Also(validateDebug(ts.Debug).ViaField("debug")) + } + if ts.StepOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) + errs = errs.Also(validateStepOverrides(ts.StepOverrides).ViaField("stepOverrides")) + } + if ts.SidecarOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) + errs = errs.Also(validateSidecarOverrides(ts.SidecarOverrides).ViaField("sidecarOverrides")) + } + if ts.ComputeResources != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources")) + errs = errs.Also(validateTaskRunComputeResources(ts.ComputeResources, ts.StepOverrides)) + } + + if ts.Status != "" { + if ts.Status != TaskRunSpecStatusCancelled { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be %s", ts.Status, TaskRunSpecStatusCancelled), "status")) + } + } + if ts.Status == "" { + if ts.StatusMessage != "" { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", ts.StatusMessage), "statusMessage")) + } + } + + if ts.Timeout != nil { + // timeout should be a valid duration of at least 0. + if ts.Timeout.Duration < 0 { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "timeout")) + } + } + + return errs +} + +// validateInlineParameters validates that any parameters called in the +// Task spec are declared in the TaskRun. +// This is crucial for propagated parameters because the parameters could +// be defined under taskRun and then called directly in the task steps. +// In this case, parameters cannot be validated by the underlying taskSpec +// since they may not have the parameters declared because of propagation. 
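+// For example (hedged sketch): a TaskRun that declares param "foo" may use
+// $(params.foo) inside ts.TaskSpec.Steps without re-declaring "foo" under
+// taskSpec.params; the ParamSpecs merged below make that usage checkable.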
+func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis.FieldError) { + if ts.TaskSpec == nil { + return errs + } + paramSpecForValidation := make(map[string]ParamSpec) + for _, p := range ts.Params { + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) + } + + for _, p := range ts.TaskSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) + } + } + var paramSpec []ParamSpec + for _, v := range paramSpecForValidation { + paramSpec = append(paramSpec, v) + } + if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { + errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) + errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) + } + return errs +} + +func createParamSpecFromParam(p Param, paramSpecForValidation map[string]ParamSpec) map[string]ParamSpec { + value := p.Value + pSpec := ParamSpec{ + Name: p.Name, + Default: &value, + Type: p.Value.Type, + } + if p.Value.ObjectVal != nil { + pSpec.Properties = make(map[string]PropertySpec) + prop := make(map[string]PropertySpec) + for k := range p.Value.ObjectVal { + prop[k] = PropertySpec{Type: ParamTypeString} + } + pSpec.Properties = prop + } + paramSpecForValidation[p.Name] = pSpec + return paramSpecForValidation +} + +func combineParamSpec(p ParamSpec, paramSpecForValidation map[string]ParamSpec) (map[string]ParamSpec, *apis.FieldError) { + if pSpec, ok := paramSpecForValidation[p.Name]; ok { + // Merge defaults with provided values in the taskrun. + if p.Default != nil && p.Default.ObjectVal != nil { + for k, v := range p.Default.ObjectVal { + if pSpec.Default.ObjectVal == nil { + pSpec.Default.ObjectVal = map[string]string{k: v} + } else { + pSpec.Default.ObjectVal[k] = v + } + } + // If Default values of object type are provided then Properties must also be fully declared. + if p.Properties == nil { + return paramSpecForValidation, apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + } + + // Properties must be defined if paramSpec is of object Type + if pSpec.Type == ParamTypeObject { + if p.Properties == nil { + return paramSpecForValidation, apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + // Expect Properties to be complete + pSpec.Properties = p.Properties + } + paramSpecForValidation[p.Name] = pSpec + } else { + // No values provided by task run but found a paramSpec declaration. + // Expect it to be fully speced out. + paramSpecForValidation[p.Name] = p + } + return paramSpecForValidation, nil +} + +// validateDebug +func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) { + breakpointOnFailure := "onFailure" + validBreakpoints := sets.NewString() + validBreakpoints.Insert(breakpointOnFailure) + + for _, b := range db.Breakpoint { + if !validBreakpoints.Has(b) { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("%s is not a valid breakpoint. Available valid breakpoints include %s", b, validBreakpoints.List()), "breakpoint")) + } + } + return errs +} + +// ValidateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. 
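+// Each binding is validated on its own, and duplicate workspace names are
+// rejected case-insensitively, e.g. bindings named "src" and "SRC" conflict.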
+func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { + var names []string + for idx, w := range wb { + names = append(names, w.Name) + errs = errs.Also(w.Validate(ctx).ViaIndex(idx)) + } + errs = errs.Also(validateNoDuplicateNames(names, true)) + return errs +} + +// ValidateParameters makes sure the params for the Task are valid. +func ValidateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { + var names []string + for _, p := range params { + if p.Value.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a taskrun spec + // when the enable-api-fields feature gate is not "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } + names = append(names, p.Name) + } + return errs.Also(validateNoDuplicateNames(names, false)) +} + +func validateStepOverrides(overrides []TaskRunStepOverride) (errs *apis.FieldError) { + var names []string + for i, o := range overrides { + if o.Name == "" { + errs = errs.Also(apis.ErrMissingField("name").ViaIndex(i)) + } else { + names = append(names, o.Name) + } + } + errs = errs.Also(validateNoDuplicateNames(names, true)) + return errs +} + +// validateTaskRunComputeResources ensures that compute resources are not configured at both the step level and the task level +func validateTaskRunComputeResources(computeResources *corev1.ResourceRequirements, overrides []TaskRunStepOverride) (errs *apis.FieldError) { + for _, override := range overrides { + if override.Resources.Size() != 0 && computeResources != nil { + return apis.ErrMultipleOneOf( + "stepOverrides.resources", + "computeResources", + ) + } + } + return nil +} + +func validateSidecarOverrides(overrides []TaskRunSidecarOverride) (errs *apis.FieldError) { + var names []string + for i, o := range overrides { + if o.Name == "" { + errs = errs.Also(apis.ErrMissingField("name").ViaIndex(i)) + } else { + names = append(names, o.Name) + } + } + errs = errs.Also(validateNoDuplicateNames(names, true)) + return errs +} + +// validateNoDuplicateNames returns an error for each name that is repeated in names. +// Case insensitive. +// If byIndex is true, the error will be reported by index instead of by key. 
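+// Illustrative usage:
+//
+//	validateNoDuplicateNames([]string{"build", "Build"}, true)  // error reported via index 1
+//	validateNoDuplicateNames([]string{"build", "Build"}, false) // error reported via key "Build"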
+func validateNoDuplicateNames(names []string, byIndex bool) (errs *apis.FieldError) { + seen := sets.NewString() + for i, n := range names { + if seen.Has(strings.ToLower(n)) { + if byIndex { + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaIndex(i)) + } else { + errs = errs.Also(apis.ErrMultipleOneOf("name").ViaKey(n)) + } + } + seen.Insert(strings.ToLower(n)) + } + return errs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_validation.go index b944a9517b..c7a0b99736 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/when_validation.go @@ -32,8 +32,7 @@ var validWhenOperators = []string{ } func (wes WhenExpressions) validate() *apis.FieldError { - errs := wes.validateWhenExpressionsFields().ViaField("when") - return errs.Also(wes.validateTaskResultsVariables().ViaField("when")) + return wes.validateWhenExpressionsFields().ViaField("when") } func (wes WhenExpressions) validateWhenExpressionsFields() (errs *apis.FieldError) { @@ -57,23 +56,6 @@ func (we *WhenExpression) validateWhenExpressionFields() *apis.FieldError { return nil } -func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { - for idx, we := range wes { - expressions, ok := we.GetVarSubstitutionExpressions() - if ok { - if LooksLikeContainsResultRefs(expressions) { - expressions = filter(expressions, looksLikeResultRef) - resultRefs := NewResultRefs(expressions) - if len(expressions) != len(resultRefs) { - message := fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs) - return apis.ErrInvalidValue(message, apis.CurrentField).ViaIndex(idx) - } - } - } - } - return nil -} - func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, we := range wes { errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", idx)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go index da89660624..a68c3064eb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go @@ -77,6 +77,12 @@ type WorkspaceBinding struct { // Secret represents a secret that should populate this workspace. // +optional Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` + // Projected represents a projected volume that should populate this workspace. + // +optional + Projected *corev1.ProjectedVolumeSource `json:"projected,omitempty"` + // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. 
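+	// A minimal sketch (the driver name is illustrative, not prescribed):
+	//
+	//	WorkspaceBinding{Name: "creds", CSI: &corev1.CSIVolumeSource{Driver: "secrets-store.csi.k8s.io"}}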
+ // +optional + CSI *corev1.CSIVolumeSource `json:"csi,omitempty"` } // WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go index be852bb46d..87fe4a3a42 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go @@ -19,6 +19,8 @@ package v1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -36,7 +38,7 @@ var allVolumeSourceFields = []string{ // Validate looks at the Volume provided in wb and makes sure that it is valid. // This means that only one VolumeSource can be specified, and also that the // supported VolumeSource is itself valid. -func (b *WorkspaceBinding) Validate(context.Context) *apis.FieldError { +func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) { if equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) || b == nil { return apis.ErrMissingField(apis.CurrentField) } @@ -66,6 +68,29 @@ func (b *WorkspaceBinding) Validate(context.Context) *apis.FieldError { return apis.ErrMissingField("secret.secretName") } + // The projected workspace is only supported when the alpha feature gate is enabled. + // For a Projected volume to work, you must provide at least one source. + if b.Projected != nil { + if err := version.ValidateEnabledAPIFields(ctx, "projected workspace type", config.AlphaAPIFields).ViaField("workspace"); err != nil { + return err + } + if len(b.Projected.Sources) == 0 { + return apis.ErrMissingField("projected.sources") + } + } + + // The csi workspace is only supported when the alpha feature gate is enabled. + // For a CSI to work, you must provide and have installed the driver to use. + if b.CSI != nil { + errs := version.ValidateEnabledAPIFields(ctx, "csi workspace type", config.AlphaAPIFields).ViaField("workspaces") + if errs != nil { + return errs + } + if b.CSI.Driver == "" { + return apis.ErrMissingField("csi.driver") + } + } + return nil } @@ -88,5 +113,11 @@ func (b *WorkspaceBinding) numSources() int { if b.Secret != nil { n++ } + if b.Projected != nil { + n++ + } + if b.CSI != nil { + n++ + } return n } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go index 81868ef3d7..20e7a140df 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go @@ -22,11 +22,37 @@ limitations under the License. package v1 import ( + pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ChildStatusReference) DeepCopyInto(out *ChildStatusReference) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.WhenExpressions != nil { + in, out := &in.WhenExpressions, &out.WhenExpressions + *out = make([]WhenExpression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChildStatusReference. +func (in *ChildStatusReference) DeepCopy() *ChildStatusReference { + if in == nil { + return nil + } + out := new(ChildStatusReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EmbeddedTask) DeepCopyInto(out *EmbeddedTask) { *out = *in @@ -47,6 +73,29 @@ func (in *EmbeddedTask) DeepCopy() *EmbeddedTask { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Matrix) DeepCopyInto(out *Matrix) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matrix. +func (in *Matrix) DeepCopy() *Matrix { + if in == nil { + return nil + } + out := new(Matrix) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Param) DeepCopyInto(out *Param) { *out = *in @@ -180,6 +229,23 @@ func (in *PipelineList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRef) DeepCopyInto(out *PipelineRef) { + *out = *in + in.ResolverRef.DeepCopyInto(&out.ResolverRef) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRef. +func (in *PipelineRef) DeepCopy() *PipelineRef { + if in == nil { + return nil + } + out := new(PipelineRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineResult) DeepCopyInto(out *PipelineResult) { *out = *in @@ -197,6 +263,265 @@ func (in *PipelineResult) DeepCopy() *PipelineResult { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRun) DeepCopyInto(out *PipelineRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRun. +func (in *PipelineRun) DeepCopy() *PipelineRun { + if in == nil { + return nil + } + out := new(PipelineRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PipelineRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PipelineRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunList. +func (in *PipelineRunList) DeepCopy() *PipelineRunList { + if in == nil { + return nil + } + out := new(PipelineRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PipelineRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunResult) DeepCopyInto(out *PipelineRunResult) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunResult. +func (in *PipelineRunResult) DeepCopy() *PipelineRunResult { + if in == nil { + return nil + } + out := new(PipelineRunResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunRunStatus) DeepCopyInto(out *PipelineRunRunStatus) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(v1alpha1.RunStatus) + (*in).DeepCopyInto(*out) + } + if in.WhenExpressions != nil { + in, out := &in.WhenExpressions, &out.WhenExpressions + *out = make([]WhenExpression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunRunStatus. +func (in *PipelineRunRunStatus) DeepCopy() *PipelineRunRunStatus { + if in == nil { + return nil + } + out := new(PipelineRunRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { + *out = *in + if in.PipelineRef != nil { + in, out := &in.PipelineRef, &out.PipelineRef + *out = new(PipelineRef) + (*in).DeepCopyInto(*out) + } + if in.PipelineSpec != nil { + in, out := &in.PipelineSpec, &out.PipelineSpec + *out = new(PipelineSpec) + (*in).DeepCopyInto(*out) + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeouts != nil { + in, out := &in.Timeouts, &out.Timeouts + *out = new(TimeoutFields) + (*in).DeepCopyInto(*out) + } + in.TaskRunTemplate.DeepCopyInto(&out.TaskRunTemplate) + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskRunSpecs != nil { + in, out := &in.TaskRunSpecs, &out.TaskRunSpecs + *out = make([]PipelineTaskRunSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpec. 
+func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec { + if in == nil { + return nil + } + out := new(PipelineRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunStatus) DeepCopyInto(out *PipelineRunStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.PipelineRunStatusFields.DeepCopyInto(&out.PipelineRunStatusFields) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatus. +func (in *PipelineRunStatus) DeepCopy() *PipelineRunStatus { + if in == nil { + return nil + } + out := new(PipelineRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]PipelineRunResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PipelineSpec != nil { + in, out := &in.PipelineSpec, &out.PipelineSpec + *out = new(PipelineSpec) + (*in).DeepCopyInto(*out) + } + if in.SkippedTasks != nil { + in, out := &in.SkippedTasks, &out.SkippedTasks + *out = make([]SkippedTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ChildReferences != nil { + in, out := &in.ChildReferences, &out.ChildReferences + *out = make([]ChildStatusReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FinallyStartTime != nil { + in, out := &in.FinallyStartTime, &out.FinallyStartTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunStatusFields. +func (in *PipelineRunStatusFields) DeepCopy() *PipelineRunStatusFields { + if in == nil { + return nil + } + out := new(PipelineRunStatusFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineRunTaskRunStatus) DeepCopyInto(out *PipelineRunTaskRunStatus) { + *out = *in + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(TaskRunStatus) + (*in).DeepCopyInto(*out) + } + if in.WhenExpressions != nil { + in, out := &in.WhenExpressions, &out.WhenExpressions + *out = make([]WhenExpression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunTaskRunStatus. +func (in *PipelineRunTaskRunStatus) DeepCopy() *PipelineRunTaskRunStatus { + if in == nil { + return nil + } + out := new(PipelineRunTaskRunStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { *out = *in @@ -280,10 +605,8 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Matrix != nil { in, out := &in.Matrix, &out.Matrix - *out = make([]Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(Matrix) + (*in).DeepCopyInto(*out) } if in.Workspaces != nil { in, out := &in.Workspaces, &out.Workspaces @@ -377,49 +700,115 @@ func (in *PipelineTaskParam) DeepCopy() *PipelineTaskParam { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) { +func (in *PipelineTaskRun) DeepCopyInto(out *PipelineTaskRun) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration. -func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRun. +func (in *PipelineTaskRun) DeepCopy() *PipelineTaskRun { if in == nil { return nil } - out := new(PipelineWorkspaceDeclaration) + out := new(PipelineTaskRun) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { +func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { *out = *in + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(pod.Template) + (*in).DeepCopyInto(*out) + } + if in.StepOverrides != nil { + in, out := &in.StepOverrides, &out.StepOverrides + *out = make([]TaskRunStepOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SidecarOverrides != nil { + in, out := &in.SidecarOverrides, &out.SidecarOverrides + *out = make([]TaskRunSidecarOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(PipelineTaskMetadata) + (*in).DeepCopyInto(*out) + } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. -func (in *PropertySpec) DeepCopy() *PropertySpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunSpec. +func (in *PipelineTaskRunSpec) DeepCopy() *PipelineTaskRunSpec { if in == nil { return nil } - out := new(PropertySpec) + out := new(PipelineTaskRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineTaskRunTemplate) DeepCopyInto(out *PipelineTaskRunTemplate) { + *out = *in + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(pod.Template) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunTemplate. 
+func (in *PipelineTaskRunTemplate) DeepCopy() *PipelineTaskRunTemplate { + if in == nil { + return nil + } + out := new(PipelineTaskRunTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration. +func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration { + if in == nil { + return nil + } + out := new(PipelineWorkspaceDeclaration) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { +func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParam. -func (in *ResolverParam) DeepCopy() *ResolverParam { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. +func (in *PropertySpec) DeepCopy() *PropertySpec { if in == nil { return nil } - out := new(ResolverParam) + out := new(PropertySpec) in.DeepCopyInto(out) return out } @@ -427,10 +816,12 @@ func (in *ResolverParam) DeepCopy() *ResolverParam { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { *out = *in - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = make([]ResolverParam, len(*in)) - copy(*out, *in) + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -544,7 +935,47 @@ func (in *Sidecar) DeepCopy() *Sidecar { if in == nil { return nil } - out := new(Sidecar) + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SidecarState) DeepCopyInto(out *SidecarState) { + *out = *in + in.ContainerState.DeepCopyInto(&out.ContainerState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SidecarState. +func (in *SidecarState) DeepCopy() *SidecarState { + if in == nil { + return nil + } + out := new(SidecarState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkippedTask) DeepCopyInto(out *SkippedTask) { + *out = *in + if in.WhenExpressions != nil { + in, out := &in.WhenExpressions, &out.WhenExpressions + *out = make([]WhenExpression, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkippedTask. +func (in *SkippedTask) DeepCopy() *SkippedTask { + if in == nil { + return nil + } + out := new(SkippedTask) in.DeepCopyInto(out) return out } @@ -643,6 +1074,23 @@ func (in *StepOutputConfig) DeepCopy() *StepOutputConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
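+// The embedded corev1.ContainerState provides its own generated DeepCopyInto, which is reused below.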
+func (in *StepState) DeepCopyInto(out *StepState) { + *out = *in + in.ContainerState.DeepCopyInto(&out.ContainerState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepState. +func (in *StepState) DeepCopy() *StepState { + if in == nil { + return nil + } + out := new(StepState) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { *out = *in @@ -801,6 +1249,111 @@ func (in *TaskResult) DeepCopy() *TaskResult { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRun) DeepCopyInto(out *TaskRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRun. +func (in *TaskRun) DeepCopy() *TaskRun { + if in == nil { + return nil + } + out := new(TaskRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunDebug) DeepCopyInto(out *TaskRunDebug) { + *out = *in + if in.Breakpoint != nil { + in, out := &in.Breakpoint, &out.Breakpoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunDebug. +func (in *TaskRunDebug) DeepCopy() *TaskRunDebug { + if in == nil { + return nil + } + out := new(TaskRunDebug) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunInputs. +func (in *TaskRunInputs) DeepCopy() *TaskRunInputs { + if in == nil { + return nil + } + out := new(TaskRunInputs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunList) DeepCopyInto(out *TaskRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TaskRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunList. +func (in *TaskRunList) DeepCopy() *TaskRunList { + if in == nil { + return nil + } + out := new(TaskRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
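+// Implementing DeepCopyObject is what lets client-go caches and informers copy these lists as generic runtime.Objects.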
+func (in *TaskRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) { *out = *in @@ -818,6 +1371,189 @@ func (in *TaskRunResult) DeepCopy() *TaskRunResult { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunSidecarOverride) DeepCopyInto(out *TaskRunSidecarOverride) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSidecarOverride. +func (in *TaskRunSidecarOverride) DeepCopy() *TaskRunSidecarOverride { + if in == nil { + return nil + } + out := new(TaskRunSidecarOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { + *out = *in + if in.Debug != nil { + in, out := &in.Debug, &out.Debug + *out = new(TaskRunDebug) + (*in).DeepCopyInto(*out) + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskRef != nil { + in, out := &in.TaskRef, &out.TaskRef + *out = new(TaskRef) + (*in).DeepCopyInto(*out) + } + if in.TaskSpec != nil { + in, out := &in.TaskSpec, &out.TaskSpec + *out = new(TaskSpec) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.PodTemplate != nil { + in, out := &in.PodTemplate, &out.PodTemplate + *out = new(pod.Template) + (*in).DeepCopyInto(*out) + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StepOverrides != nil { + in, out := &in.StepOverrides, &out.StepOverrides + *out = make([]TaskRunStepOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SidecarOverrides != nil { + in, out := &in.SidecarOverrides, &out.SidecarOverrides + *out = make([]TaskRunSidecarOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunSpec. +func (in *TaskRunSpec) DeepCopy() *TaskRunSpec { + if in == nil { + return nil + } + out := new(TaskRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunStatus) DeepCopyInto(out *TaskRunStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.TaskRunStatusFields.DeepCopyInto(&out.TaskRunStatusFields) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatus. 
+func (in *TaskRunStatus) DeepCopy() *TaskRunStatus { + if in == nil { + return nil + } + out := new(TaskRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunStatusFields) DeepCopyInto(out *TaskRunStatusFields) { + *out = *in + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RetriesStatus != nil { + in, out := &in.RetriesStatus, &out.RetriesStatus + *out = make([]TaskRunStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]TaskRunResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]SidecarState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TaskSpec != nil { + in, out := &in.TaskSpec, &out.TaskSpec + *out = new(TaskSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStatusFields. +func (in *TaskRunStatusFields) DeepCopy() *TaskRunStatusFields { + if in == nil { + return nil + } + out := new(TaskRunStatusFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskRunStepOverride) DeepCopyInto(out *TaskRunStepOverride) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunStepOverride. +func (in *TaskRunStepOverride) DeepCopy() *TaskRunStepOverride { + if in == nil { + return nil + } + out := new(TaskRunStepOverride) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { *out = *in @@ -879,6 +1615,37 @@ func (in *TaskSpec) DeepCopy() *TaskSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeoutFields) DeepCopyInto(out *TimeoutFields) { + *out = *in + if in.Pipeline != nil { + in, out := &in.Pipeline, &out.Pipeline + *out = new(metav1.Duration) + **out = **in + } + if in.Tasks != nil { + in, out := &in.Tasks, &out.Tasks + *out = new(metav1.Duration) + **out = **in + } + if in.Finally != nil { + in, out := &in.Finally, &out.Finally + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeoutFields. +func (in *TimeoutFields) DeepCopy() *TimeoutFields { + if in == nil { + return nil + } + out := new(TimeoutFields) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WhenExpression) DeepCopyInto(out *WhenExpression) { *out = *in @@ -950,6 +1717,16 @@ func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { *out = new(corev1.SecretVolumeSource) (*in).DeepCopyInto(*out) } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(corev1.ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(corev1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go index d7ca61a05c..238e3f7bda 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go @@ -28,7 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" ) @@ -102,6 +102,8 @@ const ( // RunCancelledByPipelineMsg indicates that the PipelineRun of which part this Run was // has been cancelled. RunCancelledByPipelineMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has been cancelled." + // RunCancelledByPipelineTimeoutMsg indicates that the Run was cancelled because the PipelineRun running it timed out. + RunCancelledByPipelineTimeoutMsg RunSpecStatusMessage = "Run cancelled as the PipelineRun it belongs to has timed out." ) // GetParam gets the Param from the RunSpec with the given name diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_defaults.go new file mode 100644 index 0000000000..9105d16a6c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_defaults.go @@ -0,0 +1,41 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "knative.dev/pkg/apis" +) + +var _ apis.Defaultable = (*CustomRun)(nil) + +// SetDefaults implements apis.Defaultable +func (r *CustomRun) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, r.ObjectMeta) + r.Spec.SetDefaults(apis.WithinSpec(ctx)) +} + +// SetDefaults implements apis.Defaultable +func (rs *CustomRunSpec) SetDefaults(ctx context.Context) { + cfg := config.FromContextOrDefaults(ctx) + defaultSA := cfg.Defaults.DefaultServiceAccount + if rs.ServiceAccountName == "" && defaultSA != "" { + rs.ServiceAccountName = defaultSA + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go new file mode 100644 index 0000000000..4d602fd5b9 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_types.go @@ -0,0 +1,243 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + "time" + + apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/clock" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// EmbeddedCustomRunSpec allows custom task definitions to be embedded +type EmbeddedCustomRunSpec struct { + runtime.TypeMeta `json:",inline"` + + // +optional + Metadata PipelineTaskMetadata `json:"metadata,omitempty"` + + // Spec is a specification of a custom task + // +optional + Spec runtime.RawExtension `json:"spec,omitempty"` +} + +// CustomRunSpec defines the desired state of CustomRun +type CustomRunSpec struct { + // +optional + CustomRef *TaskRef `json:"customRef,omitempty"` + + // Spec is a specification of a custom task + // +optional + CustomSpec *EmbeddedCustomRunSpec `json:"customSpec,omitempty"` + + // +optional + // +listType=atomic + Params []Param `json:"params,omitempty"` + + // Used for cancelling a customrun (and maybe more later on) + // +optional + Status CustomRunSpecStatus `json:"status,omitempty"` + + // Status message for cancellation. + // +optional + StatusMessage CustomRunSpecStatusMessage `json:"statusMessage,omitempty"` + + // Used for propagating retries count to custom tasks + // +optional + Retries int `json:"retries,omitempty"` + + // +optional + ServiceAccountName string `json:"serviceAccountName"` + + // Time after which the custom-task times out. + // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Workspaces is a list of WorkspaceBindings from volumes to workspaces. 
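+	// Custom task controllers that do not support workspaces are expected to reject such CustomRuns (see CustomRunReasonWorkspaceNotSupported below).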
+	// +optional
+	// +listType=atomic
+	Workspaces []WorkspaceBinding `json:"workspaces,omitempty"`
+}
+
+// CustomRunSpecStatus defines the CustomRun spec status the user can provide
+type CustomRunSpecStatus string
+
+const (
+	// CustomRunSpecStatusCancelled indicates that the user wants to cancel the run,
+	// if not already cancelled or terminated
+	CustomRunSpecStatusCancelled CustomRunSpecStatus = "RunCancelled"
+)
+
+// CustomRunSpecStatusMessage defines human readable status messages for the CustomRun.
+type CustomRunSpecStatusMessage string
+
+const (
+	// CustomRunCancelledByPipelineMsg indicates that this CustomRun was cancelled because
+	// the PipelineRun it belongs to was cancelled.
+	CustomRunCancelledByPipelineMsg CustomRunSpecStatusMessage = "CustomRun cancelled as the PipelineRun it belongs to has been cancelled."
+	// CustomRunCancelledByPipelineTimeoutMsg indicates that the CustomRun was cancelled because the PipelineRun running it timed out.
+	CustomRunCancelledByPipelineTimeoutMsg CustomRunSpecStatusMessage = "CustomRun cancelled as the PipelineRun it belongs to has timed out."
+)
+
+// GetParam gets the Param from the CustomRunSpec with the given name
+// TODO(jasonhall): Move this to a Params type so other code can use it?
+func (rs CustomRunSpec) GetParam(name string) *Param {
+	for _, p := range rs.Params {
+		if p.Name == name {
+			return &p
+		}
+	}
+	return nil
+}
+
+const (
+	// CustomRunReasonCancelled must be used in the Condition Reason to indicate that a CustomRun was cancelled.
+	CustomRunReasonCancelled = "CustomRunCancelled"
+	// CustomRunReasonTimedOut must be used in the Condition Reason to indicate that a CustomRun timed out.
+	CustomRunReasonTimedOut = "CustomRunTimedOut"
+	// CustomRunReasonWorkspaceNotSupported can be used in the Condition Reason to indicate that the
+	// CustomRun contains a workspace which is not supported by this custom task.
+	CustomRunReasonWorkspaceNotSupported = "CustomRunWorkspaceNotSupported"
+)
+
+// CustomRunStatus defines the observed state of CustomRun.
+type CustomRunStatus = runv1beta1.CustomRunStatus
+
+var customrunCondSet = apis.NewBatchConditionSet()
+
+// GetConditionSet retrieves the condition set for this resource. Implements
+// the KRShaped interface.
+func (r *CustomRun) GetConditionSet() apis.ConditionSet { return customrunCondSet }
+
+// GetStatus retrieves the status of the CustomRun. Implements the KRShaped
+// interface.
+func (r *CustomRun) GetStatus() *duckv1.Status { return &r.Status.Status }
+
+// CustomRunStatusFields holds the fields of CustomRun's status. This is defined
+// separately and inlined so that other types can readily consume these fields
+// via duck typing.
+type CustomRunStatusFields = runv1beta1.CustomRunStatusFields
+
+// CustomRunResult is used to describe the results of a custom task
+type CustomRunResult = runv1beta1.CustomRunResult
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomRun represents a single execution of a Custom Task.
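+// A CustomRun is reconciled by the custom task controller registered for its customRef/customSpec type rather than by the core Pipelines controllers.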
+//
+// +k8s:openapi-gen=true
+type CustomRun struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// +optional
+	Spec CustomRunSpec `json:"spec,omitempty"`
+	// +optional
+	Status CustomRunStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomRunList contains a list of CustomRun
+type CustomRunList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []CustomRun `json:"items"`
+}
+
+// GetStatusCondition returns the CustomRun status as a ConditionAccessor
+func (r *CustomRun) GetStatusCondition() apis.ConditionAccessor {
+	return &r.Status
+}
+
+// GetGroupVersionKind implements kmeta.OwnerRefable.
+func (*CustomRun) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind(pipeline.RunControllerName)
+}
+
+// HasPipelineRunOwnerReference returns true if the CustomRun has an
+// owner reference of type PipelineRun
+func (r *CustomRun) HasPipelineRunOwnerReference() bool {
+	for _, ref := range r.GetOwnerReferences() {
+		if ref.Kind == pipeline.PipelineRunControllerName {
+			return true
+		}
+	}
+	return false
+}
+
+// IsCancelled returns true if the CustomRun's spec status is set to Cancelled state
+func (r *CustomRun) IsCancelled() bool {
+	return r.Spec.Status == CustomRunSpecStatusCancelled
+}
+
+// IsDone returns true if the CustomRun's status indicates that it is done.
+func (r *CustomRun) IsDone() bool {
+	return !r.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
+}
+
+// HasStarted returns true if the CustomRun has a valid start time set in its status
+func (r *CustomRun) HasStarted() bool {
+	return r.Status.StartTime != nil && !r.Status.StartTime.IsZero()
+}
+
+// IsSuccessful returns true if the CustomRun's status indicates that it has succeeded.
+func (r *CustomRun) IsSuccessful() bool {
+	return r != nil && r.Status.GetCondition(apis.ConditionSucceeded).IsTrue()
+}
+
+// GetCustomRunKey returns the CustomRun's key for the timeout handler map
+func (r *CustomRun) GetCustomRunKey() string {
+	// The address of the pointer is a threadsafe unique identifier for the customrun
+	return fmt.Sprintf("%s/%p", "CustomRun", r)
+}
+
+// HasTimedOut returns true if the CustomRun's running time is beyond the allowed timeout
+func (r *CustomRun) HasTimedOut(c clock.PassiveClock) bool {
+	if r.Status.StartTime == nil || r.Status.StartTime.IsZero() {
+		return false
+	}
+	timeout := r.GetTimeout()
+	// If timeout is set to 0 or defaulted to 0, there is no timeout.
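+	// (apisconfig.NoTimeoutDuration is zero, so a zero-valued timeout disables the deadline check.)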
+ if timeout == apisconfig.NoTimeoutDuration { + return false + } + runtime := c.Since(r.Status.StartTime.Time) + return runtime > timeout +} + +// GetTimeout returns the timeout for this customrun, or the default if not configured +func (r *CustomRun) GetTimeout() time.Duration { + // Use the platform default if no timeout is set + if r.Spec.Timeout == nil { + return apisconfig.DefaultTimeoutMinutes * time.Minute + } + return r.Spec.Timeout.Duration +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_validation.go new file mode 100644 index 0000000000..51ede3d6ca --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/customrun_validation.go @@ -0,0 +1,80 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "github.com/tektoncd/pipeline/pkg/apis/validate" + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" +) + +var _ apis.Validatable = (*CustomRun)(nil) + +// Validate customRun +func (r *CustomRun) Validate(ctx context.Context) *apis.FieldError { + if err := validate.ObjectMetadata(r.GetObjectMeta()).ViaField("metadata"); err != nil { + return err + } + if apis.IsInDelete(ctx) { + return nil + } + return r.Spec.Validate(ctx) +} + +// Validate CustomRun spec +func (rs *CustomRunSpec) Validate(ctx context.Context) *apis.FieldError { + // this covers the case rs.customRef == nil && rs.customSpec == nil + if equality.Semantic.DeepEqual(rs, &CustomRunSpec{}) { + return apis.ErrMissingField("spec") + } + + if rs.CustomRef != nil && rs.CustomSpec != nil { + return apis.ErrMultipleOneOf("spec.customRef", "spec.customSpec") + } + if rs.CustomRef == nil && rs.CustomSpec == nil { + return apis.ErrMissingOneOf("spec.customRef", "spec.customSpec") + } + if rs.CustomRef != nil { + if rs.CustomRef.APIVersion == "" { + return apis.ErrMissingField("spec.customRef.apiVersion") + } + if rs.CustomRef.Kind == "" { + return apis.ErrMissingField("spec.customRef.kind") + } + } + if rs.CustomSpec != nil { + if rs.CustomSpec.APIVersion == "" { + return apis.ErrMissingField("spec.customSpec.apiVersion") + } + if rs.CustomSpec.Kind == "" { + return apis.ErrMissingField("spec.customSpec.kind") + } + } + if rs.Status == "" { + if rs.StatusMessage != "" { + return apis.ErrInvalidValue(fmt.Sprintf("statusMessage should not be set if status is not set, but it is currently set to %s", rs.StatusMessage), "statusMessage") + } + } + if err := ValidateParameters(ctx, rs.Params).ViaField("spec.params"); err != nil { + return err + } + + return ValidateWorkspaceBindings(ctx, rs.Workspaces).ViaField("spec.workspaces") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index d57f333d8c..885182db90 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -30,88 +30,97 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference": schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery": schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState": schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTask": schema_pkg_apis_pipeline_v1beta1_ClusterTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTaskList": schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec": schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue": schema_pkg_apis_pipeline_v1beta1_ParamValue(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline": schema_pkg_apis_pipeline_v1beta1_Pipeline(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource": schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineList": schema_pkg_apis_pipeline_v1beta1_PipelineList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult": schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult": schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec": schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask": schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskParam": schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources": schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRun": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec": schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverParam": schema_pkg_apis_pipeline_v1beta1_ResolverParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskList": schema_pkg_apis_pipeline_v1beta1_TaskList(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef": schema_pkg_apis_pipeline_v1beta1_TaskRef(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource": schema_pkg_apis_pipeline_v1beta1_TaskResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding": schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources": schema_pkg_apis_pipeline_v1beta1_TaskResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult": schema_pkg_apis_pipeline_v1beta1_TaskResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun": schema_pkg_apis_pipeline_v1beta1_TaskRun(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug": schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunInputs": schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunList": schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunOutputs": schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources": schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult": schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec": schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus": schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec": schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields": schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression": schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding": schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref), - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage": schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource": schema_pkg_apis_resource_v1alpha1_PipelineResource(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceList": schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec": schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref), - 
"github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus": schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceDeclaration": schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam": schema_pkg_apis_resource_v1alpha1_ResourceParam(ref), - "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam": schema_pkg_apis_resource_v1alpha1_SecretParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ChildStatusReference": schema_pkg_apis_pipeline_v1beta1_ChildStatusReference(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDelivery": schema_pkg_apis_pipeline_v1beta1_CloudEventDelivery(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CloudEventDeliveryState": schema_pkg_apis_pipeline_v1beta1_CloudEventDeliveryState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTask": schema_pkg_apis_pipeline_v1beta1_ClusterTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ClusterTaskList": schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun": schema_pkg_apis_pipeline_v1beta1_CustomRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunList": schema_pkg_apis_pipeline_v1beta1_CustomRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec": schema_pkg_apis_pipeline_v1beta1_CustomRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec": schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask": schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.InternalTaskModifier": schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix": schema_pkg_apis_pipeline_v1beta1_Matrix(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param": schema_pkg_apis_pipeline_v1beta1_Param(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamSpec": schema_pkg_apis_pipeline_v1beta1_ParamSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ParamValue": schema_pkg_apis_pipeline_v1beta1_ParamValue(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Pipeline": schema_pkg_apis_pipeline_v1beta1_Pipeline(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineDeclaredResource": schema_pkg_apis_pipeline_v1beta1_PipelineDeclaredResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineList": schema_pkg_apis_pipeline_v1beta1_PipelineList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRef": schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceBinding": schema_pkg_apis_pipeline_v1beta1_PipelineResourceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceRef": schema_pkg_apis_pipeline_v1beta1_PipelineResourceRef(ref), + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResourceResult": schema_pkg_apis_pipeline_v1beta1_PipelineResourceResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineResult": schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRun": schema_pkg_apis_pipeline_v1beta1_PipelineRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunList": schema_pkg_apis_pipeline_v1beta1_PipelineRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunResult": schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunStatusFields": schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineRunTaskRunStatus": schema_pkg_apis_pipeline_v1beta1_PipelineRunTaskRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineSpec": schema_pkg_apis_pipeline_v1beta1_PipelineSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTask": schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskInputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskInputResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata": schema_pkg_apis_pipeline_v1beta1_PipelineTaskMetadata(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskOutputResource": schema_pkg_apis_pipeline_v1beta1_PipelineTaskOutputResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskParam": schema_pkg_apis_pipeline_v1beta1_PipelineTaskParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources": schema_pkg_apis_pipeline_v1beta1_PipelineTaskResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRun": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskRunSpec": schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_PipelineWorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec": schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverRef": schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResultRef": schema_pkg_apis_pipeline_v1beta1_ResultRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Sidecar": schema_pkg_apis_pipeline_v1beta1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref), + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskList": schema_pkg_apis_pipeline_v1beta1_TaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef": schema_pkg_apis_pipeline_v1beta1_TaskRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResource": schema_pkg_apis_pipeline_v1beta1_TaskResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResourceBinding": schema_pkg_apis_pipeline_v1beta1_TaskResourceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResources": schema_pkg_apis_pipeline_v1beta1_TaskResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskResult": schema_pkg_apis_pipeline_v1beta1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRun": schema_pkg_apis_pipeline_v1beta1_TaskRun(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug": schema_pkg_apis_pipeline_v1beta1_TaskRunDebug(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunInputs": schema_pkg_apis_pipeline_v1beta1_TaskRunInputs(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunList": schema_pkg_apis_pipeline_v1beta1_TaskRunList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunOutputs": schema_pkg_apis_pipeline_v1beta1_TaskRunOutputs(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources": schema_pkg_apis_pipeline_v1beta1_TaskRunResources(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResult": schema_pkg_apis_pipeline_v1beta1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunSidecarOverride(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSpec": schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatus": schema_pkg_apis_pipeline_v1beta1_TaskRunStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStatusFields": schema_pkg_apis_pipeline_v1beta1_TaskRunStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride": schema_pkg_apis_pipeline_v1beta1_TaskRunStepOverride(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec": schema_pkg_apis_pipeline_v1beta1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TimeoutFields": schema_pkg_apis_pipeline_v1beta1_TimeoutFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression": schema_pkg_apis_pipeline_v1beta1_WhenExpression(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding": schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1beta1_WorkspaceDeclaration(ref), + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1beta1_WorkspacePipelineTaskBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage": schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref), + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequest": schema_pkg_apis_resolution_v1alpha1_ResolutionRequest(ref), + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestList": schema_pkg_apis_resolution_v1alpha1_ResolutionRequestList(ref), + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestSpec": schema_pkg_apis_resolution_v1alpha1_ResolutionRequestSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestStatus": schema_pkg_apis_resolution_v1alpha1_ResolutionRequestStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestStatusFields": schema_pkg_apis_resolution_v1alpha1_ResolutionRequestStatusFields(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResource": schema_pkg_apis_resource_v1alpha1_PipelineResource(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceList": schema_pkg_apis_resource_v1alpha1_PipelineResourceList(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceSpec": schema_pkg_apis_resource_v1alpha1_PipelineResourceSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.PipelineResourceStatus": schema_pkg_apis_resource_v1alpha1_PipelineResourceStatus(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceDeclaration": schema_pkg_apis_resource_v1alpha1_ResourceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.ResourceParam": schema_pkg_apis_resource_v1alpha1_ResourceParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1.SecretParam": schema_pkg_apis_resource_v1alpha1_SecretParam(ref), } } @@ -603,6 +612,239 @@ func schema_pkg_apis_pipeline_v1beta1_ClusterTaskList(ref common.ReferenceCallba } } +func schema_pkg_apis_pipeline_v1beta1_CustomRun(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CustomRun represents a single execution of a Custom Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRunSpec", "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1.CustomRunStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_CustomRunList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CustomRunList contains a list of CustomRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.CustomRun", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_CustomRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CustomRunSpec defines the desired state of CustomRun", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "customRef": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef"), + }, + }, + "customSpec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec is a specification of a custom task", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec"), + }, + }, + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), + }, + }, + }, + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Used for cancelling a customrun (and maybe more later on)", + Type: []string{"string"}, + Format: "", + }, + }, + "statusMessage": { + SchemaProps: spec.SchemaProps{ + Description: "Status message for cancellation.", + Type: []string{"string"}, + Format: "", + }, + }, + "retries": { + SchemaProps: spec.SchemaProps{ + Description: "Used for propagating retries count to custom tasks", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Time after which the custom-task times out. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Workspaces is a list of WorkspaceBindings from volumes to workspaces.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedCustomRunSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_EmbeddedCustomRunSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EmbeddedCustomRunSpec allows custom task definitions to be embedded", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec is a specification of a custom task", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + func schema_pkg_apis_pipeline_v1beta1_EmbeddedTask(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -846,6 +1088,40 @@ func schema_pkg_apis_pipeline_v1beta1_InternalTaskModifier(ref common.ReferenceC } } +func schema_pkg_apis_pipeline_v1beta1_Matrix(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Matrix is used to fan out Tasks in a Pipeline", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. 
The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, + } +} + func schema_pkg_apis_pipeline_v1beta1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1764,6 +2040,12 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatus(ref common.ReferenceCall }, }, }, + "finallyStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, }, }, @@ -1882,6 +2164,12 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunStatusFields(ref common.Referen }, }, }, + "finallyStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, }, }, @@ -2166,22 +2454,9 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, }, "matrix": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, SchemaProps: spec.SchemaProps{ Description: "Matrix declares parameters used to fan out this task.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), - }, - }, - }, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix"), }, }, "workspaces": { @@ -2213,7 +2488,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTask(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.EmbeddedTask", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Matrix", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WhenExpression", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspacePipelineTaskBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } @@ -2574,36 +2849,6 @@ func schema_pkg_apis_pipeline_v1beta1_PropertySpec(ref common.ReferenceCallback) } } -func schema_pkg_apis_pipeline_v1beta1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: 
spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResolverParam is a single parameter passed to a resolver.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of the parameter that will be passed to the resolver.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the string value of the parameter that will be passed to the resolver.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - } -} - func schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2618,20 +2863,20 @@ func schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref common.ReferenceCallback) Format: "", }, }, - "resource": { + "params": { VendorExtensible: spec.VendorExtensible{ Extensions: spec.Extensions{ "x-kubernetes-list-type": "atomic", }, }, SchemaProps: spec.SchemaProps{ - Description: "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Description: "Params contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ Default: map[string]interface{}{}, - Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverParam"), + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"), }, }, }, @@ -2641,7 +2886,7 @@ func schema_pkg_apis_pipeline_v1beta1_ResolverRef(ref common.ReferenceCallback) }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ResolverParam"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param"}, } } @@ -5362,6 +5607,222 @@ func schema_pkg_apis_pipeline_v1beta1_WorkspaceUsage(ref common.ReferenceCallbac } } +func schema_pkg_apis_resolution_v1alpha1_ResolutionRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolutionRequest is an object for requesting the content of a Tekton resource like a pipeline.yaml.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds the information for the request part of the resource request.", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status communicates the state of the request and, ultimately, the content of the resolved resource.", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestSpec", "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequestStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_resolution_v1alpha1_ResolutionRequestList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolutionRequestList is a list of ResolutionRequests.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequest"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1.ResolutionRequest", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_resolution_v1alpha1_ResolutionRequestSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolutionRequestSpec are all the fields in the spec of the ResolutionRequest CRD.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters are the runtime attributes passed to the resolver to help it figure out how to resolve the resource being requested. For example: repo URL, commit SHA, path to file, the kind of authentication to leverage, etc.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_resolution_v1alpha1_ResolutionRequestStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolutionRequestStatus are all the fields in a ResolutionRequest's status subresource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Conditions the latest available observations of a resource's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("knative.dev/pkg/apis.Condition"), + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. 
This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"data"}, + }, + }, + Dependencies: []string{ + "knative.dev/pkg/apis.Condition"}, + } +} + +func schema_pkg_apis_resolution_v1alpha1_ResolutionRequestStatusFields(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolutionRequestStatusFields are the ResolutionRequest-specific fields for the status subresource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"data"}, + }, + }, + } +} + func schema_pkg_apis_resource_v1alpha1_PipelineResource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index 631d080d68..460d5788a0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -314,19 +314,23 @@ func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefi return errs } -func validateParametersInTaskMatrix(matrix []Param) (errs *apis.FieldError) { - for _, param := range matrix { - if param.Value.Type != ParamTypeArray { - errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) +func validateParametersInTaskMatrix(matrix *Matrix) (errs *apis.FieldError) { + if matrix != nil { + for _, param := range matrix.Params { + if param.Value.Type != ParamTypeArray { + errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) + } } } return errs } -func validateParameterInOneOfMatrixOrParams(matrix []Param, params []Param) (errs *apis.FieldError) { +func validateParameterInOneOfMatrixOrParams(matrix *Matrix, params []Param) (errs *apis.FieldError) { matrixParameterNames := sets.NewString() - for _, param := range matrix { - matrixParameterNames.Insert(param.Name) + if matrix != nil { + for _, param := range matrix.Params { + matrixParameterNames.Insert(param.Name) + } } for _, param := range params { if matrixParameterNames.Has(param.Name) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go index d2630e07cb..992d738d14 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go @@ -21,6 +21,8 @@ import ( "fmt" v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -34,6 +36,9 @@ func (p *Pipeline) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.Pipeline: sink.ObjectMeta = p.ObjectMeta + if err := serializePipelineResources(&sink.ObjectMeta, &p.Spec); err != nil { + return err + } return p.Spec.ConvertTo(ctx, &sink.Spec) default: return fmt.Errorf("unknown version, got: %T", sink) @@ -79,7 +84,6 @@ func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1.PipelineSpec) er } sink.Finally = append(sink.Finally, new) } - // TODO: Handle Resources in #4546 return nil } @@ -88,6 +92,9 @@ func (p *Pipeline) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.Pipeline: p.ObjectMeta = source.ObjectMeta + if err := deserializePipelineResources(&p.ObjectMeta, &p.Spec); err != nil { + return err + } return p.Spec.ConvertFrom(ctx, &source.Spec) default: return fmt.Errorf("unknown version, got: %T", p) } } @@ -164,10 +171,10 @@ func (pt PipelineTask) convertTo(ctx context.Context, sink *v1.PipelineTask) err sink.Params = append(sink.Params, new) } sink.Matrix = nil - for _, m := range pt.Matrix { - new := v1.Param{} - m.convertTo(ctx, &new) - sink.Matrix = append(sink.Matrix, new) + if pt.IsMatrixed() { + new := v1.Matrix{} + pt.Matrix.convertTo(ctx, &new) + sink.Matrix = &new } sink.Workspaces = nil for _, w := range pt.Workspaces { @@ -210,10 +217,10 @@ func (pt *PipelineTask) convertFrom(ctx context.Context, source v1.PipelineTask) pt.Params = append(pt.Params, new) } pt.Matrix = nil - for _, m := range source.Matrix { - new := Param{} - new.convertFrom(ctx, m) - pt.Matrix = append(pt.Matrix, new) + if source.IsMatrixed() { + new := Matrix{} + new.convertFrom(ctx, *source.Matrix) + pt.Matrix = &new } pt.Workspaces = nil for _, w := range source.Workspaces { @@ -254,6 +261,22 @@ func (we *WhenExpression) convertFrom(ctx context.Context, source v1.WhenExpress we.Values = source.Values } +func (m *Matrix) convertTo(ctx context.Context, sink *v1.Matrix) { + for _, param := range m.Params { + new := v1.Param{} + param.convertTo(ctx, &new) + sink.Params = append(sink.Params, new) + } +} + +func (m *Matrix) convertFrom(ctx context.Context, source v1.Matrix) { + for _, param := range source.Params { + new := Param{} + new.convertFrom(ctx, param) + m.Params = append(m.Params, new) + } +} + func (pr PipelineResult) convertTo(ctx context.Context, sink *v1.PipelineResult) { sink.Name = pr.Name sink.Type = v1.ResultsType(pr.Type) @@ -271,3 +294,32 @@ func (pr *PipelineResult) convertFrom(ctx context.Context, source v1.PipelineRes newValue.convertFrom(ctx, source.Value) pr.Value = newValue } + +func (ptm PipelineTaskMetadata) convertTo(ctx context.Context, sink *v1.PipelineTaskMetadata) { + sink.Labels = ptm.Labels + sink.Annotations = ptm.Annotations +} + +func (ptm *PipelineTaskMetadata) convertFrom(ctx context.Context, source v1.PipelineTaskMetadata) { + ptm.Labels = source.Labels + ptm.Annotations = source.Annotations +} + +func serializePipelineResources(meta *metav1.ObjectMeta, spec *PipelineSpec) error { + if spec.Resources == nil { + return nil + } + return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) +} + +func deserializePipelineResources(meta 
*metav1.ObjectMeta, spec *PipelineSpec) error { + resources := &[]PipelineDeclaredResource{} + err := version.DeserializeFromMetadata(meta, resources, resourcesAnnotationKey) + if err != nil { + return err + } + if len(*resources) != 0 { + spec.Resources = *resources + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index f994fb0ad8..6102977c04 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -204,8 +204,7 @@ type PipelineTask struct { // Matrix declares parameters used to fan out this task. // +optional - // +listType=atomic - Matrix []Param `json:"matrix,omitempty"` + Matrix *Matrix `json:"matrix,omitempty"` // Workspaces maps workspaces from the pipeline spec to the workspaces // declared in the Task. @@ -220,6 +219,16 @@ type PipelineTask struct { Timeout *metav1.Duration `json:"timeout,omitempty"` } +// Matrix is used to fan out Tasks in a Pipeline +type Matrix struct { + // Params is a list of parameters used to fan out the pipelineTask + // Params takes only `Parameters` of type `"array"` + // Each array element is supplied to the `PipelineTask` by substituting `params` of type `"string"` in the underlying `Task`. + // The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting. + // +listType=atomic + Params []Param `json:"params,omitempty"` +} + // validateRefOrSpec validates at least one of taskRef or taskSpec is specified func (pt PipelineTask) validateRefOrSpec() (errs *apis.FieldError) { // can't have both taskRef and taskSpec at the same time @@ -295,7 +304,7 @@ func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) if pt.TaskRef.Resolver != "" { errs = errs.Also(apis.ErrDisallowedFields("taskref.resolver")) } - if len(pt.TaskRef.Resource) > 0 { + if len(pt.TaskRef.Params) > 0 { errs = errs.Also(apis.ErrDisallowedFields("taskref.resource")) } } @@ -303,8 +312,13 @@ func (pt PipelineTask) validateTask(ctx context.Context) (errs *apis.FieldError) return errs } +// IsMatrixed return whether pipeline task is matrixed +func (pt *PipelineTask) IsMatrixed() bool { + return pt.Matrix != nil && len(pt.Matrix.Params) > 0 +} + func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldError) { - if len(pt.Matrix) != 0 { + if pt.IsMatrixed() { // This is an alpha feature and will fail validation if it's used in a pipeline spec // when the enable-api-fields feature gate is anything but "alpha". errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) @@ -349,11 +363,11 @@ func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { // GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. 
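For context, the count returned by GetMatrixCombinationsCount below is simply the product of the lengths of the matrix's array parameters. A minimal standalone sketch of that arithmetic, using hypothetical parameter values rather than the vendored types:

package main

import "fmt"

// combinationCount mirrors the counting loop in GetMatrixCombinationsCount:
// every array param multiplies the number of fanned-out TaskRuns.
func combinationCount(matrixParams map[string][]string) int {
	if len(matrixParams) == 0 {
		return 0
	}
	count := 1
	for _, values := range matrixParams {
		count *= len(values)
	}
	return count
}

func main() {
	// Hypothetical matrix: 2 browsers x 3 platforms = 6 combinations.
	fmt.Println(combinationCount(map[string][]string{
		"browser":  {"chrome", "firefox"},
		"platform": {"linux", "mac", "windows"},
	}))
}
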
func (pt *PipelineTask) GetMatrixCombinationsCount() int { - if len(pt.Matrix) == 0 { + if !pt.IsMatrixed() { return 0 } count := 1 - for _, param := range pt.Matrix { + for _, param := range pt.Matrix.Params { count *= len(param.Value.ArrayVal) } return count @@ -512,44 +526,29 @@ func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { // Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering func (pt PipelineTask) Deps() []string { - deps := []string{} + // hold the list of dependencies in a set to avoid duplicates + deps := sets.NewString() - deps = append(deps, pt.resourceDeps()...) - deps = append(deps, pt.orderingDeps()...) - - uniqueDeps := sets.NewString() - for _, w := range deps { - if uniqueDeps.Has(w) { - continue - } - uniqueDeps.Insert(w) - } - - return uniqueDeps.List() -} - -func (pt PipelineTask) resourceDeps() []string { - resourceDeps := []string{} + // add any new dependents from a resource/workspace if pt.Resources != nil { for _, rd := range pt.Resources.Inputs { - resourceDeps = append(resourceDeps, rd.From...) + for _, f := range rd.From { + deps.Insert(f) + } } } - // Add any dependents from result references. + // add any new dependents from result references - resource dependency for _, ref := range PipelineTaskResultRefs(&pt) { - resourceDeps = append(resourceDeps, ref.PipelineTask) + deps.Insert(ref.PipelineTask) } - return resourceDeps -} - -func (pt PipelineTask) orderingDeps() []string { - orderingDeps := []string{} + // add any new dependents from runAfter - order dependency for _, runAfter := range pt.RunAfter { - orderingDeps = append(orderingDeps, runAfter) + deps.Insert(runAfter) } - return orderingDeps + + return deps.List() } // PipelineTaskList is a list of PipelineTasks diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index 94f388b953..fbfab74784 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -59,17 +59,16 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validateFrom(ps.Tasks)) // Validate the pipeline task graph errs = errs.Also(validateGraph(ps.Tasks)) - errs = errs.Also(validateParamResults(ps.Tasks)) // The parameter variables should be valid - errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks")) - errs = errs.Also(validatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally")) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Tasks, ps.Params).ViaField("tasks")) + errs = errs.Also(ValidatePipelineParameterVariables(ctx, ps.Finally, ps.Params).ViaField("finally")) errs = errs.Also(validatePipelineContextVariables(ps.Tasks).ViaField("tasks")) errs = errs.Also(validatePipelineContextVariables(ps.Finally).ViaField("finally")) errs = errs.Also(validateExecutionStatusVariables(ps.Tasks, ps.Finally)) // Validate the pipeline's workspaces. 
errs = errs.Also(validatePipelineWorkspacesDeclarations(ps.Workspaces)) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) - errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) + errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Tasks).ViaField("tasks")) + errs = errs.Also(validatePipelineWorkspacesUsage(ctx, ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks, ps.Finally)) errs = errs.Also(validateTasksAndFinallySection(ps)) @@ -112,7 +111,10 @@ func validatePipelineWorkspacesDeclarations(wss []PipelineWorkspaceDeclaration) // validatePipelineWorkspacesUsage validates that all the referenced workspaces (by pipeline tasks) are specified in // the pipeline -func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { +func validatePipelineWorkspacesUsage(ctx context.Context, wss []PipelineWorkspaceDeclaration, pts []PipelineTask) (errs *apis.FieldError) { + if config.ValidateParameterVariablesAndWorkspaces(ctx) == false { + return nil + } workspaceNames := sets.NewString() for _, ws := range wss { workspaceNames.Insert(ws.Name) @@ -124,10 +126,10 @@ func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []P return errs } -// validatePipelineParameterVariables validates parameters with those specified by each pipeline task, +// ValidatePipelineParameterVariables validates parameters with those specified by each pipeline task, // (1) it validates the type of parameter is either string or array (2) parameter default value matches // with the type of that param (3) ensures that the referenced param variable is defined is part of the param declarations -func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { +func ValidatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { parameterNames := sets.NewString() arrayParameterNames := sets.NewString() objectParameterNameKeys := map[string][]string{} @@ -151,13 +153,18 @@ func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTas } } } - return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { + errs = errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, objectParameterNameKeys)) + } + return errs } func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + if task.IsMatrixed() { + errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + } errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, 
arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } return errs @@ -177,7 +184,11 @@ func validatePipelineContextVariables(tasks []PipelineTask) *apis.FieldError { ) var paramValues []string for _, task := range tasks { - for _, param := range append(task.Params, task.Matrix...) { + var matrixParams []Param + if task.IsMatrixed() { + matrixParams = task.Matrix.Params + } + for _, param := range append(task.Params, matrixParams...) { paramValues = append(paramValues, param.Value.StringVal) paramValues = append(paramValues, param.Value.ArrayVal...) } @@ -226,26 +237,6 @@ func validatePipelineContextVariablesInParamValues(paramValues []string, prefix return errs } -// validateParamResults ensures that task result variables are properly configured -func validateParamResults(tasks []PipelineTask) (errs *apis.FieldError) { - for idx, task := range tasks { - for _, param := range task.Params { - expressions, ok := GetVarSubstitutionExpressionsForParam(param) - if ok { - if LooksLikeContainsResultRefs(expressions) { - expressions = filter(expressions, looksLikeResultRef) - resultRefs := NewResultRefs(expressions) - if len(expressions) != len(resultRefs) { - errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs), - "value").ViaFieldKey("params", param.Name).ViaFieldIndex("tasks", idx)) - } - } - } - } - } - return errs -} - func filter(arr []string, cond func(string) bool) []string { result := []string{} for i := range arr { @@ -500,11 +491,11 @@ func validateFrom(tasks []PipelineTask) (errs *apis.FieldError) { // validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency // cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource // is actually an output of the Task it should come from. 
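validateGraph below feeds dag.Build with each task's Deps() list, and the Deps rewrite earlier in this patch collects those dependencies in an apimachinery string set so a task that is both a result dependency and a runAfter dependency is reported once. A small sketch of that set pattern, with hypothetical task names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	deps := sets.NewString()
	// The same dependency may arrive from resources, result refs, and runAfter;
	// the set absorbs the duplicates.
	for _, d := range []string{"build", "unit-test", "build", "lint"} {
		deps.Insert(d)
	}
	fmt.Println(deps.List()) // sorted, deduplicated: [build lint unit-test]
}
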
-func validateGraph(tasks []PipelineTask) *apis.FieldError { +func validateGraph(tasks []PipelineTask) (errs *apis.FieldError) { if _, err := dag.Build(PipelineTaskList(tasks), PipelineTaskList(tasks).Deps()); err != nil { - return apis.ErrInvalidValue(err.Error(), "tasks") + errs = errs.Also(apis.ErrInvalidValue(err.Error(), "tasks")) } - return nil + return errs } func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.FieldError) { @@ -517,7 +508,7 @@ func validateMatrix(ctx context.Context, tasks []PipelineTask) (errs *apis.Field func validateResultsFromMatrixedPipelineTasksNotConsumed(tasks []PipelineTask, finally []PipelineTask) (errs *apis.FieldError) { matrixedPipelineTasks := sets.String{} for _, pt := range tasks { - if len(pt.Matrix) != 0 { + if pt.IsMatrixed() { matrixedPipelineTasks.Insert(pt.Name) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go new file mode 100644 index 0000000000..f748f56c3d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_conversion.go @@ -0,0 +1,45 @@ +package v1beta1 + +import ( + "context" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" +) + +func (pr PipelineRef) convertTo(ctx context.Context, sink *v1.PipelineRef) { + sink.Name = pr.Name + sink.APIVersion = pr.APIVersion + new := v1.ResolverRef{} + pr.ResolverRef.convertTo(ctx, &new) + sink.ResolverRef = new + pr.convertBundleToResolver(sink) +} + +func (pr *PipelineRef) convertFrom(ctx context.Context, source v1.PipelineRef) { + pr.Name = source.Name + pr.APIVersion = source.APIVersion + new := ResolverRef{} + new.convertFrom(ctx, source.ResolverRef) + pr.ResolverRef = new +} + +// convertBundleToResolver converts v1beta1 bundle string to a remote reference with the bundle resolver in v1. +// The conversion from Resolver to Bundle is not being supported since remote resolution would be turned on by +// default and it will be in beta before the stored version of CRD getting swapped to v1. 
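Concretely, the conversion that follows turns a v1beta1 `bundle:` image reference into three resolver params. A rough sketch of the resulting shape, using a tiny stand-in type instead of v1.Param and hypothetical values:

package main

import "fmt"

// param is a minimal stand-in for v1.Param, for illustration only.
type param struct {
	Name, Value string
}

// bundleToResolverParams mirrors the mapping in convertBundleToResolver:
// the bundle image, the resource name, and the kind become resolver params.
func bundleToResolverParams(bundle, name, kind string) []param {
	return []param{
		{Name: "bundle", Value: bundle},
		{Name: "name", Value: name},
		{Name: "kind", Value: kind},
	}
}

func main() {
	for _, p := range bundleToResolverParams("example.com/catalog/bundle:v1", "my-pipeline", "Task") {
		fmt.Printf("%s=%s\n", p.Name, p.Value)
	}
}
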
+func (pr PipelineRef) convertBundleToResolver(sink *v1.PipelineRef) { + if pr.Bundle != "" { + sink.ResolverRef = v1.ResolverRef{ + Resolver: "bundles", + Params: []v1.Param{{ + Name: "bundle", + Value: v1.ParamValue{StringVal: pr.Bundle}, + }, { + Name: "name", + Value: v1.ParamValue{StringVal: pr.Name}, + }, { + Name: "kind", + Value: v1.ParamValue{StringVal: "Task"}, + }}, + } + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go index 45300a5480..2c7efd57c7 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go @@ -33,32 +33,39 @@ func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - switch { - case ref.Resolver != "": - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + if ref.Resolver != "" || ref.Params != nil { + if ref.Resolver != "" { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) + } } - if ref.Bundle != "" { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) + if ref.Params != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "params", config.AlphaAPIFields).ViaField("params")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "params")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "params")) + } + if ref.Resolver == "" { + errs = errs.Also(apis.ErrMissingField("resolver")) + } + errs = errs.Also(ValidateParameters(ctx, ref.Params)) + errs = errs.Also(validateResolutionParamTypes(ref.Params).ViaField("params")) } - case ref.Resource != nil: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) + } else { + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) } if ref.Bundle != "" { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) - } - if ref.Resolver == "" { - errs = errs.Also(apis.ErrMissingField("resolver")) - } - case ref.Name == "": - errs = errs.Also(apis.ErrMissingField("name")) - case ref.Bundle != "": - errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } } } return @@ -73,3 +80,14 @@ func validateBundleFeatureFlag(ctx context.Context, featureName string, wantValu } return nil } + +func validateResolutionParamTypes(params []Param) (errs *apis.FieldError) { + for i, p := range params { + if p.Value.Type == ParamTypeArray || p.Value.Type == ParamTypeObject { + errs = 
errs.Also(apis.ErrGeneric(fmt.Sprintf("remote resolution parameter type must be %s, not %s", + string(ParamTypeString), string(p.Value.Type))).ViaIndex(i)) + } + } + + return errs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go index 5365bfb1d0..0943b46cca 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go @@ -20,23 +20,209 @@ import ( "context" "fmt" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) var _ apis.Convertible = (*PipelineRun)(nil) // ConvertTo implements apis.Convertible -func (pr *PipelineRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { +func (pr *PipelineRun) ConvertTo(ctx context.Context, to apis.Convertible) error { if apis.IsInDelete(ctx) { return nil } - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) + switch sink := to.(type) { + case *v1.PipelineRun: + sink.ObjectMeta = pr.ObjectMeta + if err := serializePipelineRunResources(&sink.ObjectMeta, &pr.Spec); err != nil { + return err + } + return pr.Spec.ConvertTo(ctx, &sink.Spec) + default: + return fmt.Errorf("unknown version, got: %T", sink) + } +} + +// ConvertTo implements apis.Convertible +func (prs PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1.PipelineRunSpec) error { + if prs.PipelineRef != nil { + sink.PipelineRef = &v1.PipelineRef{} + prs.PipelineRef.convertTo(ctx, sink.PipelineRef) + } + if prs.PipelineSpec != nil { + sink.PipelineSpec = &v1.PipelineSpec{} + err := prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec) + if err != nil { + return err + } + } + sink.Params = nil + for _, p := range prs.Params { + new := v1.Param{} + p.convertTo(ctx, &new) + sink.Params = append(sink.Params, new) + } + sink.Status = v1.PipelineRunSpecStatus(prs.Status) + if prs.Timeouts != nil { + sink.Timeouts = &v1.TimeoutFields{} + prs.Timeouts.convertTo(ctx, sink.Timeouts) + } + if prs.Timeout != nil { + sink.Timeouts = &v1.TimeoutFields{} + sink.Timeouts.Pipeline = prs.Timeout + } + sink.TaskRunTemplate = v1.PipelineTaskRunTemplate{} + sink.TaskRunTemplate.PodTemplate = prs.PodTemplate + sink.TaskRunTemplate.ServiceAccountName = prs.ServiceAccountName + sink.Workspaces = nil + for _, w := range prs.Workspaces { + new := v1.WorkspaceBinding{} + w.convertTo(ctx, &new) + sink.Workspaces = append(sink.Workspaces, new) + } + sink.TaskRunSpecs = nil + for _, ptrs := range prs.TaskRunSpecs { + new := v1.PipelineTaskRunSpec{} + ptrs.convertTo(ctx, &new) + sink.TaskRunSpecs = append(sink.TaskRunSpecs, new) + } + return nil } // ConvertFrom implements apis.Convertible -func (pr *PipelineRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { - if apis.IsInDelete(ctx) { +func (pr *PipelineRun) ConvertFrom(ctx context.Context, from apis.Convertible) error { + switch source := from.(type) { + case *v1.PipelineRun: + pr.ObjectMeta = source.ObjectMeta + if err := deserializePipelineRunResources(&pr.ObjectMeta, &pr.Spec); err != nil { + return err + } + return pr.Spec.ConvertFrom(ctx, &source.Spec) + default: + return fmt.Errorf("unknown version, got: %T", pr) + } +} + +// ConvertFrom implements apis.Convertible +func (prs *PipelineRunSpec) ConvertFrom(ctx 
context.Context, source *v1.PipelineRunSpec) error { + if source.PipelineRef != nil { + newPipelineRef := PipelineRef{} + newPipelineRef.convertFrom(ctx, *source.PipelineRef) + prs.PipelineRef = &newPipelineRef + } + if source.PipelineSpec != nil { + newPipelineSpec := PipelineSpec{} + err := newPipelineSpec.ConvertFrom(ctx, source.PipelineSpec) + if err != nil { + return err + } + prs.PipelineSpec = &newPipelineSpec + } + prs.Params = nil + for _, p := range source.Params { + new := Param{} + new.convertFrom(ctx, p) + prs.Params = append(prs.Params, new) + } + prs.ServiceAccountName = source.TaskRunTemplate.ServiceAccountName + prs.Status = PipelineRunSpecStatus(source.Status) + if source.Timeouts != nil { + newTimeouts := &TimeoutFields{} + newTimeouts.convertFrom(ctx, *source.Timeouts) + prs.Timeouts = newTimeouts + } + prs.PodTemplate = source.TaskRunTemplate.PodTemplate + prs.Workspaces = nil + for _, w := range source.Workspaces { + new := WorkspaceBinding{} + new.convertFrom(ctx, w) + prs.Workspaces = append(prs.Workspaces, new) + } + prs.TaskRunSpecs = nil + for _, trs := range source.TaskRunSpecs { + new := PipelineTaskRunSpec{} + new.convertFrom(ctx, trs) + prs.TaskRunSpecs = append(prs.TaskRunSpecs, new) + } + return nil +} + +func (tf TimeoutFields) convertTo(ctx context.Context, sink *v1.TimeoutFields) { + sink.Pipeline = tf.Pipeline + sink.Tasks = tf.Tasks + sink.Finally = tf.Finally +} + +func (tf *TimeoutFields) convertFrom(ctx context.Context, source v1.TimeoutFields) { + tf.Pipeline = source.Pipeline + tf.Tasks = source.Tasks + tf.Finally = source.Finally +} + +func (ptrs PipelineTaskRunSpec) convertTo(ctx context.Context, sink *v1.PipelineTaskRunSpec) { + sink.PipelineTaskName = ptrs.PipelineTaskName + sink.ServiceAccountName = ptrs.TaskServiceAccountName + sink.PodTemplate = ptrs.TaskPodTemplate + sink.StepOverrides = nil + for _, so := range ptrs.StepOverrides { + new := v1.TaskRunStepOverride{} + so.convertTo(ctx, &new) + sink.StepOverrides = append(sink.StepOverrides, new) + } + sink.SidecarOverrides = nil + for _, so := range ptrs.SidecarOverrides { + new := v1.TaskRunSidecarOverride{} + so.convertTo(ctx, &new) + sink.SidecarOverrides = append(sink.SidecarOverrides, new) + } + if ptrs.Metadata != nil { + sink.Metadata = &v1.PipelineTaskMetadata{} + ptrs.Metadata.convertTo(ctx, sink.Metadata) + } + sink.ComputeResources = ptrs.ComputeResources +} + +func (ptrs *PipelineTaskRunSpec) convertFrom(ctx context.Context, source v1.PipelineTaskRunSpec) { + ptrs.PipelineTaskName = source.PipelineTaskName + ptrs.TaskServiceAccountName = source.ServiceAccountName + ptrs.TaskPodTemplate = source.PodTemplate + ptrs.StepOverrides = nil + for _, so := range source.StepOverrides { + new := TaskRunStepOverride{} + new.convertFrom(ctx, so) + ptrs.StepOverrides = append(ptrs.StepOverrides, new) + } + ptrs.SidecarOverrides = nil + for _, so := range source.SidecarOverrides { + new := TaskRunSidecarOverride{} + new.convertFrom(ctx, so) + ptrs.SidecarOverrides = append(ptrs.SidecarOverrides, new) + } + if source.Metadata != nil { + newMetadata := PipelineTaskMetadata{} + newMetadata.convertFrom(ctx, *source.Metadata) + ptrs.Metadata = &newMetadata + } + ptrs.ComputeResources = source.ComputeResources +} + +func serializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error { + if spec.Resources == nil { return nil } - return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) + return version.SerializeToMetadata(meta, spec.Resources, 
resourcesAnnotationKey) +} + +func deserializePipelineRunResources(meta *metav1.ObjectMeta, spec *PipelineRunSpec) error { + resources := []PipelineResourceBinding{} + err := version.DeserializeFromMetadata(meta, &resources, resourcesAnnotationKey) + if err != nil { + return err + } + if len(resources) != 0 { + spec.Resources = resources + } + return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 6810a852ed..354ec62180 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -31,7 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "knative.dev/pkg/apis" duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" ) @@ -174,6 +174,40 @@ func (pr *PipelineRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bo return false } +// HaveTasksTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Tasks +func (pr *PipelineRun) HaveTasksTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.TasksTimeout() + startTime := pr.Status.StartTime + + if !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + +// HasFinallyTimedOut returns true if a pipelinerun has exceeded its spec.Timeouts.Finally, based on status.FinallyStartTime +func (pr *PipelineRun) HasFinallyTimedOut(ctx context.Context, c clock.PassiveClock) bool { + timeout := pr.FinallyTimeout() + startTime := pr.Status.FinallyStartTime + + if startTime != nil && !startTime.IsZero() && timeout != nil { + if timeout.Duration == config.NoTimeoutDuration { + return false + } + runtime := c.Since(startTime.Time) + if runtime > timeout.Duration { + return true + } + } + return false +} + // HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is // used for creating PersistentVolumeClaims with an OwnerReference for each run func (pr *PipelineRun) HasVolumeClaimTemplate() bool { @@ -418,6 +452,10 @@ type PipelineRunStatusFields struct { // +optional // +listType=atomic ChildReferences []ChildStatusReference `json:"childReferences,omitempty"` + + // FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed. + // +optional + FinallyStartTime *metav1.Time `json:"finallyStartTime,omitempty"` } // SkippedTask is used to describe the Tasks that were skipped due to their When Expressions @@ -450,6 +488,12 @@ const ( GracefullyStoppedSkip SkippingReason = "PipelineRun was gracefully stopped" // MissingResultsSkip means the task was skipped because it's missing necessary results MissingResultsSkip SkippingReason = "Results were missing" + // PipelineTimedOutSkip means the task was skipped because the PipelineRun has passed its overall timeout. + PipelineTimedOutSkip SkippingReason = "PipelineRun timeout has been reached" + // TasksTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Tasks. 
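The HaveTasksTimedOut and HasFinallyTimedOut helpers added above share one shape: a zero start time or a timeout of config.NoTimeoutDuration (zero) disables the check, otherwise the elapsed time since the relevant start time is compared to the limit. A standalone sketch of that comparison, assuming hypothetical start and timeout values:

package main

import (
	"fmt"
	"time"

	"k8s.io/utils/clock"
)

// timedOut reproduces the comparison used by the helpers above: a zero
// start time or a zero timeout means "not timed out".
func timedOut(c clock.PassiveClock, start time.Time, timeout time.Duration) bool {
	if start.IsZero() || timeout == 0 {
		return false
	}
	return c.Since(start) > timeout
}

func main() {
	c := clock.RealClock{}
	start := c.Now().Add(-2 * time.Hour) // hypothetical: work started two hours ago
	fmt.Println(timedOut(c, start, time.Hour)) // true: limit exceeded
	fmt.Println(timedOut(c, start, 0))         // false: zero disables the check
}
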
+ TasksTimedOutSkip SkippingReason = "PipelineRun Tasks timeout has been reached" + // FinallyTimedOutSkip means the task was skipped because the PipelineRun has passed its Timeouts.Finally. + FinallyTimedOutSkip SkippingReason = "PipelineRun Finally timeout has been reached" // None means the task was not skipped None SkippingReason = "None" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 4e8ff05181..0859625c85 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -25,6 +25,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/validate" "github.com/tektoncd/pipeline/pkg/apis/version" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -65,8 +66,14 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) ctx = config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, true) errs = errs.Also(ps.PipelineSpec.Validate(ctx).ViaField("pipelineSpec")) } + + // Validate PipelineRun parameters + errs = errs.Also(ps.validatePipelineRunParameters(ctx)) + // Validate propagated parameters errs = errs.Also(ps.validateInlineParameters(ctx)) + // Validate propagated workspaces + errs = errs.Also(ps.validatePropagatedWorkspaces(ctx)) if ps.Timeout != nil { // timeout should be a valid duration of at least 0. @@ -117,6 +124,60 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) return errs } +func (ps *PipelineRunSpec) validatePipelineRunParameters(ctx context.Context) (errs *apis.FieldError) { + if len(ps.Params) == 0 { + return errs + } + + // Validate parameter types and uniqueness + errs = errs.Also(ValidateParameters(ctx, ps.Params).ViaField("params")) + + // Validate that task results aren't used in param values + for _, param := range ps.Params { + expressions, ok := GetVarSubstitutionExpressionsForParam(param) + if ok { + if LooksLikeContainsResultRefs(expressions) { + expressions = filter(expressions, looksLikeResultRef) + resultRefs := NewResultRefs(expressions) + if len(resultRefs) > 0 { + errs = errs.Also(apis.ErrInvalidValue(fmt.Sprintf("cannot use result expressions in %v as PipelineRun parameter values", expressions), + "value").ViaFieldKey("params", param.Name)) + } + } + } + } + return errs +} + +// validatePropagatedWorkspaces validates workspaces that are propagated. +func (ps *PipelineRunSpec) validatePropagatedWorkspaces(ctx context.Context) (errs *apis.FieldError) { + if ps.PipelineSpec == nil { + return errs + } + workspaceNames := sets.NewString() + for _, w := range ps.Workspaces { + workspaceNames.Insert(w.Name) + } + + for _, w := range ps.PipelineSpec.Workspaces { + workspaceNames.Insert(w.Name) + } + + for i, pt := range ps.PipelineSpec.Tasks { + for _, w := range pt.Workspaces { + workspaceNames.Insert(w.Name) + } + errs = errs.Also(pt.validateWorkspaces(workspaceNames).ViaIndex(i)) + } + for i, pt := range ps.PipelineSpec.Finally { + for _, w := range pt.Workspaces { + workspaceNames.Insert(w.Name) + } + errs = errs.Also(pt.validateWorkspaces(workspaceNames).ViaIndex(i)) + } + return errs +} + // validateInlineParameters validates parameters that are defined inline. 
// This is crucial for propagated parameters since the parameters could // be defined under pipelineRun and then called directly in the task steps. @@ -126,66 +187,62 @@ func (ps *PipelineRunSpec) validateInlineParameters(ctx context.Context) (errs * if ps.PipelineSpec == nil { return errs } - var paramSpec []ParamSpec + paramSpecForValidation := make(map[string]ParamSpec) for _, p := range ps.Params { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) + } + for _, p := range ps.PipelineSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) } - paramSpec = append(paramSpec, pSpec) } - paramSpec = appendParamSpec(paramSpec, ps.PipelineSpec.Params) for _, pt := range ps.PipelineSpec.Tasks { - paramSpec = appendParam(paramSpec, pt.Params) + paramSpecForValidation = appendPipelineTaskParams(paramSpecForValidation, pt.Params) if pt.TaskSpec != nil && pt.TaskSpec.Params != nil { - paramSpec = appendParamSpec(paramSpec, pt.TaskSpec.Params) + for _, p := range pt.TaskSpec.Params { + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) + } + } } } + var paramSpec []ParamSpec + for _, v := range paramSpecForValidation { + paramSpec = append(paramSpec, v) + } if ps.PipelineSpec != nil && ps.PipelineSpec.Tasks != nil { for _, pt := range ps.PipelineSpec.Tasks { if pt.TaskSpec != nil && pt.TaskSpec.Steps != nil { + errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) errs = errs.Also(ValidateParameterVariables( config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), pt.TaskSpec.Steps, paramSpec)) } } + errs = errs.Also(ValidatePipelineParameterVariables( + config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ps.PipelineSpec.Tasks, paramSpec)) } return errs } -func appendParamSpec(paramSpec []ParamSpec, params []ParamSpec) []ParamSpec { +func appendPipelineTaskParams(paramSpecForValidation map[string]ParamSpec, params []Param) map[string]ParamSpec { for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break + if pSpec, ok := paramSpecForValidation[p.Name]; ok { + if p.Value.ObjectVal != nil { + for k, v := range p.Value.ObjectVal { + pSpec.Default.ObjectVal[k] = v + pSpec.Properties[k] = PropertySpec{Type: ParamTypeString} + } } - } - if !skip { - paramSpec = append(paramSpec, p) - } - } - return paramSpec -} - -func appendParam(paramSpec []ParamSpec, params []Param) []ParamSpec { - for _, p := range params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, - } - paramSpec = append(paramSpec, pSpec) + paramSpecForValidation[p.Name] = pSpec + } else { + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) } } - return paramSpec + return paramSpecForValidation } func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError { @@ -220,7 +277,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM if ps.Timeouts.Tasks != nil { tasksTimeoutErr := false tasksTimeoutStr := ps.Timeouts.Tasks.Duration.String() - if ps.Timeouts.Tasks.Duration > timeout { + if ps.Timeouts.Tasks.Duration > timeout && timeout != config.NoTimeoutDuration { 
tasksTimeoutErr = true } if ps.Timeouts.Tasks.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { @@ -235,7 +292,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM if ps.Timeouts.Finally != nil { finallyTimeoutErr := false finallyTimeoutStr := ps.Timeouts.Finally.Duration.String() - if ps.Timeouts.Finally.Duration > timeout { + if ps.Timeouts.Finally.Duration > timeout && timeout != config.NoTimeoutDuration { finallyTimeoutErr = true } if ps.Timeouts.Finally.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go index b63c978a59..18d3c07bb6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_conversion.go @@ -8,30 +8,20 @@ import ( func (rr ResolverRef) convertTo(ctx context.Context, sink *v1.ResolverRef) { sink.Resolver = v1.ResolverName(rr.Resolver) - sink.Resource = nil - for _, r := range rr.Resource { - new := v1.ResolverParam{} + sink.Params = nil + for _, r := range rr.Params { + new := v1.Param{} r.convertTo(ctx, &new) - sink.Resource = append(sink.Resource, new) + sink.Params = append(sink.Params, new) } } func (rr *ResolverRef) convertFrom(ctx context.Context, source v1.ResolverRef) { rr.Resolver = ResolverName(source.Resolver) - rr.Resource = nil - for _, r := range source.Resource { - new := ResolverParam{} + rr.Params = nil + for _, r := range source.Params { + new := Param{} new.convertFrom(ctx, r) - rr.Resource = append(rr.Resource, new) + rr.Params = append(rr.Params, new) } } - -func (rp ResolverParam) convertTo(ctx context.Context, sink *v1.ResolverParam) { - sink.Name = rp.Name - sink.Value = rp.Value -} - -func (rp *ResolverParam) convertFrom(ctx context.Context, source v1.ResolverParam) { - rp.Name = source.Name - rp.Value = source.Value -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go index 1f7a34df7a..2da2fae52f 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resolver_types.go @@ -28,21 +28,11 @@ type ResolverRef struct { // resolution of the referenced Tekton resource, such as "git". // +optional Resolver ResolverName `json:"resolver,omitempty"` - // Resource contains the parameters used to identify the + // Params contains the parameters used to identify the // referenced Tekton resource. Example entries might include // "repo" or "path" but the set of params ultimately depends on // the chosen resolver. // +optional // +listType=atomic - Resource []ResolverParam `json:"resource,omitempty"` -} - -// ResolverParam is a single parameter passed to a resolver. -type ResolverParam struct { - // Name is the name of the parameter that will be passed to the - // resolver. - Name string `json:"name"` - // Value is the string value of the parameter that will be - // passed to the resolver. 
- Value string `json:"value"` + Params []Param `json:"params,omitempty"` } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go index 8c9800d277..c6d2526fc3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go @@ -101,7 +101,8 @@ func LooksLikeContainsResultRefs(expressions []string) bool { // looksLikeResultRef attempts to check if the given string looks like it contains any // result references. Returns true if it does, false otherwise func looksLikeResultRef(expression string) bool { - return (strings.HasPrefix(expression, "task") || strings.HasPrefix(expression, "finally")) && strings.Contains(expression, ".result") + subExpressions := strings.Split(expression, ".") + return len(subExpressions) >= 4 && (subExpressions[0] == ResultTaskPart || subExpressions[0] == ResultFinallyPart) && subExpressions[2] == ResultResultPart } // GetVarSubstitutionExpressionsForParam extracts all the value between "$(" and ")"" for a parameter @@ -170,22 +171,22 @@ func stripVarSubExpression(expression string) string { // - Output: "", "", 0, "", error // TODO: may use regex for each type to handle possible reference formats func parseExpression(substitutionExpression string) (string, string, int, string, error) { - subExpressions := strings.Split(substitutionExpression, ".") - - // For string result: tasks..results. - // For array result: tasks..results.[index] - if len(subExpressions) == 4 && (subExpressions[0] == ResultTaskPart || subExpressions[0] == ResultFinallyPart) && subExpressions[2] == ResultResultPart { - resultName, stringIdx := ParseResultName(subExpressions[3]) - if stringIdx != "" { - intIdx, _ := strconv.Atoi(stringIdx) - return subExpressions[1], resultName, intIdx, "", nil - } - return subExpressions[1], resultName, 0, "", nil - } - // For object type result: tasks..results.. - if len(subExpressions) == 5 && (subExpressions[0] == ResultTaskPart || subExpressions[0] == ResultFinallyPart) && subExpressions[2] == ResultResultPart { - return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil + if looksLikeResultRef(substitutionExpression) { + subExpressions := strings.Split(substitutionExpression, ".") + // For string result: tasks..results. + // For array result: tasks..results.[index] + if len(subExpressions) == 4 { + resultName, stringIdx := ParseResultName(subExpressions[3]) + if stringIdx != "" { + intIdx, _ := strconv.Atoi(stringIdx) + return subExpressions[1], resultName, intIdx, "", nil + } + return subExpressions[1], resultName, 0, "", nil + } else if len(subExpressions) == 5 { + // For object type result: tasks..results.. + return subExpressions[1], subExpressions[3], 0, subExpressions[4], nil + } } return "", "", 0, "", fmt.Errorf("must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat) @@ -208,7 +209,11 @@ func ParseResultName(resultName string) (string, string) { // in a PipelineTask and returns a list of any references that are found. func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef { refs := []*ResultRef{} - for _, p := range append(pt.Params, pt.Matrix...) { + var matrixParams []Param + if pt.IsMatrixed() { + matrixParams = pt.Matrix.Params + } + for _, p := range append(pt.Params, matrixParams...) 
{ expressions, _ := GetVarSubstitutionExpressionsForParam(p) refs = append(refs, NewResultRefs(expressions)...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index af1bc4aa80..6a831aaa72 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -234,6 +234,127 @@ "description": "PipelineResourceStatus does not contain anything because PipelineResources on their own do not have a status Deprecated", "type": "object" }, + "v1alpha1.ResolutionRequest": { + "description": "ResolutionRequest is an object for requesting the content of a Tekton resource like a pipeline.yaml.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds the information for the request part of the resource request.", + "default": {}, + "$ref": "#/definitions/v1alpha1.ResolutionRequestSpec" + }, + "status": { + "description": "Status communicates the state of the request and, ultimately, the content of the resolved resource.", + "default": {}, + "$ref": "#/definitions/v1alpha1.ResolutionRequestStatus" + } + } + }, + "v1alpha1.ResolutionRequestList": { + "description": "ResolutionRequestList is a list of ResolutionRequests.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1alpha1.ResolutionRequest" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, + "v1alpha1.ResolutionRequestSpec": { + "description": "ResolutionRequestSpec are all the fields in the spec of the ResolutionRequest CRD.", + "type": "object", + "properties": { + "params": { + "description": "Parameters are the runtime attributes passed to the resolver to help it figure out how to resolve the resource being requested. 
For example: repo URL, commit SHA, path to file, the kind of authentication to leverage, etc.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + } + } + }, + "v1alpha1.ResolutionRequestStatus": { + "description": "ResolutionRequestStatus are all the fields in a ResolutionRequest's status subresource.", + "type": "object", + "required": [ + "data" + ], + "properties": { + "annotations": { + "description": "Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "conditions": { + "description": "Conditions the latest available observations of a resource's current state.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/knative.Condition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "data": { + "description": "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.", + "type": "string", + "default": "" + }, + "observedGeneration": { + "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", + "type": "integer", + "format": "int64" + } + } + }, + "v1alpha1.ResolutionRequestStatusFields": { + "description": "ResolutionRequestStatusFields are the ResolutionRequest-specific fields for the status subresource.", + "type": "object", + "required": [ + "data" + ], + "properties": { + "data": { + "description": "Data is a string representation of the resolved content of the requested resource in-lined into the ResolutionRequest object.", + "type": "string", + "default": "" + } + } + }, "v1alpha1.ResourceDeclaration": { "description": "ResourceDeclaration defines an input or output PipelineResource declared as a requirement by another type such as a Task or Condition. The Name field will be used to refer to these PipelineResources within the type's definition, and when provided as an Input, the Name will be the path to the volume mounted containing this PipelineResource as an input (e.g. an input Resource named `workspace` will be mounted at `/workspace`).", "type": "object", @@ -430,6 +551,132 @@ } } }, + "v1beta1.CustomRun": { + "description": "CustomRun represents a single execution of a Custom Task.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "default": {}, + "$ref": "#/definitions/v1beta1.CustomRunSpec" + }, + "status": { + "default": {}, + "$ref": "#/definitions/github.com.tektoncd.pipeline.pkg.apis.run.v1beta1.CustomRunStatus" + } + } + }, + "v1beta1.CustomRunList": { + "description": "CustomRunList contains a list of CustomRun", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.CustomRun" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, + "v1beta1.CustomRunSpec": { + "description": "CustomRunSpec defines the desired state of CustomRun", + "type": "object", + "properties": { + "customRef": { + "$ref": "#/definitions/v1beta1.TaskRef" + }, + "customSpec": { + "description": "Spec is a specification of a custom task", + "$ref": "#/definitions/v1beta1.EmbeddedCustomRunSpec" + }, + "params": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.Param" + }, + "x-kubernetes-list-type": "atomic" + }, + "retries": { + "description": "Used for propagating retries count to custom tasks", + "type": "integer", + "format": "int32" + }, + "serviceAccountName": { + "type": "string", + "default": "" + }, + "status": { + "description": "Used for cancelling a customrun (and maybe more later on)", + "type": "string" + }, + "statusMessage": { + "description": "Status message for cancellation.", + "type": "string" + }, + "timeout": { + "description": "Time after which the custom-task times out. 
Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "$ref": "#/definitions/v1.Duration" + }, + "workspaces": { + "description": "Workspaces is a list of WorkspaceBindings from volumes to workspaces.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.WorkspaceBinding" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1beta1.EmbeddedCustomRunSpec": { + "description": "EmbeddedCustomRunSpec allows custom task definitions to be embedded", + "type": "object", + "properties": { + "apiVersion": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1beta1.PipelineTaskMetadata" + }, + "spec": { + "description": "Spec is a specification of a custom task", + "default": {}, + "$ref": "#/definitions/k8s.io.apimachinery.pkg.runtime.RawExtension" + } + } + }, "v1beta1.EmbeddedTask": { "description": "EmbeddedTask is used to define a Task inline within a Pipeline's PipelineTasks.", "type": "object", @@ -552,6 +799,21 @@ } } }, + "v1beta1.Matrix": { + "description": "Matrix is used to fan out Tasks in a Pipeline", + "type": "object", + "properties": { + "params": { + "description": "Params is a list of parameters used to fan out the pipelineTask Params takes only `Parameters` of type `\"array\"` Each array element is supplied to the `PipelineTask` by substituting `params` of type `\"string\"` in the underlying `Task`. The names of the `params` in the `Matrix` must match the names of the `params` in the underlying `Task` that they will be substituting.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1beta1.Param" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, "v1beta1.Param": { "description": "Param declares an ParamValues to use for the parameter called name.", "type": "object", @@ -1015,6 +1277,10 @@ "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, "observedGeneration": { "description": "ObservedGeneration is the 'Generation' of the Service that was last processed by the controller.", "type": "integer", @@ -1079,6 +1345,10 @@ "description": "CompletionTime is the time the PipelineRun completed.", "$ref": "#/definitions/v1.Time" }, + "finallyStartTime": { + "description": "FinallyStartTime is when all non-finally tasks have been completed and only finally tasks are being executed.", + "$ref": "#/definitions/v1.Time" + }, "pipelineResults": { "description": "PipelineResults are the list of results written out by the pipeline task's containers", "type": "array", @@ -1214,12 +1484,7 @@ "properties": { "matrix": { "description": "Matrix declares parameters used to fan out this task.", - "type": "array", - "items": { - "default": {}, - "$ref": "#/definitions/v1beta1.Param" - }, - "x-kubernetes-list-type": "atomic" + "$ref": "#/definitions/v1beta1.Matrix" }, "name": { "description": "Name is the name of this task within the context of a Pipeline. 
Name is used as a coordinate with the `from` and `runAfter` fields to establish the execution order of tasks relative to one another.", @@ -1472,42 +1737,22 @@ } } }, - "v1beta1.ResolverParam": { - "description": "ResolverParam is a single parameter passed to a resolver.", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "Name is the name of the parameter that will be passed to the resolver.", - "type": "string", - "default": "" - }, - "value": { - "description": "Value is the string value of the parameter that will be passed to the resolver.", - "type": "string", - "default": "" - } - } - }, "v1beta1.ResolverRef": { "description": "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", "type": "object", "properties": { - "resolver": { - "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", - "type": "string" - }, - "resource": { - "description": "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + "params": { + "description": "Params contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", "type": "array", "items": { "default": {}, - "$ref": "#/definitions/v1beta1.ResolverParam" + "$ref": "#/definitions/v1beta1.Param" }, "x-kubernetes-list-type": "atomic" + }, + "resolver": { + "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + "type": "string" } } }, diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index aa2c2387ed..7a5c103aa6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -27,7 +27,6 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" "github.com/tektoncd/pipeline/pkg/apis/version" - "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/substitution" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -246,9 +245,9 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi if s.OnError != "" { if !isParamRefs(string(s.OnError)) && s.OnError != Continue && s.OnError != StopAndFail { errs = errs.Also(&apis.FieldError{ - Message: fmt.Sprintf("invalid value: %v", s.OnError), + Message: fmt.Sprintf("invalid value: \"%v\"", s.OnError), Paths: []string{"onError"}, - Details: "Task step onError must be either continue or stopAndFail", + Details: "Task step onError must be either \"continue\" or \"stopAndFail\"", }) } } @@ -281,13 +280,13 @@ func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis // when the enable-api-fields feature gate is not "alpha". 
errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) } - errs = errs.Also(p.ValidateType()) + errs = errs.Also(p.ValidateType(ctx)) } return errs } // ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type -func (p ParamSpec) ValidateType() *apis.FieldError { +func (p ParamSpec) ValidateType(ctx context.Context) *apis.FieldError { // Ensure param has a valid type. validType := false for _, allowedType := range AllParamTypes { @@ -312,15 +311,19 @@ func (p ParamSpec) ValidateType() *apis.FieldError { } // Check object type and its PropertySpec type - return p.ValidateObjectType() + return p.ValidateObjectType(ctx) } // ValidateObjectType checks that object type parameter does not miss the // definition of `properties` section and the type of a PropertySpec is allowed. // (Currently, only string is allowed) -func (p ParamSpec) ValidateObjectType() *apis.FieldError { +func (p ParamSpec) ValidateObjectType(ctx context.Context) *apis.FieldError { if p.Type == ParamTypeObject && p.Properties == nil { - return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + // If this we are not skipping validation checks due to propagated params + // then properties field is required. + if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } } invalidKeys := []string{} @@ -366,10 +369,9 @@ func ValidateParameterVariables(ctx context.Context, steps []Step, params []Para errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) if config.ValidateParameterVariablesAndWorkspaces(ctx) == true { errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) } - errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) - errs = errs.Also(validateObjectDefault(objectParamSpecs)) - return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) + return errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) } func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { @@ -425,45 +427,6 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) } -// validateObjectDefault validates the keys of all the object params within a -// slice of ParamSpecs are provided in default iff the default section is provided. -func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { - for _, p := range objectParams { - errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) - } - return errs -} - -// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. 
-func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ParamValue) (errs *apis.FieldError) { - if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { - return nil - } - - neededKeys := []string{} - providedKeys := []string{} - - // collect all needed keys - for key := range properties { - neededKeys = append(neededKeys, key) - } - - // collect all provided keys - for key := range propertiesProvider.ObjectVal { - providedKeys = append(providedKeys, key) - } - - missings := list.DiffLeft(neededKeys, providedKeys) - if len(missings) != 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), - Paths: []string{fmt.Sprintf("properties"), fmt.Sprintf("default")}, - } - } - - return nil -} - // validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings // i.e. param.objectParam, param.objectParam[*] func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go index 36206f968a..2bb4c92ba3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_conversion.go @@ -10,18 +10,38 @@ func (tr TaskRef) convertTo(ctx context.Context, sink *v1.TaskRef) { sink.Name = tr.Name sink.Kind = v1.TaskKind(tr.Kind) sink.APIVersion = tr.APIVersion - // TODO: handle bundle in #4546 new := v1.ResolverRef{} tr.ResolverRef.convertTo(ctx, &new) sink.ResolverRef = new + tr.convertBundleToResolver(sink) } func (tr *TaskRef) convertFrom(ctx context.Context, source v1.TaskRef) { tr.Name = source.Name tr.Kind = TaskKind(source.Kind) tr.APIVersion = source.APIVersion - // TODO: handle bundle in #4546 new := ResolverRef{} new.convertFrom(ctx, source.ResolverRef) tr.ResolverRef = new } + +// convertBundleToResolver converts v1beta1 bundle string to a remote reference with the bundle resolver in v1. +// The conversion from Resolver to Bundle is not being supported since remote resolution would be turned on by +// default and it will be in beta before the stored version of CRD getting swapped to v1. 
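convertBundleToResolver maps a legacy bundle reference onto the new remote-resolution fields instead of carrying Bundle forward into v1. Sketched concretely, with a hypothetical task name and bundle image, the conversion effectively produces:

```go
package main

import (
	"fmt"

	v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
)

func main() {
	// For a v1beta1 TaskRef{Name: "echo", Bundle: "example.com/catalog/echo:v1"}
	// (hypothetical values), the converted v1 ref carries no bundle field;
	// the "bundles" resolver receives the same information as params instead.
	ref := v1.TaskRef{
		ResolverRef: v1.ResolverRef{
			Resolver: "bundles",
			Params: []v1.Param{
				{Name: "bundle", Value: v1.ParamValue{StringVal: "example.com/catalog/echo:v1"}},
				{Name: "name", Value: v1.ParamValue{StringVal: "echo"}},
				{Name: "kind", Value: v1.ParamValue{StringVal: "Task"}},
			},
		},
	}
	fmt.Printf("%+v\n", ref)
}
```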
+func (tr TaskRef) convertBundleToResolver(sink *v1.TaskRef) { + if tr.Bundle != "" { + sink.ResolverRef = v1.ResolverRef{ + Resolver: "bundles", + Params: []v1.Param{{ + Name: "bundle", + Value: v1.ParamValue{StringVal: tr.Bundle}, + }, { + Name: "name", + Value: v1.ParamValue{StringVal: tr.Name}, + }, { + Name: "kind", + Value: v1.ParamValue{StringVal: "Task"}, + }}, + } + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index 652eed6cff..0abaa47ce3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -32,32 +32,39 @@ func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { return } - switch { - case ref.Resolver != "": - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + if ref.Resolver != "" || ref.Params != nil { + if ref.Resolver != "" { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) + } } - if ref.Bundle != "" { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) + if ref.Params != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "params", config.AlphaAPIFields).ViaField("params")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "params")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "params")) + } + if ref.Resolver == "" { + errs = errs.Also(apis.ErrMissingField("resolver")) + } + errs = errs.Also(ValidateParameters(ctx, ref.Params)) + errs = errs.Also(validateResolutionParamTypes(ref.Params).ViaField("params")) } - case ref.Resource != nil: - errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) - if ref.Name != "" { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) + } else { + if ref.Name == "" { + errs = errs.Also(apis.ErrMissingField("name")) } if ref.Bundle != "" { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) - } - if ref.Resolver == "" { - errs = errs.Also(apis.ErrMissingField("resolver")) - } - case ref.Name == "": - errs = errs.Also(apis.ErrMissingField("name")) - case ref.Bundle != "": - errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } } } return diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index 3285748e2c..1d31079f67 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -20,23 +20,194 @@ import ( "context" "fmt" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/version" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) var _ apis.Convertible = (*TaskRun)(nil) // ConvertTo implements apis.Convertible -func (tr *TaskRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { +func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error { if apis.IsInDelete(ctx) { return nil } - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) + switch sink := to.(type) { + case *v1.TaskRun: + sink.ObjectMeta = tr.ObjectMeta + if err := serializeTaskRunResources(&sink.ObjectMeta, &tr.Spec); err != nil { + return err + } + return tr.Spec.ConvertTo(ctx, &sink.Spec) + default: + return fmt.Errorf("unknown version, got: %T", sink) + } +} + +// ConvertTo implements apis.Convertible +func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1.TaskRunSpec) error { + if trs.Debug != nil { + sink.Debug = &v1.TaskRunDebug{} + trs.Debug.convertTo(ctx, sink.Debug) + } + sink.Params = nil + for _, p := range trs.Params { + new := v1.Param{} + p.convertTo(ctx, &new) + sink.Params = append(sink.Params, new) + } + sink.ServiceAccountName = trs.ServiceAccountName + if trs.TaskRef != nil { + sink.TaskRef = &v1.TaskRef{} + trs.TaskRef.convertTo(ctx, sink.TaskRef) + } + if trs.TaskSpec != nil { + sink.TaskSpec = &v1.TaskSpec{} + err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec) + if err != nil { + return err + } + } + sink.Status = v1.TaskRunSpecStatus(trs.Status) + sink.StatusMessage = v1.TaskRunSpecStatusMessage(trs.StatusMessage) + sink.Timeout = trs.Timeout + sink.PodTemplate = trs.PodTemplate + sink.Workspaces = nil + for _, w := range trs.Workspaces { + new := v1.WorkspaceBinding{} + w.convertTo(ctx, &new) + sink.Workspaces = append(sink.Workspaces, new) + } + sink.StepOverrides = nil + for _, so := range trs.StepOverrides { + new := v1.TaskRunStepOverride{} + so.convertTo(ctx, &new) + sink.StepOverrides = append(sink.StepOverrides, new) + } + sink.SidecarOverrides = nil + for _, so := range trs.SidecarOverrides { + new := v1.TaskRunSidecarOverride{} + so.convertTo(ctx, &new) + sink.SidecarOverrides = append(sink.SidecarOverrides, new) + } + sink.ComputeResources = trs.ComputeResources + return nil } // ConvertFrom implements apis.Convertible -func (tr *TaskRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { +func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error { if apis.IsInDelete(ctx) { return nil } - return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) + switch source := from.(type) { + case *v1.TaskRun: + tr.ObjectMeta = source.ObjectMeta + if err := deserializeTaskRunResources(&tr.ObjectMeta, &tr.Spec); err != nil { + return err + } + return tr.Spec.ConvertFrom(ctx, &source.Spec) + default: + return fmt.Errorf("unknown version, got: %T", tr) + } +} + +// ConvertFrom implements apis.Convertible +func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1.TaskRunSpec) error { + if source.Debug != nil { + newDebug := TaskRunDebug{} + newDebug.convertFrom(ctx, *source.Debug) + trs.Debug = &newDebug + } + trs.Params = nil + for _, p := range source.Params { + new := Param{} + new.convertFrom(ctx, p) + trs.Params = append(trs.Params, new) + } + trs.ServiceAccountName = source.ServiceAccountName + if 
source.TaskRef != nil { + newTaskRef := TaskRef{} + newTaskRef.convertFrom(ctx, *source.TaskRef) + trs.TaskRef = &newTaskRef + } + if source.TaskSpec != nil { + newTaskSpec := TaskSpec{} + err := newTaskSpec.ConvertFrom(ctx, source.TaskSpec) + if err != nil { + return err + } + trs.TaskSpec = &newTaskSpec + } + trs.Status = TaskRunSpecStatus(source.Status) + trs.StatusMessage = TaskRunSpecStatusMessage(source.StatusMessage) + trs.Timeout = source.Timeout + trs.PodTemplate = source.PodTemplate + trs.Workspaces = nil + for _, w := range source.Workspaces { + new := WorkspaceBinding{} + new.convertFrom(ctx, w) + trs.Workspaces = append(trs.Workspaces, new) + } + trs.StepOverrides = nil + for _, so := range source.StepOverrides { + new := TaskRunStepOverride{} + new.convertFrom(ctx, so) + trs.StepOverrides = append(trs.StepOverrides, new) + } + trs.SidecarOverrides = nil + for _, so := range source.SidecarOverrides { + new := TaskRunSidecarOverride{} + new.convertFrom(ctx, so) + trs.SidecarOverrides = append(trs.SidecarOverrides, new) + } + trs.ComputeResources = source.ComputeResources + return nil +} + +func (trd TaskRunDebug) convertTo(ctx context.Context, sink *v1.TaskRunDebug) { + sink.Breakpoint = trd.Breakpoint +} + +func (trd *TaskRunDebug) convertFrom(ctx context.Context, source v1.TaskRunDebug) { + trd.Breakpoint = source.Breakpoint +} + +func (trso TaskRunStepOverride) convertTo(ctx context.Context, sink *v1.TaskRunStepOverride) { + sink.Name = trso.Name + sink.Resources = trso.Resources +} + +func (trso *TaskRunStepOverride) convertFrom(ctx context.Context, source v1.TaskRunStepOverride) { + trso.Name = source.Name + trso.Resources = source.Resources +} + +func (trso TaskRunSidecarOverride) convertTo(ctx context.Context, sink *v1.TaskRunSidecarOverride) { + sink.Name = trso.Name + sink.Resources = trso.Resources +} + +func (trso *TaskRunSidecarOverride) convertFrom(ctx context.Context, source v1.TaskRunSidecarOverride) { + trso.Name = source.Name + trso.Resources = source.Resources +} + +func serializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { + if spec.Resources == nil { + return nil + } + return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) +} + +func deserializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { + resources := &TaskRunResources{} + err := version.DeserializeFromMetadata(meta, resources, resourcesAnnotationKey) + if err != nil { + return err + } + if resources.Inputs != nil || resources.Outputs != nil { + spec.Resources = resources + } + return nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index 98cd2327e2..901d761ee5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" "knative.dev/pkg/apis" duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" ) @@ -101,6 +101,8 @@ const ( // TaskRunCancelledByPipelineMsg indicates that the PipelineRun of which this // TaskRun was a part of has been cancelled. TaskRunCancelledByPipelineMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has been cancelled." 
+ // TaskRunCancelledByPipelineTimeoutMsg indicates that the TaskRun was cancelled because the PipelineRun running it timed out. + TaskRunCancelledByPipelineTimeoutMsg TaskRunSpecStatusMessage = "TaskRun cancelled as the PipelineRun it belongs to has timed out." ) // TaskRunDebug defines the breakpoint config for a particular TaskRun @@ -466,20 +468,6 @@ func (tr *TaskRun) GetNamespacedName() types.NamespacedName { return types.NamespacedName{Namespace: tr.Namespace, Name: tr.Name} } -// IsPartOfPipeline return true if TaskRun is a part of a Pipeline. -// It also return the name of Pipeline and PipelineRun -func (tr *TaskRun) IsPartOfPipeline() (bool, string, string) { - if tr == nil || len(tr.Labels) == 0 { - return false, "", "" - } - - if pl, ok := tr.Labels[pipeline.PipelineLabelKey]; ok { - return true, pl, tr.Labels[pipeline.PipelineRunLabelKey] - } - - return false, "", "" -} - // HasVolumeClaimTemplate returns true if TaskRun contains volumeClaimTemplates that is // used for creating PersistentVolumeClaims with an OwnerReference for each run func (tr *TaskRun) HasVolumeClaimTemplate() bool { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index 72a647af0f..5809d92cdb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -114,32 +114,82 @@ func (ts *TaskRunSpec) validateInlineParameters(ctx context.Context) (errs *apis if ts.TaskSpec == nil { return errs } - var paramSpec []ParamSpec + paramSpecForValidation := make(map[string]ParamSpec) for _, p := range ts.Params { - pSpec := ParamSpec{ - Name: p.Name, - Default: &p.Value, - } - paramSpec = append(paramSpec, pSpec) + paramSpecForValidation = createParamSpecFromParam(p, paramSpecForValidation) } + for _, p := range ts.TaskSpec.Params { - skip := false - for _, ps := range paramSpec { - if ps.Name == p.Name { - skip = true - break - } - } - if !skip { - paramSpec = append(paramSpec, p) + var err *apis.FieldError + paramSpecForValidation, err = combineParamSpec(p, paramSpecForValidation) + if err != nil { + errs = errs.Also(err) } } + var paramSpec []ParamSpec + for _, v := range paramSpecForValidation { + paramSpec = append(paramSpec, v) + } if ts.TaskSpec != nil && ts.TaskSpec.Steps != nil { + errs = errs.Also(ValidateParameterTypes(ctx, paramSpec)) errs = errs.Also(ValidateParameterVariables(config.SkipValidationDueToPropagatedParametersAndWorkspaces(ctx, false), ts.TaskSpec.Steps, paramSpec)) } return errs } +func createParamSpecFromParam(p Param, paramSpecForValidation map[string]ParamSpec) map[string]ParamSpec { + value := p.Value + pSpec := ParamSpec{ + Name: p.Name, + Default: &value, + Type: p.Value.Type, + } + if p.Value.ObjectVal != nil { + pSpec.Properties = make(map[string]PropertySpec) + prop := make(map[string]PropertySpec) + for k := range p.Value.ObjectVal { + prop[k] = PropertySpec{Type: ParamTypeString} + } + pSpec.Properties = prop + } + paramSpecForValidation[p.Name] = pSpec + return paramSpecForValidation +} + +func combineParamSpec(p ParamSpec, paramSpecForValidation map[string]ParamSpec) (map[string]ParamSpec, *apis.FieldError) { + if pSpec, ok := paramSpecForValidation[p.Name]; ok { + // Merge defaults with provided values in the taskrun. 
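createParamSpecFromParam and combineParamSpec index ParamSpecs by parameter name, so values supplied inline on the run merge with, rather than shadow, the spec-level declarations. A small sketch of what createParamSpecFromParam derives from an object-typed run parameter; the parameter name and value are hypothetical, and the literal mirrors what the helper builds:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
)

func main() {
	// An object value supplied directly on the run (hypothetical).
	p := v1beta1.Param{
		Name: "cfg",
		Value: v1beta1.ParamValue{
			Type:      v1beta1.ParamTypeObject,
			ObjectVal: map[string]string{"url": "https://example.com"},
		},
	}
	// The derived spec used for validation: the value becomes the default,
	// and each object key is inferred as a string-typed property.
	spec := v1beta1.ParamSpec{
		Name:    p.Name,
		Type:    p.Value.Type,
		Default: &p.Value,
		Properties: map[string]v1beta1.PropertySpec{
			"url": {Type: v1beta1.ParamTypeString},
		},
	}
	fmt.Printf("%+v\n", spec)
}
```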
+ if p.Default != nil && p.Default.ObjectVal != nil { + for k, v := range p.Default.ObjectVal { + if pSpec.Default.ObjectVal == nil { + pSpec.Default.ObjectVal = map[string]string{k: v} + } else { + pSpec.Default.ObjectVal[k] = v + } + } + // If Default values of object type are provided then Properties must also be fully declared. + if p.Properties == nil { + return paramSpecForValidation, apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + } + + // Properties must be defined if paramSpec is of object Type + if pSpec.Type == ParamTypeObject { + if p.Properties == nil { + return paramSpecForValidation, apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + // Expect Properties to be complete + pSpec.Properties = p.Properties + } + paramSpecForValidation[p.Name] = pSpec + } else { + // No values provided by task run but found a paramSpec declaration. + // Expect it to be fully speced out. + paramSpecForValidation[p.Name] = p + } + return paramSpecForValidation, nil +} + // validateDebug func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) { breakpointOnFailure := "onFailure" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go index b5a0b1c8e9..17bb55c56c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go @@ -32,8 +32,7 @@ var validWhenOperators = []string{ } func (wes WhenExpressions) validate() *apis.FieldError { - errs := wes.validateWhenExpressionsFields().ViaField("when") - return errs.Also(wes.validateTaskResultsVariables().ViaField("when")) + return wes.validateWhenExpressionsFields().ViaField("when") } func (wes WhenExpressions) validateWhenExpressionsFields() (errs *apis.FieldError) { @@ -57,23 +56,6 @@ func (we *WhenExpression) validateWhenExpressionFields() *apis.FieldError { return nil } -func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { - for idx, we := range wes { - expressions, ok := we.GetVarSubstitutionExpressions() - if ok { - if LooksLikeContainsResultRefs(expressions) { - expressions = filter(expressions, looksLikeResultRef) - resultRefs := NewResultRefs(expressions) - if len(expressions) != len(resultRefs) { - message := fmt.Sprintf("expected all of the expressions %v to be result expressions but only %v were", expressions, resultRefs) - return apis.ErrInvalidValue(message, apis.CurrentField).ViaIndex(idx) - } - } - } - } - return nil -} - func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, we := range wes { errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", idx)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go index 4a71f6a7ed..727e8e6f3a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_conversion.go @@ -55,3 +55,27 @@ func (w *WorkspacePipelineTaskBinding) convertFrom(ctx context.Context, source v w.Workspace = source.Workspace w.SubPath = 
source.SubPath } + +func (w WorkspaceBinding) convertTo(ctx context.Context, sink *v1.WorkspaceBinding) { + sink.Name = w.Name + sink.SubPath = w.SubPath + sink.VolumeClaimTemplate = w.VolumeClaimTemplate + sink.PersistentVolumeClaim = w.PersistentVolumeClaim + sink.EmptyDir = w.EmptyDir + sink.ConfigMap = w.ConfigMap + sink.Secret = w.Secret + sink.Projected = w.Projected + sink.CSI = w.CSI +} + +func (w *WorkspaceBinding) convertFrom(ctx context.Context, source v1.WorkspaceBinding) { + w.Name = source.Name + w.SubPath = source.SubPath + w.VolumeClaimTemplate = source.VolumeClaimTemplate + w.PersistentVolumeClaim = source.PersistentVolumeClaim + w.EmptyDir = source.EmptyDir + w.ConfigMap = source.ConfigMap + w.Secret = source.Secret + w.Projected = source.Projected + w.CSI = source.CSI +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index 3cd690fef6..60c8779025 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -25,8 +25,8 @@ import ( pod "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" runv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -151,6 +151,131 @@ func (in *ClusterTaskList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRun) DeepCopyInto(out *CustomRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRun. +func (in *CustomRun) DeepCopy() *CustomRun { + if in == nil { + return nil + } + out := new(CustomRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CustomRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRunList) DeepCopyInto(out *CustomRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunList. +func (in *CustomRunList) DeepCopy() *CustomRunList { + if in == nil { + return nil + } + out := new(CustomRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CustomRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRunSpec) DeepCopyInto(out *CustomRunSpec) { + *out = *in + if in.CustomRef != nil { + in, out := &in.CustomRef, &out.CustomRef + *out = new(TaskRef) + (*in).DeepCopyInto(*out) + } + if in.CustomSpec != nil { + in, out := &in.CustomSpec, &out.CustomSpec + *out = new(EmbeddedCustomRunSpec) + (*in).DeepCopyInto(*out) + } + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunSpec. +func (in *CustomRunSpec) DeepCopy() *CustomRunSpec { + if in == nil { + return nil + } + out := new(CustomRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedCustomRunSpec) DeepCopyInto(out *EmbeddedCustomRunSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Metadata.DeepCopyInto(&out.Metadata) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedCustomRunSpec. +func (in *EmbeddedCustomRunSpec) DeepCopy() *EmbeddedCustomRunSpec { + if in == nil { + return nil + } + out := new(EmbeddedCustomRunSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EmbeddedTask) DeepCopyInto(out *EmbeddedTask) { *out = *in @@ -190,7 +315,7 @@ func (in *InternalTaskModifier) DeepCopyInto(out *InternalTaskModifier) { } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -208,6 +333,29 @@ func (in *InternalTaskModifier) DeepCopy() *InternalTaskModifier { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Matrix) DeepCopyInto(out *Matrix) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Matrix. +func (in *Matrix) DeepCopy() *Matrix { + if in == nil { + return nil + } + out := new(Matrix) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Param) DeepCopyInto(out *Param) { *out = *in @@ -589,7 +737,7 @@ func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } if in.PodTemplate != nil { @@ -709,6 +857,10 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.FinallyStartTime != nil { + in, out := &in.FinallyStartTime, &out.FinallyStartTime + *out = (*in).DeepCopy() + } return } @@ -843,10 +995,8 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Matrix != nil { in, out := &in.Matrix, &out.Matrix - *out = make([]Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + *out = new(Matrix) + (*in).DeepCopyInto(*out) } if in.Workspaces != nil { in, out := &in.Workspaces, &out.Workspaces @@ -855,7 +1005,7 @@ func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } return @@ -1049,7 +1199,7 @@ func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { } if in.ComputeResources != nil { in, out := &in.ComputeResources, &out.ComputeResources - *out = new(v1.ResourceRequirements) + *out = new(corev1.ResourceRequirements) (*in).DeepCopyInto(*out) } return @@ -1097,29 +1247,15 @@ func (in *PropertySpec) DeepCopy() *PropertySpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParam. -func (in *ResolverParam) DeepCopy() *ResolverParam { - if in == nil { - return nil - } - out := new(ResolverParam) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { *out = *in - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = make([]ResolverParam, len(*in)) - copy(*out, *in) + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -1165,19 +1301,19 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { } if in.Ports != nil { in, out := &in.Ports, &out.Ports - *out = make([]v1.ContainerPort, len(*in)) + *out = make([]corev1.ContainerPort, len(*in)) copy(*out, *in) } if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) + *out = make([]corev1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1185,39 +1321,39 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { in.Resources.DeepCopyInto(&out.Resources) if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + *out = make([]corev1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeDevices != nil { in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]v1.VolumeDevice, len(*in)) + *out = make([]corev1.VolumeDevice, len(*in)) copy(*out, *in) } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.ReadinessProbe != nil { in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.StartupProbe != nil { in, out := &in.StartupProbe, &out.StartupProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle - *out = new(v1.Lifecycle) + *out = new(corev1.Lifecycle) (*in).DeepCopyInto(*out) } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) + *out = new(corev1.SecurityContext) (*in).DeepCopyInto(*out) } if in.Workspaces != nil { @@ -1293,19 +1429,19 @@ func (in *Step) DeepCopyInto(out *Step) { } if in.DeprecatedPorts != nil { in, out := &in.DeprecatedPorts, &out.DeprecatedPorts - *out = make([]v1.ContainerPort, len(*in)) + *out = make([]corev1.ContainerPort, len(*in)) copy(*out, *in) } if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) + *out = make([]corev1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1313,44 +1449,44 @@ func (in *Step) DeepCopyInto(out *Step) { in.Resources.DeepCopyInto(&out.Resources) if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + *out = make([]corev1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeDevices != nil { in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]v1.VolumeDevice, len(*in)) + *out = make([]corev1.VolumeDevice, len(*in)) copy(*out, *in) } if in.DeprecatedLivenessProbe != nil { in, 
out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedReadinessProbe != nil { in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedStartupProbe != nil { in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedLifecycle != nil { in, out := &in.DeprecatedLifecycle, &out.DeprecatedLifecycle - *out = new(v1.Lifecycle) + *out = new(corev1.Lifecycle) (*in).DeepCopyInto(*out) } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) + *out = new(corev1.SecurityContext) (*in).DeepCopyInto(*out) } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } if in.Workspaces != nil { @@ -1429,19 +1565,19 @@ func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { } if in.DeprecatedPorts != nil { in, out := &in.DeprecatedPorts, &out.DeprecatedPorts - *out = make([]v1.ContainerPort, len(*in)) + *out = make([]corev1.ContainerPort, len(*in)) copy(*out, *in) } if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) + *out = make([]corev1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1449,39 +1585,39 @@ func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { in.Resources.DeepCopyInto(&out.Resources) if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + *out = make([]corev1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeDevices != nil { in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]v1.VolumeDevice, len(*in)) + *out = make([]corev1.VolumeDevice, len(*in)) copy(*out, *in) } if in.DeprecatedLivenessProbe != nil { in, out := &in.DeprecatedLivenessProbe, &out.DeprecatedLivenessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedReadinessProbe != nil { in, out := &in.DeprecatedReadinessProbe, &out.DeprecatedReadinessProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedStartupProbe != nil { in, out := &in.DeprecatedStartupProbe, &out.DeprecatedStartupProbe - *out = new(v1.Probe) + *out = new(corev1.Probe) (*in).DeepCopyInto(*out) } if in.DeprecatedLifecycle != nil { in, out := &in.DeprecatedLifecycle, &out.DeprecatedLifecycle - *out = new(v1.Lifecycle) + *out = new(corev1.Lifecycle) (*in).DeepCopyInto(*out) } if in.SecurityContext != nil { in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) + *out = new(corev1.SecurityContext) (*in).DeepCopyInto(*out) } return @@ -1893,7 +2029,7 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.Timeout != nil { in, out := &in.Timeout, &out.Timeout - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } if in.PodTemplate != nil { @@ -1924,7 +2060,7 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { } if in.ComputeResources != nil { in, out := &in.ComputeResources, 
&out.ComputeResources - *out = new(v1.ResourceRequirements) + *out = new(corev1.ResourceRequirements) (*in).DeepCopyInto(*out) } return @@ -2068,7 +2204,7 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2115,17 +2251,17 @@ func (in *TimeoutFields) DeepCopyInto(out *TimeoutFields) { *out = *in if in.Pipeline != nil { in, out := &in.Pipeline, &out.Pipeline - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } if in.Tasks != nil { in, out := &in.Tasks, &out.Tasks - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } if in.Finally != nil { in, out := &in.Finally, &out.Finally - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } return @@ -2189,37 +2325,37 @@ func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { *out = *in if in.VolumeClaimTemplate != nil { in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate - *out = new(v1.PersistentVolumeClaim) + *out = new(corev1.PersistentVolumeClaim) (*in).DeepCopyInto(*out) } if in.PersistentVolumeClaim != nil { in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(v1.PersistentVolumeClaimVolumeSource) + *out = new(corev1.PersistentVolumeClaimVolumeSource) **out = **in } if in.EmptyDir != nil { in, out := &in.EmptyDir, &out.EmptyDir - *out = new(v1.EmptyDirVolumeSource) + *out = new(corev1.EmptyDirVolumeSource) (*in).DeepCopyInto(*out) } if in.ConfigMap != nil { in, out := &in.ConfigMap, &out.ConfigMap - *out = new(v1.ConfigMapVolumeSource) + *out = new(corev1.ConfigMapVolumeSource) (*in).DeepCopyInto(*out) } if in.Secret != nil { in, out := &in.Secret, &out.Secret - *out = new(v1.SecretVolumeSource) + *out = new(corev1.SecretVolumeSource) (*in).DeepCopyInto(*out) } if in.Projected != nil { in, out := &in.Projected, &out.Projected - *out = new(v1.ProjectedVolumeSource) + *out = new(corev1.ProjectedVolumeSource) (*in).DeepCopyInto(*out) } if in.CSI != nil { in, out := &in.CSI, &out.CSI - *out = new(v1.CSIVolumeSource) + *out = new(corev1.CSIVolumeSource) (*in).DeepCopyInto(*out) } return diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/register.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/register.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/register.go diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/doc.go new file mode 100644 index 0000000000..43214e8cc2 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=resolution.tekton.dev +package v1alpha1 diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/register.go similarity index 97% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/register.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/register.go index 7962db3f0c..4911249e4c 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/register.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/tektoncd/resolution/pkg/apis/resolution" + "github.com/tektoncd/pipeline/pkg/apis/resolution" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_conversion.go new file mode 100644 index 0000000000..25a5b98ec6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_conversion.go @@ -0,0 +1,95 @@ +/* + Copyright 2022 The Tekton Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package v1alpha1 + +import ( + "context" + "fmt" + "strings" + + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + "knative.dev/pkg/apis" +) + +var _ apis.Convertible = (*ResolutionRequest)(nil) + +// ConvertTo implements apis.Convertible +func (rr *ResolutionRequest) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + switch sink := sink.(type) { + case *v1beta1.ResolutionRequest: + sink.ObjectMeta = rr.ObjectMeta + return rr.Spec.ConvertTo(ctx, &sink.Spec) + default: + return fmt.Errorf("unknown version, got: %T", sink) + } +} + +// ConvertTo converts a v1alpha1.ResolutionRequestSpec to a v1beta1.ResolutionRequestSpec +func (rrs *ResolutionRequestSpec) ConvertTo(ctx context.Context, sink *v1beta1.ResolutionRequestSpec) error { + for k, v := range rrs.Parameters { + sink.Params = append(sink.Params, pipelinev1beta1.Param{ + Name: k, + Value: pipelinev1beta1.ParamValue{ + Type: pipelinev1beta1.ParamTypeString, + StringVal: v, + }, + }) + } + + return nil +} + +// ConvertFrom implements apis.Convertible +func (rr *ResolutionRequest) ConvertFrom(ctx context.Context, from apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + switch from := from.(type) { + case *v1beta1.ResolutionRequest: + rr.ObjectMeta = from.ObjectMeta + return rr.Spec.ConvertFrom(ctx, &from.Spec) + default: + return fmt.Errorf("unknown version, got: %T", from) + } +} + +// ConvertFrom converts a v1beta1.ResolutionRequestSpec to a v1alpha1.ResolutionRequestSpec +func (rrs *ResolutionRequestSpec) ConvertFrom(ctx context.Context, from *v1beta1.ResolutionRequestSpec) error { + var nonStringParams []string + + for _, p := range from.Params { + if p.Value.Type != pipelinev1beta1.ParamTypeString { + nonStringParams = append(nonStringParams, p.Name) + } else { + if rrs.Parameters == nil { + rrs.Parameters = make(map[string]string) + } + rrs.Parameters[p.Name] = p.Value.StringVal + } + } + + if len(nonStringParams) > 0 { + return fmt.Errorf("cannot convert v1beta1 to v1alpha, non-string type parameter(s) found: %s", strings.Join(nonStringParams, ", ")) + } + + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go new file mode 100644 index 0000000000..1b1c9ad9ee --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import "context" + +// ManagedByLabelKey is the label key used to mark what is managing this resource +const ManagedByLabelKey = "app.kubernetes.io/managed-by" + +// SetDefaults walks a ResolutionRequest object and sets any default +// values that are required to be set before a reconciler sees it. 
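The conversion code above bridges the old map-shaped v1alpha1 spec (Parameters map[string]string) and the new list-shaped v1beta1 spec (Params []Param). Upgrading is lossless, but downgrading fails whenever a param is not a plain string, since a string map cannot carry array or object values. A simplified stand-in sketch of that asymmetric round trip (param, toParams, and fromParams are illustrative, not the vendored types):

package main

import (
	"fmt"
	"strings"
)

// param stands in for pipelinev1beta1.Param.
type param struct {
	Name      string
	Type      string // "string" or "array" in the real API
	StringVal string
}

// toParams mirrors ConvertTo: every map entry becomes a string param.
func toParams(in map[string]string) []param {
	out := make([]param, 0, len(in))
	for k, v := range in {
		out = append(out, param{Name: k, Type: "string", StringVal: v})
	}
	return out
}

// fromParams mirrors ConvertFrom: non-string params cannot be
// represented in a map[string]string, so they are rejected.
func fromParams(in []param) (map[string]string, error) {
	out := make(map[string]string, len(in))
	var nonString []string
	for _, p := range in {
		if p.Type != "string" {
			nonString = append(nonString, p.Name)
			continue
		}
		out[p.Name] = p.StringVal
	}
	if len(nonString) > 0 {
		return nil, fmt.Errorf("non-string type parameter(s) found: %s", strings.Join(nonString, ", "))
	}
	return out, nil
}

func main() {
	m := map[string]string{"url": "https://example.com/repo.git", "revision": "main"}
	back, err := fromParams(toParams(m)) // lossless round trip
	fmt.Println(back, err)

	_, err = fromParams([]param{{Name: "refs", Type: "array"}})
	fmt.Println(err) // non-string type parameter(s) found: refs
}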
+func (rr *ResolutionRequest) SetDefaults(ctx context.Context) { + if rr.TypeMeta.Kind == "" { + rr.TypeMeta.Kind = "ResolutionRequest" + } + if rr.TypeMeta.APIVersion == "" { + rr.TypeMeta.APIVersion = "resolution.tekton.dev/v1alpha1" + } +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go similarity index 75% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go index 80766151f9..2154cb4f8c 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_lifecycle.go @@ -1,7 +1,23 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1alpha1 import ( - resolutioncommon "github.com/tektoncd/resolution/pkg/common" + resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" "k8s.io/apimachinery/pkg/runtime/schema" "knative.dev/pkg/apis" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_types.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_types.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_types.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_validation.go similarity index 90% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_validation.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_validation.go index afe0e22ad2..28e008a8f9 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/resolution_request_validation.go @@ -19,7 +19,7 @@ package v1alpha1 import ( "context" - "github.com/tektoncd/resolution/pkg/common" + "github.com/tektoncd/pipeline/pkg/resolution/common" "knative.dev/pkg/apis" ) @@ -31,7 +31,7 @@ func (rr *ResolutionRequest) Validate(ctx context.Context) (errs *apis.FieldErro } // Validate checks the the spec field of a ResolutionRequest is valid. 
-func (rs *ResolutionRequestSpec) Validate(ctx context.Context) *apis.FieldError { +func (rs *ResolutionRequestSpec) Validate(ctx context.Context) (errs *apis.FieldError) { return nil } diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go similarity index 99% rename from vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go index 0f75efa220..db02bbff9b 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/doc.go new file mode 100644 index 0000000000..b47a2951b3 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=resolution.tekton.dev +package v1beta1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/register.go new file mode 100644 index 0000000000..61eee0858d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "github.com/tektoncd/pipeline/pkg/apis/resolution" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: resolution.GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder builds a scheme with the types known to the package. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme adds the types known to this package to an existing schema. + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ResolutionRequest{}, + &ResolutionRequestList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_conversion.go new file mode 100644 index 0000000000..16f57ac216 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_conversion.go @@ -0,0 +1,43 @@ +/* + Copyright 2022 The Tekton Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +var _ apis.Convertible = (*ResolutionRequest)(nil) + +// ConvertTo implements apis.Convertible +func (rr *ResolutionRequest) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (rr *ResolutionRequest) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } + return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_defaults.go new file mode 100644 index 0000000000..2d0a2453ca --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_defaults.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
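The register.go added above is client-gen convention: it wires the new v1beta1 kinds into a runtime.Scheme so generic clients can serialize and decode them. A short sketch of how a consumer typically uses AddToScheme, assuming the vendored packages from this diff are importable as shown:

package main

import (
	"fmt"

	resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// A controller builds one runtime.Scheme and registers every
	// API group whose objects it needs to (de)serialize.
	scheme := runtime.NewScheme()
	if err := resolutionv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now map Go types back to their GroupVersionKind.
	gvks, _, err := scheme.ObjectKinds(&resolutionv1beta1.ResolutionRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // resolution.tekton.dev/v1beta1, Kind=ResolutionRequest
}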
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import "context"
+
+// ManagedByLabelKey is the label key used to mark what is managing this resource
+const ManagedByLabelKey = "app.kubernetes.io/managed-by"
+
+// SetDefaults walks a ResolutionRequest object and sets any default
+// values that are required to be set before a reconciler sees it.
+func (rr *ResolutionRequest) SetDefaults(ctx context.Context) {
+	if rr.TypeMeta.Kind == "" {
+		rr.TypeMeta.Kind = "ResolutionRequest"
+	}
+	if rr.TypeMeta.APIVersion == "" {
+		rr.TypeMeta.APIVersion = "resolution.tekton.dev/v1beta1"
+	}
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_lifecycle.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_lifecycle.go
new file mode 100644
index 0000000000..4cf709ea5e
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_lifecycle.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"knative.dev/pkg/apis"
+)
+
+// ResolutionRequests only have apis.ConditionSucceeded for now.
+var resolutionRequestCondSet = apis.NewBatchConditionSet()
+
+// GetGroupVersionKind implements kmeta.OwnerRefable.
+func (*ResolutionRequest) GetGroupVersionKind() schema.GroupVersionKind {
+	return SchemeGroupVersion.WithKind("ResolutionRequest")
+}
+
+// GetConditionSet implements KRShaped.
+func (*ResolutionRequest) GetConditionSet() apis.ConditionSet {
+	return resolutionRequestCondSet
+}
+
+// HasStarted returns whether a ResolutionRequest's Status is considered to
+// be in-progress.
+func (rr *ResolutionRequest) HasStarted() bool {
+	return rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
+}
+
+// IsDone returns whether a ResolutionRequest's Status is considered to be
+// in a completed state, independent of success/failure.
+func (rr *ResolutionRequest) IsDone() bool {
+	finalStateIsUnknown := rr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown()
+	return !finalStateIsUnknown
+}
+
+// InitializeConditions sets the initial values of the conditions.
+func (s *ResolutionRequestStatus) InitializeConditions() {
+	resolutionRequestCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkFailed sets the Succeeded condition to False with an accompanying
+// error message.
+func (s *ResolutionRequestStatus) MarkFailed(reason, message string) { + resolutionRequestCondSet.Manage(s).MarkFalse(apis.ConditionSucceeded, reason, message) +} + +// MarkSucceeded sets the Succeeded condition to True. +func (s *ResolutionRequestStatus) MarkSucceeded() { + resolutionRequestCondSet.Manage(s).MarkTrue(apis.ConditionSucceeded) +} + +// MarkInProgress updates the Succeeded condition to Unknown with an +// accompanying message. +func (s *ResolutionRequestStatus) MarkInProgress(message string) { + resolutionRequestCondSet.Manage(s).MarkUnknown(apis.ConditionSucceeded, resolutioncommon.ReasonResolutionInProgress, message) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_types.go new file mode 100644 index 0000000000..2fc3313930 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_types.go @@ -0,0 +1,87 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResolutionRequest is an object for requesting the content of +// a Tekton resource like a pipeline.yaml. +// +// +genclient +// +genreconciler +type ResolutionRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds the information for the request part of the resource request. + // +optional + Spec ResolutionRequestSpec `json:"spec,omitempty"` + + // Status communicates the state of the request and, ultimately, + // the content of the resolved resource. + // +optional + Status ResolutionRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResolutionRequestList is a list of ResolutionRequests. +type ResolutionRequestList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata"` + Items []ResolutionRequest `json:"items"` +} + +// ResolutionRequestSpec are all the fields in the spec of the +// ResolutionRequest CRD. +type ResolutionRequestSpec struct { + // Parameters are the runtime attributes passed to + // the resolver to help it figure out how to resolve the + // resource being requested. For example: repo URL, commit SHA, + // path to file, the kind of authentication to leverage, etc. + // +optional + Params []pipelinev1beta1.Param `json:"params,omitempty"` +} + +// ResolutionRequestStatus are all the fields in a ResolutionRequest's +// status subresource. 
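Taken together, the lifecycle helpers above and the status struct that continues just below give a reconciler a small state machine over the single Succeeded condition. A sketch of the intended flow, using only helpers defined in this diff (the Data payload is a placeholder):

package main

import (
	"fmt"

	resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
)

func main() {
	rr := &resolutionv1beta1.ResolutionRequest{}

	// A reconciler first seeds the Succeeded condition as Unknown.
	rr.Status.InitializeConditions()
	rr.Status.MarkInProgress("waiting for data from resolver")
	fmt.Println(rr.HasStarted(), rr.IsDone()) // true false

	// Once the resolver answers, the resolved bytes are inlined into
	// the status and the request is marked terminal.
	rr.Status.Data = "apiVersion: tekton.dev/v1beta1\nkind: Task\n..."
	rr.Status.MarkSucceeded()
	fmt.Println(rr.IsDone()) // true
}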
+type ResolutionRequestStatus struct {
+	duckv1.Status `json:",inline"`
+	ResolutionRequestStatusFields `json:",inline"`
+}
+
+// ResolutionRequestStatusFields are the ResolutionRequest-specific fields
+// for the status subresource.
+type ResolutionRequestStatusFields struct {
+	// Data is a string representation of the resolved content
+	// of the requested resource in-lined into the ResolutionRequest
+	// object.
+	Data string `json:"data"`
+}
+
+// GetStatus implements KRShaped.
+func (rr *ResolutionRequest) GetStatus() *duckv1.Status {
+	return &rr.Status.Status
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_validation.go
new file mode 100644
index 0000000000..a9b1a72744
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/resolution_request_validation.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"context"
+
+	"github.com/tektoncd/pipeline/pkg/resolution/common"
+	"knative.dev/pkg/apis"
+)
+
+// Validate checks that a submitted ResolutionRequest is structurally
+// sound before the controller receives it.
+func (rr *ResolutionRequest) Validate(ctx context.Context) (errs *apis.FieldError) {
+	errs = errs.Also(validateTypeLabel(rr))
+	return errs.Also(rr.Spec.Validate(ctx).ViaField("spec"))
+}
+
+// Validate checks that the spec field of a ResolutionRequest is valid.
+func (rs *ResolutionRequestSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
+	return nil
+}
+
+func validateTypeLabel(rr *ResolutionRequest) *apis.FieldError {
+	typeLabel := getTypeLabel(rr.ObjectMeta.Labels)
+	if typeLabel == "" {
+		return apis.ErrMissingField(common.LabelKeyResolverType).ViaField("labels").ViaField("meta")
+	}
+	return nil
+}
+
+func getTypeLabel(labels map[string]string) string {
+	if labels == nil {
+		return ""
+	}
+	return labels[common.LabelKeyResolverType]
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 0000000000..50a75e80be
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,145 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2020 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolutionRequest) DeepCopyInto(out *ResolutionRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequest. +func (in *ResolutionRequest) DeepCopy() *ResolutionRequest { + if in == nil { + return nil + } + out := new(ResolutionRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResolutionRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolutionRequestList) DeepCopyInto(out *ResolutionRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResolutionRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestList. +func (in *ResolutionRequestList) DeepCopy() *ResolutionRequestList { + if in == nil { + return nil + } + out := new(ResolutionRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResolutionRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolutionRequestSpec) DeepCopyInto(out *ResolutionRequestSpec) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]pipelinev1beta1.Param, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestSpec. +func (in *ResolutionRequestSpec) DeepCopy() *ResolutionRequestSpec { + if in == nil { + return nil + } + out := new(ResolutionRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolutionRequestStatus) DeepCopyInto(out *ResolutionRequestStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.ResolutionRequestStatusFields = in.ResolutionRequestStatusFields + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatus. 
+func (in *ResolutionRequestStatus) DeepCopy() *ResolutionRequestStatus { + if in == nil { + return nil + } + out := new(ResolutionRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolutionRequestStatusFields) DeepCopyInto(out *ResolutionRequestStatusFields) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolutionRequestStatusFields. +func (in *ResolutionRequestStatusFields) DeepCopy() *ResolutionRequestStatusFields { + if in == nil { + return nil + } + out := new(ResolutionRequestStatusFields) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/customrunstatus_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/customrunstatus_types.go new file mode 100644 index 0000000000..ff41f0e4c8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/customrunstatus_types.go @@ -0,0 +1,147 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "encoding/json" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +// This package contains common definitions needed by v1beta1.CustomRun and v1beta1.PipelineRun. + +// +k8s:deepcopy-gen=true + +// CustomRunStatus defines the observed state of CustomRun +type CustomRunStatus struct { + duckv1.Status `json:",inline"` + + // CustomRunStatusFields inlines the status fields. + CustomRunStatusFields `json:",inline"` +} + +// +k8s:deepcopy-gen=true + +// CustomRunStatusFields holds the fields of CustomRun's status. This is defined +// separately and inlined so that other types can readily consume these fields +// via duck typing. +type CustomRunStatusFields struct { + // StartTime is the time the build is actually started. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty"` + + // CompletionTime is the time the build completed. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty"` + + // Results reports any output result values to be consumed by later + // tasks in a pipeline. + // +optional + Results []CustomRunResult `json:"results,omitempty"` + + // RetriesStatus contains the history of CustomRunStatus, in case of a retry. + // +optional + RetriesStatus []CustomRunStatus `json:"retriesStatus,omitempty"` + + // ExtraFields holds arbitrary fields provided by the custom task + // controller. 
+ ExtraFields runtime.RawExtension `json:"extraFields,omitempty"` +} + +// CustomRunResult used to describe the results of a task +type CustomRunResult struct { + // Name the given name + Name string `json:"name"` + // Value the given value of the result + Value string `json:"value"` +} + +var customRunCondSet = apis.NewBatchConditionSet() + +// GetCondition returns the Condition matching the given type. +func (r *CustomRunStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return customRunCondSet.Manage(r).GetCondition(t) +} + +// InitializeConditions will set all conditions in customRunCondSet to unknown +// and set the started time to the current time +func (r *CustomRunStatus) InitializeConditions() { + started := false + if r.StartTime.IsZero() { + r.StartTime = &metav1.Time{Time: time.Now()} + started = true + } + conditionManager := customRunCondSet.Manage(r) + conditionManager.InitializeConditions() + // Ensure the started reason is set for the "Succeeded" condition + if started { + initialCondition := conditionManager.GetCondition(apis.ConditionSucceeded) + initialCondition.Reason = "Started" + conditionManager.SetCondition(*initialCondition) + } +} + +// SetCondition sets the condition, unsetting previous conditions with the same +// type as necessary. +func (r *CustomRunStatus) SetCondition(newCond *apis.Condition) { + if newCond != nil { + customRunCondSet.Manage(r).SetCondition(*newCond) + } +} + +// MarkCustomRunSucceeded changes the Succeeded condition to True with the provided reason and message. +func (r *CustomRunStatus) MarkCustomRunSucceeded(reason, messageFormat string, messageA ...interface{}) { + customRunCondSet.Manage(r).MarkTrueWithReason(apis.ConditionSucceeded, reason, messageFormat, messageA...) + succeeded := r.GetCondition(apis.ConditionSucceeded) + r.CompletionTime = &succeeded.LastTransitionTime.Inner +} + +// MarkCustomRunFailed changes the Succeeded condition to False with the provided reason and message. +func (r *CustomRunStatus) MarkCustomRunFailed(reason, messageFormat string, messageA ...interface{}) { + customRunCondSet.Manage(r).MarkFalse(apis.ConditionSucceeded, reason, messageFormat, messageA...) + succeeded := r.GetCondition(apis.ConditionSucceeded) + r.CompletionTime = &succeeded.LastTransitionTime.Inner +} + +// MarkCustomRunRunning changes the Succeeded condition to Unknown with the provided reason and message. +func (r *CustomRunStatus) MarkCustomRunRunning(reason, messageFormat string, messageA ...interface{}) { + customRunCondSet.Manage(r).MarkUnknown(apis.ConditionSucceeded, reason, messageFormat, messageA...) +} + +// DecodeExtraFields deserializes the extra fields in the CustomRun status. +func (r *CustomRunStatus) DecodeExtraFields(into interface{}) error { + if len(r.ExtraFields.Raw) == 0 { + return nil + } + return json.Unmarshal(r.ExtraFields.Raw, into) +} + +// EncodeExtraFields serializes the extra fields in the CustomRun status. 
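The condition helpers here mirror the lifecycle pattern above: InitializeConditions stamps StartTime and seeds Succeeded=Unknown with reason "Started", while the terminal Mark* helpers also record CompletionTime. A sketch using only methods defined in this file:

package main

import (
	"fmt"

	runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
	"knative.dev/pkg/apis"
)

func main() {
	var status runv1beta1.CustomRunStatus

	// StartTime is zero, so InitializeConditions stamps it and sets
	// Succeeded to Unknown with reason "Started".
	status.InitializeConditions()
	fmt.Println(status.GetCondition(apis.ConditionSucceeded).Reason) // Started

	status.MarkCustomRunRunning("Reconciling", "attempt %d in progress", 1)
	status.MarkCustomRunSucceeded("Done", "finished after %d attempt(s)", 1)

	// Terminal transitions record when the run completed.
	fmt.Println(status.CompletionTime != nil) // true
}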
+func (r *CustomRunStatus) EncodeExtraFields(from interface{}) error { + data, err := json.Marshal(from) + if err != nil { + return err + } + r.ExtraFields = runtime.RawExtension{ + Raw: data, + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/doc.go new file mode 100644 index 0000000000..1308f5f0e9 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the customrun v1beta1 API group +// +groupName=tekton.dev +package v1beta1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..12465a6cb9 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,77 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomRunStatus) DeepCopyInto(out *CustomRunStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + in.CustomRunStatusFields.DeepCopyInto(&out.CustomRunStatusFields) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunStatus. +func (in *CustomRunStatus) DeepCopy() *CustomRunStatus { + if in == nil { + return nil + } + out := new(CustomRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
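The EncodeExtraFields/DecodeExtraFields pair above is the escape hatch that lets a custom-task controller persist arbitrary private state on the CustomRun status without the API server knowing its schema. A sketch of the encode/decode round trip; the fields struct is a hypothetical controller payload:

package main

import (
	"fmt"

	runv1beta1 "github.com/tektoncd/pipeline/pkg/apis/run/v1beta1"
)

// fields is a hypothetical controller-specific payload; any
// JSON-serializable struct works here.
type fields struct {
	Attempts int    `json:"attempts"`
	Node     string `json:"node"`
}

func main() {
	var status runv1beta1.CustomRunStatus

	// EncodeExtraFields marshals the payload into the opaque
	// runtime.RawExtension carried on the status.
	if err := status.EncodeExtraFields(&fields{Attempts: 2, Node: "worker-1"}); err != nil {
		panic(err)
	}

	// DecodeExtraFields recovers it on the next reconcile.
	var got fields
	if err := status.DecodeExtraFields(&got); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", got) // {Attempts:2 Node:worker-1}
}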
+func (in *CustomRunStatusFields) DeepCopyInto(out *CustomRunStatusFields) {
+	*out = *in
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.CompletionTime != nil {
+		in, out := &in.CompletionTime, &out.CompletionTime
+		*out = (*in).DeepCopy()
+	}
+	if in.Results != nil {
+		in, out := &in.Results, &out.Results
+		*out = make([]CustomRunResult, len(*in))
+		copy(*out, *in)
+	}
+	if in.RetriesStatus != nil {
+		in, out := &in.RetriesStatus, &out.RetriesStatus
+		*out = make([]CustomRunStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.ExtraFields.DeepCopyInto(&out.ExtraFields)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomRunStatusFields.
+func (in *CustomRunStatusFields) DeepCopy() *CustomRunStatusFields {
+	if in == nil {
+		return nil
+	}
+	out := new(CustomRunStatusFields)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go
index e84bdce7ff..8b2549cfd0 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go
@@ -32,10 +32,18 @@ func (c *FakeTektonV1) Pipelines(namespace string) v1.PipelineInterface {
 	return &FakePipelines{c, namespace}
 }
 
+func (c *FakeTektonV1) PipelineRuns(namespace string) v1.PipelineRunInterface {
+	return &FakePipelineRuns{c, namespace}
+}
+
 func (c *FakeTektonV1) Tasks(namespace string) v1.TaskInterface {
 	return &FakeTasks{c, namespace}
 }
 
+func (c *FakeTektonV1) TaskRuns(namespace string) v1.TaskRunInterface {
+	return &FakeTaskRuns{c, namespace}
+}
+
 // RESTClient returns a RESTClient that is used to communicate
 // with API server by this client implementation.
 func (c *FakeTektonV1) RESTClient() rest.Interface {
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipelinerun.go
new file mode 100644
index 0000000000..c4d32bdd41
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipelinerun.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2020 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+ +package fake + +import ( + "context" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePipelineRuns implements PipelineRunInterface +type FakePipelineRuns struct { + Fake *FakeTektonV1 + ns string +} + +var pipelinerunsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1", Resource: "pipelineruns"} + +var pipelinerunsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1", Kind: "PipelineRun"} + +// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. +func (c *FakePipelineRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *pipelinev1.PipelineRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(pipelinerunsResource, c.ns, name), &pipelinev1.PipelineRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.PipelineRun), err +} + +// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. +func (c *FakePipelineRuns) List(ctx context.Context, opts v1.ListOptions) (result *pipelinev1.PipelineRunList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(pipelinerunsResource, pipelinerunsKind, c.ns, opts), &pipelinev1.PipelineRunList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &pipelinev1.PipelineRunList{ListMeta: obj.(*pipelinev1.PipelineRunList).ListMeta} + for _, item := range obj.(*pipelinev1.PipelineRunList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested pipelineRuns. +func (c *FakePipelineRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(pipelinerunsResource, c.ns, opts)) + +} + +// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *FakePipelineRuns) Create(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts v1.CreateOptions) (result *pipelinev1.PipelineRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(pipelinerunsResource, c.ns, pipelineRun), &pipelinev1.PipelineRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.PipelineRun), err +} + +// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *FakePipelineRuns) Update(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts v1.UpdateOptions) (result *pipelinev1.PipelineRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(pipelinerunsResource, c.ns, pipelineRun), &pipelinev1.PipelineRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.PipelineRun), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakePipelineRuns) UpdateStatus(ctx context.Context, pipelineRun *pipelinev1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1.PipelineRun, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(pipelinerunsResource, "status", c.ns, pipelineRun), &pipelinev1.PipelineRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.PipelineRun), err +} + +// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. +func (c *FakePipelineRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(pipelinerunsResource, c.ns, name, opts), &pipelinev1.PipelineRun{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePipelineRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(pipelinerunsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &pipelinev1.PipelineRunList{}) + return err +} + +// Patch applies the patch and returns the patched pipelineRun. +func (c *FakePipelineRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.PipelineRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(pipelinerunsResource, c.ns, name, pt, data, subresources...), &pipelinev1.PipelineRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.PipelineRun), err +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_taskrun.go new file mode 100644 index 0000000000..e5b61a4b12 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_taskrun.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeTaskRuns implements TaskRunInterface +type FakeTaskRuns struct { + Fake *FakeTektonV1 + ns string +} + +var taskrunsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1", Resource: "taskruns"} + +var taskrunsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1", Kind: "TaskRun"} + +// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. 
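The fake PipelineRuns client above and the fake TaskRuns client below exist so unit tests can drive the new v1 clients against an in-memory object tracker instead of a live API server. A sketch of typical test usage, assuming the standard client-gen fake package vendored alongside these files:

package main

import (
	"context"
	"fmt"

	pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
	"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ctx := context.Background()

	// The fake clientset records objects in memory; every call above
	// (Get, List, Create, ...) is served by its object tracker.
	cs := fake.NewSimpleClientset()

	pr := &pipelinev1.PipelineRun{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-run", Namespace: "default"},
	}
	if _, err := cs.TektonV1().PipelineRuns("default").Create(ctx, pr, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	got, err := cs.TektonV1().PipelineRuns("default").Get(ctx, "demo-run", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(got.Name) // demo-run
}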
+func (c *FakeTaskRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *pipelinev1.TaskRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(taskrunsResource, c.ns, name), &pipelinev1.TaskRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.TaskRun), err +} + +// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. +func (c *FakeTaskRuns) List(ctx context.Context, opts v1.ListOptions) (result *pipelinev1.TaskRunList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(taskrunsResource, taskrunsKind, c.ns, opts), &pipelinev1.TaskRunList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &pipelinev1.TaskRunList{ListMeta: obj.(*pipelinev1.TaskRunList).ListMeta} + for _, item := range obj.(*pipelinev1.TaskRunList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested taskRuns. +func (c *FakeTaskRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(taskrunsResource, c.ns, opts)) + +} + +// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *FakeTaskRuns) Create(ctx context.Context, taskRun *pipelinev1.TaskRun, opts v1.CreateOptions) (result *pipelinev1.TaskRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(taskrunsResource, c.ns, taskRun), &pipelinev1.TaskRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.TaskRun), err +} + +// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *FakeTaskRuns) Update(ctx context.Context, taskRun *pipelinev1.TaskRun, opts v1.UpdateOptions) (result *pipelinev1.TaskRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(taskrunsResource, c.ns, taskRun), &pipelinev1.TaskRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.TaskRun), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeTaskRuns) UpdateStatus(ctx context.Context, taskRun *pipelinev1.TaskRun, opts v1.UpdateOptions) (*pipelinev1.TaskRun, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(taskrunsResource, "status", c.ns, taskRun), &pipelinev1.TaskRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.TaskRun), err +} + +// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. +func (c *FakeTaskRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(taskrunsResource, c.ns, name, opts), &pipelinev1.TaskRun{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
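(Aside, not part of the vendored diff: beyond tracker-backed CRUD, the embedded testing.Fake lets tests inject reactors ahead of the default behavior, which is useful for forcing error paths through these methods. A sketch, with the error message as an assumption; k8stesting is k8s.io/client-go/testing and runtime is k8s.io/apimachinery/pkg/runtime:)

    cs := fake.NewSimpleClientset()
    cs.PrependReactor("create", "taskruns",
        func(action k8stesting.Action) (bool, runtime.Object, error) {
            return true, nil, errors.New("simulated apiserver failure")
        })
    // Create now returns the injected error; cs.Actions() still records
    // the attempted CreateAction for later assertions.
    _, err := cs.TektonV1().TaskRuns("ns").
        Create(context.Background(), &pipelinev1.TaskRun{}, metav1.CreateOptions{})
    if err == nil {
        log.Fatal("expected the injected error")
    }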
+func (c *FakeTaskRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(taskrunsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &pipelinev1.TaskRunList{}) + return err +} + +// Patch applies the patch and returns the patched taskRun. +func (c *FakeTaskRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.TaskRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(taskrunsResource, c.ns, name, pt, data, subresources...), &pipelinev1.TaskRun{}) + + if obj == nil { + return nil, err + } + return obj.(*pipelinev1.TaskRun), err +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go index f14eaed965..ec5a572458 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go @@ -20,4 +20,8 @@ package v1 type PipelineExpansion interface{} +type PipelineRunExpansion interface{} + type TaskExpansion interface{} + +type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go index 7e65d3314b..1b7f8d56ff 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go @@ -29,7 +29,9 @@ import ( type TektonV1Interface interface { RESTClient() rest.Interface PipelinesGetter + PipelineRunsGetter TasksGetter + TaskRunsGetter } // TektonV1Client is used to interact with features provided by the tekton.dev group. @@ -41,10 +43,18 @@ func (c *TektonV1Client) Pipelines(namespace string) PipelineInterface { return newPipelines(c, namespace) } +func (c *TektonV1Client) PipelineRuns(namespace string) PipelineRunInterface { + return newPipelineRuns(c, namespace) +} + func (c *TektonV1Client) Tasks(namespace string) TaskInterface { return newTasks(c, namespace) } +func (c *TektonV1Client) TaskRuns(namespace string) TaskRunInterface { + return newTaskRuns(c, namespace) +} + // NewForConfig creates a new TektonV1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipelinerun.go new file mode 100644 index 0000000000..0f37e00bc6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipelinerun.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PipelineRunsGetter has a method to return a PipelineRunInterface. +// A group's client should implement this interface. +type PipelineRunsGetter interface { + PipelineRuns(namespace string) PipelineRunInterface +} + +// PipelineRunInterface has methods to work with PipelineRun resources. +type PipelineRunInterface interface { + Create(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.CreateOptions) (*v1.PipelineRun, error) + Update(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.UpdateOptions) (*v1.PipelineRun, error) + UpdateStatus(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.UpdateOptions) (*v1.PipelineRun, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PipelineRun, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.PipelineRunList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PipelineRun, err error) + PipelineRunExpansion +} + +// pipelineRuns implements PipelineRunInterface +type pipelineRuns struct { + client rest.Interface + ns string +} + +// newPipelineRuns returns a PipelineRuns +func newPipelineRuns(c *TektonV1Client, namespace string) *pipelineRuns { + return &pipelineRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. +func (c *pipelineRuns) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PipelineRun, err error) { + result = &v1.PipelineRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. +func (c *pipelineRuns) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PipelineRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PipelineRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pipelineRuns. 
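(Aside, not part of the vendored diff: callers reach the interface above through the generated clientset. A minimal sketch of listing runs by label, where the kubeconfig handling and selector are assumptions; versioned is github.com/tektoncd/pipeline/pkg/client/clientset/versioned and clientcmd is k8s.io/client-go/tools/clientcmd:)

    cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    if err != nil {
        log.Fatal(err)
    }
    cs, err := versioned.NewForConfig(cfg)
    if err != nil {
        log.Fatal(err)
    }
    prs, err := cs.TektonV1().PipelineRuns("default").List(context.Background(),
        metav1.ListOptions{LabelSelector: "tekton.dev/pipeline=build"})
    if err != nil {
        log.Fatal(err)
    }
    for i := range prs.Items {
        log.Printf("pipelinerun %s", prs.Items[i].Name)
    }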
+func (c *pipelineRuns) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Create(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.CreateOptions) (result *v1.PipelineRun, err error) { + result = &v1.PipelineRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pipelineRun). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. +func (c *pipelineRuns) Update(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.UpdateOptions) (result *v1.PipelineRun, err error) { + result = &v1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pipelineRun). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *pipelineRuns) UpdateStatus(ctx context.Context, pipelineRun *v1.PipelineRun, opts metav1.UpdateOptions) (result *v1.PipelineRun, err error) { + result = &v1.PipelineRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(pipelineRun.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(pipelineRun). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. +func (c *pipelineRuns) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pipelineRuns) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pipelineruns"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched pipelineRun. +func (c *pipelineRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PipelineRun, err error) { + result = &v1.PipelineRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pipelineruns"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/taskrun.go new file mode 100644 index 0000000000..34145a9732 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/taskrun.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// TaskRunsGetter has a method to return a TaskRunInterface. +// A group's client should implement this interface. +type TaskRunsGetter interface { + TaskRuns(namespace string) TaskRunInterface +} + +// TaskRunInterface has methods to work with TaskRun resources. +type TaskRunInterface interface { + Create(ctx context.Context, taskRun *v1.TaskRun, opts metav1.CreateOptions) (*v1.TaskRun, error) + Update(ctx context.Context, taskRun *v1.TaskRun, opts metav1.UpdateOptions) (*v1.TaskRun, error) + UpdateStatus(ctx context.Context, taskRun *v1.TaskRun, opts metav1.UpdateOptions) (*v1.TaskRun, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.TaskRun, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.TaskRunList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TaskRun, err error) + TaskRunExpansion +} + +// taskRuns implements TaskRunInterface +type taskRuns struct { + client rest.Interface + ns string +} + +// newTaskRuns returns a TaskRuns +func newTaskRuns(c *TektonV1Client, namespace string) *taskRuns { + return &taskRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. +func (c *taskRuns) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.TaskRun, err error) { + result = &v1.TaskRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. 
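(Aside, not part of the vendored diff: the List/Watch pair in this file maps onto GET requests against /apis/tekton.dev/v1/namespaces/<ns>/taskruns, with TimeoutSeconds propagated as the request timeout. A sketch of consuming the watch, with the timeout value as an assumption:)

    ts := int64(300)
    w, err := cs.TektonV1().TaskRuns("default").Watch(context.Background(),
        metav1.ListOptions{TimeoutSeconds: &ts})
    if err != nil {
        log.Fatal(err)
    }
    defer w.Stop()
    for ev := range w.ResultChan() {
        tr, ok := ev.Object.(*pipelinev1.TaskRun)
        if !ok {
            continue
        }
        log.Printf("%s taskrun %s", ev.Type, tr.Name)
    }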
+func (c *taskRuns) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TaskRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.TaskRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested taskRuns. +func (c *taskRuns) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Create(ctx context.Context, taskRun *v1.TaskRun, opts metav1.CreateOptions) (result *v1.TaskRun, err error) { + result = &v1.TaskRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(taskRun). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. +func (c *taskRuns) Update(ctx context.Context, taskRun *v1.TaskRun, opts metav1.UpdateOptions) (result *v1.TaskRun, err error) { + result = &v1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(taskRun). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *taskRuns) UpdateStatus(ctx context.Context, taskRun *v1.TaskRun, opts metav1.UpdateOptions) (result *v1.TaskRun, err error) { + result = &v1.TaskRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("taskruns"). + Name(taskRun.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(taskRun). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. +func (c *taskRuns) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *taskRuns) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("taskruns"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched taskRun. +func (c *taskRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.TaskRun, err error) { + result = &v1.TaskRun{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("taskruns"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/customrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/customrun.go new file mode 100644 index 0000000000..a5755045bd --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/customrun.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CustomRunsGetter has a method to return a CustomRunInterface. +// A group's client should implement this interface. +type CustomRunsGetter interface { + CustomRuns(namespace string) CustomRunInterface +} + +// CustomRunInterface has methods to work with CustomRun resources. +type CustomRunInterface interface { + Create(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.CreateOptions) (*v1beta1.CustomRun, error) + Update(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) + UpdateStatus(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomRun, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomRunList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomRun, err error) + CustomRunExpansion +} + +// customRuns implements CustomRunInterface +type customRuns struct { + client rest.Interface + ns string +} + +// newCustomRuns returns a CustomRuns +func newCustomRuns(c *TektonV1beta1Client, namespace string) *customRuns { + return &customRuns{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the customRun, and returns the corresponding customRun object, and an error if there is any. +func (c *customRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomRun, err error) { + result = &v1beta1.CustomRun{} + err = c.client.Get(). + Namespace(c.ns). + Resource("customruns"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of CustomRuns that match those selectors. +func (c *customRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomRunList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CustomRunList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("customruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested customRuns. +func (c *customRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("customruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a customRun and creates it. Returns the server's representation of the customRun, and an error, if there is any. +func (c *customRuns) Create(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.CreateOptions) (result *v1beta1.CustomRun, err error) { + result = &v1beta1.CustomRun{} + err = c.client.Post(). + Namespace(c.ns). + Resource("customruns"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customRun). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a customRun and updates it. Returns the server's representation of the customRun, and an error, if there is any. +func (c *customRuns) Update(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (result *v1beta1.CustomRun, err error) { + result = &v1beta1.CustomRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("customruns"). + Name(customRun.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customRun). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *customRuns) UpdateStatus(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (result *v1beta1.CustomRun, err error) { + result = &v1beta1.CustomRun{} + err = c.client.Put(). + Namespace(c.ns). + Resource("customruns"). + Name(customRun.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(customRun). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the customRun and deletes it. Returns an error if one occurs. +func (c *customRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("customruns"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *customRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("customruns"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched customRun. 
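(Aside, not part of the vendored diff: the Patch method below accepts any apimachinery patch type; a merge-patch label update is the common case. A sketch, with the run name and label as assumptions; types is k8s.io/apimachinery/pkg/types:)

    patch := []byte(`{"metadata":{"labels":{"approved":"true"}}}`)
    cr, err := cs.TektonV1beta1().CustomRuns("default").Patch(context.Background(),
        "my-custom-run", types.MergePatchType, patch, metav1.PatchOptions{})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("patched customrun %s", cr.Name)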
+func (c *customRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomRun, err error) { + result = &v1beta1.CustomRun{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("customruns"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_customrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_customrun.go new file mode 100644 index 0000000000..f80f4a1785 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_customrun.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCustomRuns implements CustomRunInterface +type FakeCustomRuns struct { + Fake *FakeTektonV1beta1 + ns string +} + +var customrunsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1beta1", Resource: "customruns"} + +var customrunsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1beta1", Kind: "CustomRun"} + +// Get takes name of the customRun, and returns the corresponding customRun object, and an error if there is any. +func (c *FakeCustomRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CustomRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(customrunsResource, c.ns, name), &v1beta1.CustomRun{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomRun), err +} + +// List takes label and field selectors, and returns the list of CustomRuns that match those selectors. +func (c *FakeCustomRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CustomRunList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(customrunsResource, customrunsKind, c.ns, opts), &v1beta1.CustomRunList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.CustomRunList{ListMeta: obj.(*v1beta1.CustomRunList).ListMeta} + for _, item := range obj.(*v1beta1.CustomRunList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested customRuns. 
+func (c *FakeCustomRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(customrunsResource, c.ns, opts)) + +} + +// Create takes the representation of a customRun and creates it. Returns the server's representation of the customRun, and an error, if there is any. +func (c *FakeCustomRuns) Create(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.CreateOptions) (result *v1beta1.CustomRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(customrunsResource, c.ns, customRun), &v1beta1.CustomRun{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomRun), err +} + +// Update takes the representation of a customRun and updates it. Returns the server's representation of the customRun, and an error, if there is any. +func (c *FakeCustomRuns) Update(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (result *v1beta1.CustomRun, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(customrunsResource, c.ns, customRun), &v1beta1.CustomRun{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomRun), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCustomRuns) UpdateStatus(ctx context.Context, customRun *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(customrunsResource, "status", c.ns, customRun), &v1beta1.CustomRun{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomRun), err +} + +// Delete takes name of the customRun and deletes it. Returns an error if one occurs. +func (c *FakeCustomRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(customrunsResource, c.ns, name, opts), &v1beta1.CustomRun{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCustomRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(customrunsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.CustomRunList{}) + return err +} + +// Patch applies the patch and returns the patched customRun. +func (c *FakeCustomRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomRun, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(customrunsResource, c.ns, name, pt, data, subresources...), &v1beta1.CustomRun{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CustomRun), err +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go index 55c4c40d3f..a142026b2b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/fake/fake_pipeline_client.go @@ -32,6 +32,10 @@ func (c *FakeTektonV1beta1) ClusterTasks() v1beta1.ClusterTaskInterface { return &FakeClusterTasks{c} } +func (c *FakeTektonV1beta1) CustomRuns(namespace string) v1beta1.CustomRunInterface { + return &FakeCustomRuns{c, namespace} +} + func (c *FakeTektonV1beta1) Pipelines(namespace string) v1beta1.PipelineInterface { return &FakePipelines{c, namespace} } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go index 83951f9851..b9f3554be3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go @@ -20,6 +20,8 @@ package v1beta1 type ClusterTaskExpansion interface{} +type CustomRunExpansion interface{} + type PipelineExpansion interface{} type PipelineRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go index bec6312585..0974d31771 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go @@ -29,6 +29,7 @@ import ( type TektonV1beta1Interface interface { RESTClient() rest.Interface ClusterTasksGetter + CustomRunsGetter PipelinesGetter PipelineRunsGetter TasksGetter @@ -44,6 +45,10 @@ func (c *TektonV1beta1Client) ClusterTasks() ClusterTaskInterface { return newClusterTasks(c) } +func (c *TektonV1beta1Client) CustomRuns(namespace string) CustomRunInterface { + return newCustomRuns(c, namespace) +} + func (c *TektonV1beta1Client) Pipelines(namespace string) PipelineInterface { return newPipelines(c, namespace) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go index 2abfa5f70f..3f3c58d271 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go @@ -57,8 +57,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=tekton.dev, Version=v1 case v1.SchemeGroupVersion.WithResource("pipelines"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().Pipelines().Informer()}, 
nil + case v1.SchemeGroupVersion.WithResource("pipelineruns"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().PipelineRuns().Informer()}, nil case v1.SchemeGroupVersion.WithResource("tasks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().Tasks().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("taskruns"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().TaskRuns().Informer()}, nil // Group=tekton.dev, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("runs"): @@ -67,6 +71,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=tekton.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("clustertasks"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().ClusterTasks().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("customruns"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().CustomRuns().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("pipelines"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1beta1().Pipelines().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("pipelineruns"): diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go index 8a333b97c2..67690389f5 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go @@ -26,8 +26,12 @@ import ( type Interface interface { // Pipelines returns a PipelineInformer. Pipelines() PipelineInformer + // PipelineRuns returns a PipelineRunInformer. + PipelineRuns() PipelineRunInformer // Tasks returns a TaskInformer. Tasks() TaskInformer + // TaskRuns returns a TaskRunInformer. + TaskRuns() TaskRunInformer } type version struct { @@ -46,7 +50,17 @@ func (v *version) Pipelines() PipelineInformer { return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// PipelineRuns returns a PipelineRunInformer. +func (v *version) PipelineRuns() PipelineRunInformer { + return &pipelineRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Tasks returns a TaskInformer. func (v *version) Tasks() TaskInformer { return &taskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } + +// TaskRuns returns a TaskRunInformer. +func (v *version) TaskRuns() TaskRunInformer { + return &taskRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/pipelinerun.go new file mode 100644 index 0000000000..4e02db3c5b --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/pipelinerun.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PipelineRunInformer provides access to a shared informer and lister for +// PipelineRuns. +type PipelineRunInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PipelineRunLister +} + +type pipelineRunInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPipelineRunInformer constructs a new informer for PipelineRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPipelineRunInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPipelineRunInformer constructs a new informer for PipelineRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1().PipelineRuns(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1().PipelineRuns(namespace).Watch(context.TODO(), options) + }, + }, + &pipelinev1.PipelineRun{}, + resyncPeriod, + indexers, + ) +} + +func (f *pipelineRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPipelineRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *pipelineRunInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&pipelinev1.PipelineRun{}, f.defaultInformer) +} + +func (f *pipelineRunInformer) Lister() v1.PipelineRunLister { + return v1.NewPipelineRunLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/taskrun.go new file mode 100644 index 0000000000..d8ef20c043 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/taskrun.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// TaskRunInformer provides access to a shared informer and lister for +// TaskRuns. +type TaskRunInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.TaskRunLister +} + +type taskRunInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTaskRunInformer constructs a new informer for TaskRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
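(Aside, not part of the vendored diff: in practice these constructors are reached through the generated SharedInformerFactory rather than called directly, so informers and listers share one cache. A sketch, with the resync period and namespace as assumptions; externalversions is github.com/tektoncd/pipeline/pkg/client/informers/externalversions, cache is k8s.io/client-go/tools/cache, labels is k8s.io/apimachinery/pkg/labels:)

    factory := externalversions.NewSharedInformerFactory(cs, 10*time.Minute)
    prInformer := factory.Tekton().V1().PipelineRuns()
    prInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            // enqueue the PipelineRun for reconciliation
        },
    })
    stop := make(chan struct{})
    defer close(stop)
    factory.Start(stop)
    factory.WaitForCacheSync(stop)
    runs, err := prInformer.Lister().PipelineRuns("default").List(labels.Everything())
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("cached pipelineruns: %d", len(runs))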
+func NewTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTaskRunInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTaskRunInformer constructs a new informer for TaskRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1().TaskRuns(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1().TaskRuns(namespace).Watch(context.TODO(), options) + }, + }, + &pipelinev1.TaskRun{}, + resyncPeriod, + indexers, + ) +} + +func (f *taskRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTaskRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *taskRunInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&pipelinev1.TaskRun{}, f.defaultInformer) +} + +func (f *taskRunInformer) Lister() v1.TaskRunLister { + return v1.NewTaskRunLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/customrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/customrun.go new file mode 100644 index 0000000000..532a61c8b6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/customrun.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CustomRunInformer provides access to a shared informer and lister for +// CustomRuns. 
+type CustomRunInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CustomRunLister +} + +type customRunInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCustomRunInformer constructs a new informer for CustomRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCustomRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCustomRunInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCustomRunInformer constructs a new informer for CustomRun type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCustomRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1beta1().CustomRuns(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TektonV1beta1().CustomRuns(namespace).Watch(context.TODO(), options) + }, + }, + &pipelinev1beta1.CustomRun{}, + resyncPeriod, + indexers, + ) +} + +func (f *customRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCustomRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *customRunInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&pipelinev1beta1.CustomRun{}, f.defaultInformer) +} + +func (f *customRunInformer) Lister() v1beta1.CustomRunLister { + return v1beta1.NewCustomRunLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go index 37b4f5364c..307843a801 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // ClusterTasks returns a ClusterTaskInformer. ClusterTasks() ClusterTaskInformer + // CustomRuns returns a CustomRunInformer. + CustomRuns() CustomRunInformer // Pipelines returns a PipelineInformer. Pipelines() PipelineInformer // PipelineRuns returns a PipelineRunInformer. @@ -52,6 +54,11 @@ func (v *version) ClusterTasks() ClusterTaskInformer { return &clusterTaskInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// CustomRuns returns a CustomRunInformer. 
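(Aside, not part of the vendored diff: the tweakListOptions hook threaded through these constructors is set at factory construction time via the With* options that informer-gen normally emits alongside the factory. A sketch, with the selector and namespace as assumptions:)

    tweak := func(o *metav1.ListOptions) {
        o.LabelSelector = "tekton.dev/run=custom"
    }
    factory := externalversions.NewSharedInformerFactoryWithOptions(cs, 0,
        externalversions.WithNamespace("default"),
        externalversions.WithTweakListOptions(tweak))
    crInformer := factory.Tekton().V1beta1().CustomRuns()
    _ = crInformer // wire Informer()/Lister() as in any controller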
+func (v *version) CustomRuns() CustomRunInformer { + return &customRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // Pipelines returns a PipelineInformer. func (v *version) Pipelines() PipelineInformer { return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go index 364bbf6766..3104910cf6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go @@ -388,6 +388,137 @@ func (w *wrapTektonV1beta1ClusterTaskImpl) Watch(ctx context.Context, opts v1.Li return nil, errors.New("NYI: Watch") } +func (w *wrapTektonV1beta1) CustomRuns(namespace string) typedtektonv1beta1.CustomRunInterface { + return &wrapTektonV1beta1CustomRunImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "tekton.dev", + Version: "v1beta1", + Resource: "customruns", + }), + + namespace: namespace, + } +} + +type wrapTektonV1beta1CustomRunImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedtektonv1beta1.CustomRunInterface = (*wrapTektonV1beta1CustomRunImpl)(nil) + +func (w *wrapTektonV1beta1CustomRunImpl) Create(ctx context.Context, in *v1beta1.CustomRun, opts v1.CreateOptions) (*v1beta1.CustomRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1beta1", + Kind: "CustomRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapTektonV1beta1CustomRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapTektonV1beta1CustomRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CustomRun, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CustomRunList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRunList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CustomRun, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) Update(ctx 
context.Context, in *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1beta1", + Kind: "CustomRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) UpdateStatus(ctx context.Context, in *v1beta1.CustomRun, opts v1.UpdateOptions) (*v1beta1.CustomRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1beta1", + Kind: "CustomRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.CustomRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1beta1CustomRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} + func (w *wrapTektonV1beta1) Pipelines(namespace string) typedtektonv1beta1.PipelineInterface { return &wrapTektonV1beta1PipelineImpl{ dyn: w.dyn.Resource(schema.GroupVersionResource{ @@ -1058,6 +1189,137 @@ func (w *wrapTektonV1PipelineImpl) Watch(ctx context.Context, opts v1.ListOption return nil, errors.New("NYI: Watch") } +func (w *wrapTektonV1) PipelineRuns(namespace string) typedtektonv1.PipelineRunInterface { + return &wrapTektonV1PipelineRunImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "tekton.dev", + Version: "v1", + Resource: "pipelineruns", + }), + + namespace: namespace, + } +} + +type wrapTektonV1PipelineRunImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedtektonv1.PipelineRunInterface = (*wrapTektonV1PipelineRunImpl)(nil) + +func (w *wrapTektonV1PipelineRunImpl) Create(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.CreateOptions) (*pipelinev1.PipelineRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "PipelineRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapTektonV1PipelineRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapTektonV1PipelineRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.PipelineRun, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) List(ctx context.Context, opts v1.ListOptions) 
(*pipelinev1.PipelineRunList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRunList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.PipelineRun, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) Update(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1.PipelineRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "PipelineRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) UpdateStatus(ctx context.Context, in *pipelinev1.PipelineRun, opts v1.UpdateOptions) (*pipelinev1.PipelineRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "PipelineRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.PipelineRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1PipelineRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} + func (w *wrapTektonV1) Tasks(namespace string) typedtektonv1.TaskInterface { return &wrapTektonV1TaskImpl{ dyn: w.dyn.Resource(schema.GroupVersionResource{ @@ -1188,3 +1450,134 @@ func (w *wrapTektonV1TaskImpl) UpdateStatus(ctx context.Context, in *pipelinev1. 
func (w *wrapTektonV1TaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return nil, errors.New("NYI: Watch") } + +func (w *wrapTektonV1) TaskRuns(namespace string) typedtektonv1.TaskRunInterface { + return &wrapTektonV1TaskRunImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "tekton.dev", + Version: "v1", + Resource: "taskruns", + }), + + namespace: namespace, + } +} + +type wrapTektonV1TaskRunImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedtektonv1.TaskRunInterface = (*wrapTektonV1TaskRunImpl)(nil) + +func (w *wrapTektonV1TaskRunImpl) Create(ctx context.Context, in *pipelinev1.TaskRun, opts v1.CreateOptions) (*pipelinev1.TaskRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "TaskRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapTektonV1TaskRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapTektonV1TaskRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.TaskRun, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.TaskRunList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRunList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.TaskRun, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) Update(ctx context.Context, in *pipelinev1.TaskRun, opts v1.UpdateOptions) (*pipelinev1.TaskRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "TaskRun", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) UpdateStatus(ctx context.Context, in *pipelinev1.TaskRun, opts v1.UpdateOptions) (*pipelinev1.TaskRun, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "TaskRun", + }) + uo := 
&unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskRun{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go index c2c17a833c..93a3b87567 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go @@ -26,6 +26,14 @@ type PipelineListerExpansion interface{} // PipelineNamespaceLister. type PipelineNamespaceListerExpansion interface{} +// PipelineRunListerExpansion allows custom methods to be added to +// PipelineRunLister. +type PipelineRunListerExpansion interface{} + +// PipelineRunNamespaceListerExpansion allows custom methods to be added to +// PipelineRunNamespaceLister. +type PipelineRunNamespaceListerExpansion interface{} + // TaskListerExpansion allows custom methods to be added to // TaskLister. type TaskListerExpansion interface{} @@ -33,3 +41,11 @@ type TaskListerExpansion interface{} // TaskNamespaceListerExpansion allows custom methods to be added to // TaskNamespaceLister. type TaskNamespaceListerExpansion interface{} + +// TaskRunListerExpansion allows custom methods to be added to +// TaskRunLister. +type TaskRunListerExpansion interface{} + +// TaskRunNamespaceListerExpansion allows custom methods to be added to +// TaskRunNamespaceLister. +type TaskRunNamespaceListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/pipelinerun.go new file mode 100644 index 0000000000..4a16673da0 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/pipelinerun.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PipelineRunLister helps list PipelineRuns. +// All objects returned here must be treated as read-only. +type PipelineRunLister interface { + // List lists all PipelineRuns in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PipelineRun, err error) + // PipelineRuns returns an object that can list and get PipelineRuns. 
+ PipelineRuns(namespace string) PipelineRunNamespaceLister + PipelineRunListerExpansion +} + +// pipelineRunLister implements the PipelineRunLister interface. +type pipelineRunLister struct { + indexer cache.Indexer +} + +// NewPipelineRunLister returns a new PipelineRunLister. +func NewPipelineRunLister(indexer cache.Indexer) PipelineRunLister { + return &pipelineRunLister{indexer: indexer} +} + +// List lists all PipelineRuns in the indexer. +func (s *pipelineRunLister) List(selector labels.Selector) (ret []*v1.PipelineRun, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PipelineRun)) + }) + return ret, err +} + +// PipelineRuns returns an object that can list and get PipelineRuns. +func (s *pipelineRunLister) PipelineRuns(namespace string) PipelineRunNamespaceLister { + return pipelineRunNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PipelineRunNamespaceLister helps list and get PipelineRuns. +// All objects returned here must be treated as read-only. +type PipelineRunNamespaceLister interface { + // List lists all PipelineRuns in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.PipelineRun, err error) + // Get retrieves the PipelineRun from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.PipelineRun, error) + PipelineRunNamespaceListerExpansion +} + +// pipelineRunNamespaceLister implements the PipelineRunNamespaceLister +// interface. +type pipelineRunNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PipelineRuns in the indexer for a given namespace. +func (s pipelineRunNamespaceLister) List(selector labels.Selector) (ret []*v1.PipelineRun, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PipelineRun)) + }) + return ret, err +} + +// Get retrieves the PipelineRun from the indexer for a given namespace and name. +func (s pipelineRunNamespaceLister) Get(name string) (*v1.PipelineRun, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("pipelinerun"), name) + } + return obj.(*v1.PipelineRun), nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/taskrun.go new file mode 100644 index 0000000000..7f5e2c028b --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/taskrun.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// TaskRunLister helps list TaskRuns. +// All objects returned here must be treated as read-only. +type TaskRunLister interface { + // List lists all TaskRuns in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.TaskRun, err error) + // TaskRuns returns an object that can list and get TaskRuns. + TaskRuns(namespace string) TaskRunNamespaceLister + TaskRunListerExpansion +} + +// taskRunLister implements the TaskRunLister interface. +type taskRunLister struct { + indexer cache.Indexer +} + +// NewTaskRunLister returns a new TaskRunLister. +func NewTaskRunLister(indexer cache.Indexer) TaskRunLister { + return &taskRunLister{indexer: indexer} +} + +// List lists all TaskRuns in the indexer. +func (s *taskRunLister) List(selector labels.Selector) (ret []*v1.TaskRun, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TaskRun)) + }) + return ret, err +} + +// TaskRuns returns an object that can list and get TaskRuns. +func (s *taskRunLister) TaskRuns(namespace string) TaskRunNamespaceLister { + return taskRunNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TaskRunNamespaceLister helps list and get TaskRuns. +// All objects returned here must be treated as read-only. +type TaskRunNamespaceLister interface { + // List lists all TaskRuns in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.TaskRun, err error) + // Get retrieves the TaskRun from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.TaskRun, error) + TaskRunNamespaceListerExpansion +} + +// taskRunNamespaceLister implements the TaskRunNamespaceLister +// interface. +type taskRunNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TaskRuns in the indexer for a given namespace. +func (s taskRunNamespaceLister) List(selector labels.Selector) (ret []*v1.TaskRun, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TaskRun)) + }) + return ret, err +} + +// Get retrieves the TaskRun from the indexer for a given namespace and name. +func (s taskRunNamespaceLister) Get(name string) (*v1.TaskRun, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("taskrun"), name) + } + return obj.(*v1.TaskRun), nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/customrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/customrun.go new file mode 100644 index 0000000000..546668a17e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/customrun.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CustomRunLister helps list CustomRuns. +// All objects returned here must be treated as read-only. +type CustomRunLister interface { + // List lists all CustomRuns in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.CustomRun, err error) + // CustomRuns returns an object that can list and get CustomRuns. + CustomRuns(namespace string) CustomRunNamespaceLister + CustomRunListerExpansion +} + +// customRunLister implements the CustomRunLister interface. +type customRunLister struct { + indexer cache.Indexer +} + +// NewCustomRunLister returns a new CustomRunLister. +func NewCustomRunLister(indexer cache.Indexer) CustomRunLister { + return &customRunLister{indexer: indexer} +} + +// List lists all CustomRuns in the indexer. +func (s *customRunLister) List(selector labels.Selector) (ret []*v1beta1.CustomRun, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CustomRun)) + }) + return ret, err +} + +// CustomRuns returns an object that can list and get CustomRuns. +func (s *customRunLister) CustomRuns(namespace string) CustomRunNamespaceLister { + return customRunNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CustomRunNamespaceLister helps list and get CustomRuns. +// All objects returned here must be treated as read-only. +type CustomRunNamespaceLister interface { + // List lists all CustomRuns in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.CustomRun, err error) + // Get retrieves the CustomRun from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.CustomRun, error) + CustomRunNamespaceListerExpansion +} + +// customRunNamespaceLister implements the CustomRunNamespaceLister +// interface. +type customRunNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CustomRuns in the indexer for a given namespace. +func (s customRunNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CustomRun, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CustomRun)) + }) + return ret, err +} + +// Get retrieves the CustomRun from the indexer for a given namespace and name. 
+func (s customRunNamespaceLister) Get(name string) (*v1beta1.CustomRun, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("customrun"), name) + } + return obj.(*v1beta1.CustomRun), nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/expansion_generated.go index 3da70592c5..db5d996e61 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/expansion_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/expansion_generated.go @@ -22,6 +22,14 @@ package v1beta1 // ClusterTaskLister. type ClusterTaskListerExpansion interface{} +// CustomRunListerExpansion allows custom methods to be added to +// CustomRunLister. +type CustomRunListerExpansion interface{} + +// CustomRunNamespaceListerExpansion allows custom methods to be added to +// CustomRunNamespaceLister. +type CustomRunNamespaceListerExpansion interface{} + // PipelineListerExpansion allows custom methods to be added to // PipelineLister. type PipelineListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go similarity index 80% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/clientset.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go index 2cdb8bd7c1..df12dfc026 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/clientset.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,7 +22,8 @@ import ( "fmt" "net/http" - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1" + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -31,6 +32,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1Interface + ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -38,6 +40,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient resolutionV1alpha1 *resolutionv1alpha1.ResolutionV1alpha1Client + resolutionV1beta1 *resolutionv1beta1.ResolutionV1beta1Client } // ResolutionV1alpha1 retrieves the ResolutionV1alpha1Client @@ -45,6 +48,11 @@ func (c *Clientset) ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1In return c.resolutionV1alpha1 } +// ResolutionV1beta1 retrieves the ResolutionV1beta1Client +func (c *Clientset) ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface { + return c.resolutionV1beta1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -61,6 +69,10 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + // share the transport between all clients httpClient, err := rest.HTTPClientFor(&configShallowCopy) if err != nil { @@ -89,6 +101,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.resolutionV1beta1, err = resolutionv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -111,6 +127,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.resolutionV1alpha1 = resolutionv1alpha1.New(c) + cs.resolutionV1beta1 = resolutionv1beta1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/doc.go similarity index 95% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/doc.go index d14e1f711c..0d13552ae2 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
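Note: with the resolution clientset now vendored under github.com/tektoncd/pipeline instead of github.com/tektoncd/resolution, callers reach the new v1beta1 group through the same Clientset. A minimal sketch of constructing it and listing v1beta1 ResolutionRequests — the kubeconfig path and namespace here are hypothetical, everything else follows the generated API above:

package main

import (
	"context"
	"fmt"

	versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig path (hypothetical location).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	// NewForConfig now defaults the UserAgent before building the shared HTTP client.
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The v1beta1 group client sits alongside the existing v1alpha1 one.
	reqs, err := cs.ResolutionV1beta1().ResolutionRequests("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d resolution requests\n", len(reqs.Items))
}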
diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/clientset_generated.go similarity index 75% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/clientset_generated.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/clientset_generated.go index 42090ab85f..b8bae4b809 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,9 +19,11 @@ limitations under the License. package fake import ( - clientset "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1" - fakeresolutionv1alpha1 "github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake" + clientset "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1" + fakeresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake" + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1" + fakeresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -83,3 +85,8 @@ var ( func (c *Clientset) ResolutionV1alpha1() resolutionv1alpha1.ResolutionV1alpha1Interface { return &fakeresolutionv1alpha1.FakeResolutionV1alpha1{Fake: &c.Fake} } + +// ResolutionV1beta1 retrieves the ResolutionV1beta1Client +func (c *Clientset) ResolutionV1beta1() resolutionv1beta1.ResolutionV1beta1Interface { + return &fakeresolutionv1beta1.FakeResolutionV1beta1{Fake: &c.Fake} +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/doc.go similarity index 95% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/doc.go index 7a691f8310..4e4dbb6818 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
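Note: the regenerated fake clientset wires in ResolutionV1beta1 as well, so unit tests can exercise the new typed client without an API server. A sketch, assuming the fake package exposes the usual client-gen NewSimpleClientset constructor (not shown in this excerpt):

package example

import (
	"context"
	"testing"

	resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
	fakeclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestResolutionV1beta1Fake(t *testing.T) {
	cs := fakeclient.NewSimpleClientset() // in-memory object tracker, no cluster needed
	req := &resolutionv1beta1.ResolutionRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	if _, err := cs.ResolutionV1beta1().ResolutionRequests("default").Create(context.TODO(), req, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
	got, err := cs.ResolutionV1beta1().ResolutionRequests("default").Get(context.TODO(), "example", metav1.GetOptions{})
	if err != nil || got.Name != "example" {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
}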
diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/register.go similarity index 88% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/register.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/register.go index bda1637698..70d792b35d 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,8 @@ limitations under the License. package fake import ( - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,6 +33,7 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ resolutionv1alpha1.AddToScheme, + resolutionv1beta1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/doc.go similarity index 95% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/doc.go index 32e13e4ed2..0fb16cc056 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/register.go similarity index 89% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/register.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/register.go index 2d970993cc..9fe0d86a62 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,8 @@ limitations under the License. 
package scheme import ( - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,6 +33,7 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ resolutionv1alpha1.AddToScheme, + resolutionv1beta1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/doc.go similarity index 95% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/doc.go index 9446bef1d3..69ed294b82 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go similarity index 95% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go index 0495aeeded..1a72e0befe 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
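Note: the scheme builders above now register both resolution API versions, so a single runtime.Scheme can resolve kinds for either. A small illustrative sketch mirroring the generated localSchemeBuilder (function names here are ours, not part of the generated code):

package example

import (
	"fmt"

	resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1"
	resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func buildScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	// Both versions go into one scheme, as in the generated register.go.
	if err := resolutionv1alpha1.AddToScheme(s); err != nil {
		return nil, err
	}
	if err := resolutionv1beta1.AddToScheme(s); err != nil {
		return nil, err
	}
	return s, nil
}

func main() {
	s, err := buildScheme()
	if err != nil {
		panic(err)
	}
	// The scheme can now map a typed object back to its GroupVersionKind.
	gvks, _, err := s.ObjectKinds(&resolutionv1beta1.ResolutionRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [resolution.tekton.dev/v1beta1, Kind=ResolutionRequest]
}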
diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go similarity index 88% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go index cd24545d37..a442a07b0e 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolution_client.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - v1alpha1 "github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go similarity index 98% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go index a8cecc357a..ce8a7d2bad 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/fake/fake_resolutionrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,7 +21,7 @@ package fake import ( "context" - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go similarity index 94% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go index 58dd861823..f11a8b5169 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go similarity index 94% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go index c756db5848..bc70e73c9a 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolution_client.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,8 +21,8 @@ package v1alpha1 import ( "net/http" - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - "github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go similarity index 97% rename from vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go index a4a5258ae7..6169f8a3ad 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1/resolutionrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,8 +22,8 @@ import ( "context" "time" - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - scheme "github.com/tektoncd/resolution/pkg/client/clientset/versioned/scheme" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/doc.go new file mode 100644 index 0000000000..acfb8c0b67 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/doc.go new file mode 100644 index 0000000000..1a72e0befe --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolution_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolution_client.go new file mode 100644 index 0000000000..fbe98e3880 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolution_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeResolutionV1beta1 struct { + *testing.Fake +} + +func (c *FakeResolutionV1beta1) ResolutionRequests(namespace string) v1beta1.ResolutionRequestInterface { + return &FakeResolutionRequests{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeResolutionV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolutionrequest.go new file mode 100644 index 0000000000..29f0cb2a9d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/fake/fake_resolutionrequest.go @@ -0,0 +1,142 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeResolutionRequests implements ResolutionRequestInterface +type FakeResolutionRequests struct { + Fake *FakeResolutionV1beta1 + ns string +} + +var resolutionrequestsResource = schema.GroupVersionResource{Group: "resolution.tekton.dev", Version: "v1beta1", Resource: "resolutionrequests"} + +var resolutionrequestsKind = schema.GroupVersionKind{Group: "resolution.tekton.dev", Version: "v1beta1", Kind: "ResolutionRequest"} + +// Get takes name of the resolutionRequest, and returns the corresponding resolutionRequest object, and an error if there is any. +func (c *FakeResolutionRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ResolutionRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(resolutionrequestsResource, c.ns, name), &v1beta1.ResolutionRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ResolutionRequest), err +} + +// List takes label and field selectors, and returns the list of ResolutionRequests that match those selectors. +func (c *FakeResolutionRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ResolutionRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(resolutionrequestsResource, resolutionrequestsKind, c.ns, opts), &v1beta1.ResolutionRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ResolutionRequestList{ListMeta: obj.(*v1beta1.ResolutionRequestList).ListMeta} + for _, item := range obj.(*v1beta1.ResolutionRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resolutionRequests. 
+func (c *FakeResolutionRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(resolutionrequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a resolutionRequest and creates it. Returns the server's representation of the resolutionRequest, and an error, if there is any. +func (c *FakeResolutionRequests) Create(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.CreateOptions) (result *v1beta1.ResolutionRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(resolutionrequestsResource, c.ns, resolutionRequest), &v1beta1.ResolutionRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ResolutionRequest), err +} + +// Update takes the representation of a resolutionRequest and updates it. Returns the server's representation of the resolutionRequest, and an error, if there is any. +func (c *FakeResolutionRequests) Update(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (result *v1beta1.ResolutionRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resolutionrequestsResource, c.ns, resolutionRequest), &v1beta1.ResolutionRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ResolutionRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeResolutionRequests) UpdateStatus(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resolutionrequestsResource, "status", c.ns, resolutionRequest), &v1beta1.ResolutionRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ResolutionRequest), err +} + +// Delete takes name of the resolutionRequest and deletes it. Returns an error if one occurs. +func (c *FakeResolutionRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(resolutionrequestsResource, c.ns, name, opts), &v1beta1.ResolutionRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeResolutionRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resolutionrequestsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.ResolutionRequestList{}) + return err +} + +// Patch applies the patch and returns the patched resolutionRequest. +func (c *FakeResolutionRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ResolutionRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(resolutionrequestsResource, c.ns, name, pt, data, subresources...), &v1beta1.ResolutionRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.ResolutionRequest), err +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/generated_expansion.go new file mode 100644 index 0000000000..85d49c121a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type ResolutionRequestExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolution_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolution_client.go new file mode 100644 index 0000000000..8d13b836c0 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolution_client.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "net/http" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ResolutionV1beta1Interface interface { + RESTClient() rest.Interface + ResolutionRequestsGetter +} + +// ResolutionV1beta1Client is used to interact with features provided by the resolution.tekton.dev group. +type ResolutionV1beta1Client struct { + restClient rest.Interface +} + +func (c *ResolutionV1beta1Client) ResolutionRequests(namespace string) ResolutionRequestInterface { + return newResolutionRequests(c, namespace) +} + +// NewForConfig creates a new ResolutionV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*ResolutionV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ResolutionV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ResolutionV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ResolutionV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ResolutionV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ResolutionV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ResolutionV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ResolutionV1beta1Client { + return &ResolutionV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ResolutionV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolutionrequest.go new file mode 100644 index 0000000000..e09ca18da6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1/resolutionrequest.go @@ -0,0 +1,195 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + scheme "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResolutionRequestsGetter has a method to return a ResolutionRequestInterface. +// A group's client should implement this interface. 
+type ResolutionRequestsGetter interface { + ResolutionRequests(namespace string) ResolutionRequestInterface +} + +// ResolutionRequestInterface has methods to work with ResolutionRequest resources. +type ResolutionRequestInterface interface { + Create(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.CreateOptions) (*v1beta1.ResolutionRequest, error) + Update(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) + UpdateStatus(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ResolutionRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ResolutionRequestList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ResolutionRequest, err error) + ResolutionRequestExpansion +} + +// resolutionRequests implements ResolutionRequestInterface +type resolutionRequests struct { + client rest.Interface + ns string +} + +// newResolutionRequests returns a ResolutionRequests +func newResolutionRequests(c *ResolutionV1beta1Client, namespace string) *resolutionRequests { + return &resolutionRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resolutionRequest, and returns the corresponding resolutionRequest object, and an error if there is any. +func (c *resolutionRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ResolutionRequest, err error) { + result = &v1beta1.ResolutionRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resolutionrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResolutionRequests that match those selectors. +func (c *resolutionRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ResolutionRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ResolutionRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resolutionrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resolutionRequests. +func (c *resolutionRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resolutionrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a resolutionRequest and creates it. Returns the server's representation of the resolutionRequest, and an error, if there is any. 
+func (c *resolutionRequests) Create(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.CreateOptions) (result *v1beta1.ResolutionRequest, err error) { + result = &v1beta1.ResolutionRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resolutionrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resolutionRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a resolutionRequest and updates it. Returns the server's representation of the resolutionRequest, and an error, if there is any. +func (c *resolutionRequests) Update(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (result *v1beta1.ResolutionRequest, err error) { + result = &v1beta1.ResolutionRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resolutionrequests"). + Name(resolutionRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resolutionRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *resolutionRequests) UpdateStatus(ctx context.Context, resolutionRequest *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (result *v1beta1.ResolutionRequest, err error) { + result = &v1beta1.ResolutionRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resolutionrequests"). + Name(resolutionRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(resolutionRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the resolutionRequest and deletes it. Returns an error if one occurs. +func (c *resolutionRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resolutionrequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resolutionRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resolutionrequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched resolutionRequest. +func (c *resolutionRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ResolutionRequest, err error) { + result = &v1beta1.ResolutionRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resolutionrequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/factory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go similarity index 94% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/factory.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go index 5ced845393..39c9230ec8 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/factory.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,9 +23,9 @@ import ( sync "sync" time "time" - versioned "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces" - resolution "github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" + resolution "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/generic.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/generic.go similarity index 81% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/generic.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/generic.go index ee712a6241..a6c63df2d4 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/generic.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,7 +21,8 @@ package externalversions import ( "fmt" - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -56,6 +57,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha1.SchemeGroupVersion.WithResource("resolutionrequests"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resolution().V1alpha1().ResolutionRequests().Informer()}, nil + // Group=resolution.tekton.dev, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("resolutionrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Resolution().V1beta1().ResolutionRequests().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces/factory_interfaces.go similarity index 91% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces/factory_interfaces.go index c54ee0da5f..e5d27987c9 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ package internalinterfaces import ( time "time" - versioned "github.com/tektoncd/resolution/pkg/client/clientset/versioned" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/interface.go similarity index 68% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/interface.go index 9d88e8751a..413b9292f9 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,14 +19,17 @@ limitations under the License. 
package resolution import ( - internalinterfaces "github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1" + v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1" ) // Interface provides access to each of this group's versions. type Interface interface { // V1alpha1 provides access to shared informers for resources in V1alpha1. V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface } type group struct { @@ -44,3 +47,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1alpha1() v1alpha1.Interface { return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/interface.go similarity index 90% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/interface.go index aac054a885..e5bfc1d1f1 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - internalinterfaces "github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. 
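A minimal usage sketch (not part of the upstream diff) of the V1beta1 informer accessor added above; `clientset` is assumed to be a value implementing the generated versioned.Interface, and the imports (externalversions, cache, labels) follow the package paths referenced in these hunks:

	factory := externalversions.NewSharedInformerFactory(clientset, 10*time.Minute)
	// Informer() registers the shared informer with the factory before Start.
	rrInformer := factory.Resolution().V1beta1().ResolutionRequests().Informer()
	lister := factory.Resolution().V1beta1().ResolutionRequests().Lister()
	stopCh := make(chan struct{})
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, rrInformer.HasSynced)
	// Objects returned by the lister are read-only views of the local index.
	reqs, err := lister.ResolutionRequests("default").List(labels.Everything())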
diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/resolutionrequest.go similarity index 89% rename from vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/resolutionrequest.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/resolutionrequest.go index f12abfcd29..f896ee29fc 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1/resolutionrequest.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1/resolutionrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,10 +22,10 @@ import ( "context" time "time" - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - versioned "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/resolution/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/interface.go new file mode 100644 index 0000000000..de6bf33c01 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ResolutionRequests returns a ResolutionRequestInformer. 
+ ResolutionRequests() ResolutionRequestInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ResolutionRequests returns a ResolutionRequestInformer. +func (v *version) ResolutionRequests() ResolutionRequestInformer { + return &resolutionRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/resolutionrequest.go new file mode 100644 index 0000000000..ed770102c7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1beta1/resolutionrequest.go @@ -0,0 +1,90 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + resolutionv1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/internalinterfaces" + v1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ResolutionRequestInformer provides access to a shared informer and lister for +// ResolutionRequests. +type ResolutionRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ResolutionRequestLister +} + +type resolutionRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResolutionRequestInformer constructs a new informer for ResolutionRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResolutionRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResolutionRequestInformer constructs a new informer for ResolutionRequest type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResolutionRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResolutionV1beta1().ResolutionRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ResolutionV1beta1().ResolutionRequests(namespace).Watch(context.TODO(), options) + }, + }, + &resolutionv1beta1.ResolutionRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *resolutionRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResolutionRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resolutionRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&resolutionv1beta1.ResolutionRequest{}, f.defaultInformer) +} + +func (f *resolutionRequestInformer) Lister() v1beta1.ResolutionRequestLister { + return v1beta1.NewResolutionRequestLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/client/client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go similarity index 57% rename from vendor/github.com/tektoncd/resolution/pkg/client/injection/client/client.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go index 0c3c7f8371..2b357f8253 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/client/client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/client.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -24,9 +24,11 @@ import ( errors "errors" fmt "fmt" - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - versioned "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - typedresolutionv1alpha1 "github.com/tektoncd/resolution/pkg/client/clientset/versioned/typed/resolution/v1alpha1" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + typedresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1" + typedresolutionv1beta1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" runtime "k8s.io/apimachinery/pkg/runtime" @@ -66,10 +68,10 @@ func Get(ctx context.Context) versioned.Interface { if untyped == nil { if injection.GetConfig(ctx) == nil { logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/resolution/pkg/client/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).") + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).") } else { logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/resolution/pkg/client/clientset/versioned.Interface from context.") + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned.Interface from context.") } } return untyped.(versioned.Interface) @@ -241,3 +243,149 @@ func (w *wrapResolutionV1alpha1ResolutionRequestImpl) UpdateStatus(ctx context.C func (w *wrapResolutionV1alpha1ResolutionRequestImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return nil, errors.New("NYI: Watch") } + +// ResolutionV1beta1 retrieves the ResolutionV1beta1Client +func (w *wrapClient) ResolutionV1beta1() typedresolutionv1beta1.ResolutionV1beta1Interface { + return &wrapResolutionV1beta1{ + dyn: w.dyn, + } +} + +type wrapResolutionV1beta1 struct { + dyn dynamic.Interface +} + +func (w *wrapResolutionV1beta1) RESTClient() rest.Interface { + panic("RESTClient called on dynamic client!") +} + +func (w *wrapResolutionV1beta1) ResolutionRequests(namespace string) typedresolutionv1beta1.ResolutionRequestInterface { + return &wrapResolutionV1beta1ResolutionRequestImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "resolution.tekton.dev", + Version: "v1beta1", + Resource: "resolutionrequests", + }), + + namespace: namespace, + } +} + +type wrapResolutionV1beta1ResolutionRequestImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedresolutionv1beta1.ResolutionRequestInterface = (*wrapResolutionV1beta1ResolutionRequestImpl)(nil) + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Create(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.CreateOptions) (*v1beta1.ResolutionRequest, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "resolution.tekton.dev", + Version: "v1beta1", + Kind: "ResolutionRequest", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := 
w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequest{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ResolutionRequest, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequest{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ResolutionRequestList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequestList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ResolutionRequest, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequest{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Update(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "resolution.tekton.dev", + Version: "v1beta1", + Kind: "ResolutionRequest", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequest{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) UpdateStatus(ctx context.Context, in *v1beta1.ResolutionRequest, opts v1.UpdateOptions) (*v1beta1.ResolutionRequest, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "resolution.tekton.dev", + Version: "v1beta1", + Kind: "ResolutionRequest", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &v1beta1.ResolutionRequest{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapResolutionV1beta1ResolutionRequestImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/client/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake/fake.go similarity index 82% rename from 
vendor/github.com/tektoncd/resolution/pkg/client/injection/client/fake/fake.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake/fake.go index 8e83fd67ec..0004b0aede 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/client/fake/fake.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake/fake.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ package fake import ( context "context" - fake "github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake" - client "github.com/tektoncd/resolution/pkg/client/injection/client" + fake "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake" + client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client" runtime "k8s.io/apimachinery/pkg/runtime" rest "k8s.io/client-go/rest" injection "knative.dev/pkg/injection" @@ -51,7 +51,7 @@ func Get(ctx context.Context) *fake.Clientset { untyped := ctx.Value(client.Key{}) if untyped == nil { logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake.Clientset from context.") + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake.Clientset from context.") } return untyped.(*fake.Clientset) } diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/factory.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/factory.go similarity index 82% rename from vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/factory.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/factory.go index 995691da25..6d79983a44 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/factory.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,8 +21,8 @@ package factory import ( context "context" - externalversions "github.com/tektoncd/resolution/pkg/client/informers/externalversions" - client "github.com/tektoncd/resolution/pkg/client/injection/client" + externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions" + client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client" controller "knative.dev/pkg/controller" injection "knative.dev/pkg/injection" logging "knative.dev/pkg/logging" @@ -50,7 +50,7 @@ func Get(ctx context.Context) externalversions.SharedInformerFactory { untyped := ctx.Value(Key{}) if untyped == nil { logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/resolution/pkg/client/informers/externalversions.SharedInformerFactory from context.") + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions.SharedInformerFactory from context.") } return untyped.(externalversions.SharedInformerFactory) } diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake/fake.go similarity index 80% rename from vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/fake/fake.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake/fake.go index 6b35441961..80d30060f9 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/factory/fake/fake.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake/fake.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,9 +21,9 @@ package fake import ( context "context" - externalversions "github.com/tektoncd/resolution/pkg/client/informers/externalversions" - fake "github.com/tektoncd/resolution/pkg/client/injection/client/fake" - factory "github.com/tektoncd/resolution/pkg/client/injection/informers/factory" + externalversions "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions" + fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake" + factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory" controller "knative.dev/pkg/controller" injection "knative.dev/pkg/injection" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go similarity index 80% rename from vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go index 3fc45b9a9a..8ebd3d0751 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/fake/fake.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,8 +21,8 @@ package fake import ( context "context" - fake "github.com/tektoncd/resolution/pkg/client/injection/informers/factory/fake" - resolutionrequest "github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest" + fake "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory/fake" + resolutionrequest "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest" controller "knative.dev/pkg/controller" injection "knative.dev/pkg/injection" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go similarity index 82% rename from vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go index 1b1c69c5e0..80c875420f 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/resolutionrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -21,12 +21,12 @@ package resolutionrequest import ( context "context" - apisresolutionv1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - versioned "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - v1alpha1 "github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1" - client "github.com/tektoncd/resolution/pkg/client/injection/client" - factory "github.com/tektoncd/resolution/pkg/client/injection/informers/factory" - resolutionv1alpha1 "github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1" + apisresolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + versioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + v1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1" + client "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client" + factory "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/factory" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" cache "k8s.io/client-go/tools/cache" @@ -59,7 +59,7 @@ func Get(ctx context.Context) v1alpha1.ResolutionRequestInformer { untyped := ctx.Value(Key{}) if untyped == nil { logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1.ResolutionRequestInformer from context.") + "Unable to fetch github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1.ResolutionRequestInformer from context.") } return untyped.(v1alpha1.ResolutionRequestInformer) } diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/expansion_generated.go similarity index 96% rename from vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/expansion_generated.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/expansion_generated.go index fa4f441a58..23cfdaadc8 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/expansion_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/resolutionrequest.go similarity index 97% rename from vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/resolutionrequest.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/resolutionrequest.go index 5d248b137f..648ce7c6dc 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1/resolutionrequest.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1/resolutionrequest.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ limitations under the License. package v1alpha1 import ( - v1alpha1 "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" + v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/expansion_generated.go new file mode 100644 index 0000000000..c5e6376e75 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// ResolutionRequestListerExpansion allows custom methods to be added to +// ResolutionRequestLister. +type ResolutionRequestListerExpansion interface{} + +// ResolutionRequestNamespaceListerExpansion allows custom methods to be added to +// ResolutionRequestNamespaceLister. +type ResolutionRequestNamespaceListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/resolutionrequest.go b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/resolutionrequest.go new file mode 100644 index 0000000000..e990684f0a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1beta1/resolutionrequest.go @@ -0,0 +1,99 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResolutionRequestLister helps list ResolutionRequests. +// All objects returned here must be treated as read-only. +type ResolutionRequestLister interface { + // List lists all ResolutionRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ResolutionRequest, err error) + // ResolutionRequests returns an object that can list and get ResolutionRequests. + ResolutionRequests(namespace string) ResolutionRequestNamespaceLister + ResolutionRequestListerExpansion +} + +// resolutionRequestLister implements the ResolutionRequestLister interface. +type resolutionRequestLister struct { + indexer cache.Indexer +} + +// NewResolutionRequestLister returns a new ResolutionRequestLister. +func NewResolutionRequestLister(indexer cache.Indexer) ResolutionRequestLister { + return &resolutionRequestLister{indexer: indexer} +} + +// List lists all ResolutionRequests in the indexer. +func (s *resolutionRequestLister) List(selector labels.Selector) (ret []*v1beta1.ResolutionRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ResolutionRequest)) + }) + return ret, err +} + +// ResolutionRequests returns an object that can list and get ResolutionRequests. +func (s *resolutionRequestLister) ResolutionRequests(namespace string) ResolutionRequestNamespaceLister { + return resolutionRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResolutionRequestNamespaceLister helps list and get ResolutionRequests. +// All objects returned here must be treated as read-only. +type ResolutionRequestNamespaceLister interface { + // List lists all ResolutionRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.ResolutionRequest, err error) + // Get retrieves the ResolutionRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.ResolutionRequest, error) + ResolutionRequestNamespaceListerExpansion +} + +// resolutionRequestNamespaceLister implements the ResolutionRequestNamespaceLister +// interface. +type resolutionRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResolutionRequests in the indexer for a given namespace. +func (s resolutionRequestNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ResolutionRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ResolutionRequest)) + }) + return ret, err +} + +// Get retrieves the ResolutionRequest from the indexer for a given namespace and name. 
+func (s resolutionRequestNamespaceLister) Get(name string) (*v1beta1.ResolutionRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("resolutionrequest"), name) + } + return obj.(*v1beta1.ResolutionRequest), nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cache/cache.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cache/cache.go index d04441dee1..b59c9020aa 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cache/cache.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cache/cache.go @@ -31,21 +31,8 @@ type eventData struct { Run *v1alpha1.Run `json:"run,omitempty"` } -// AddEventSentToCache adds the particular object to cache marking it as sent -func AddEventSentToCache(cacheClient *lru.Cache, event *cloudevents.Event) error { - if cacheClient == nil { - return errors.New("cache client is nil") - } - eventKey, err := EventKey(event) - if err != nil { - return err - } - cacheClient.Add(eventKey, nil) - return nil -} - -// IsCloudEventSent checks if the event exists in the cache -func IsCloudEventSent(cacheClient *lru.Cache, event *cloudevents.Event) (bool, error) { +// ContainsOrAddCloudEvent checks if the event exists in the cache and, if it does not, adds it in the same operation +func ContainsOrAddCloudEvent(cacheClient *lru.Cache, event *cloudevents.Event) (bool, error) { if cacheClient == nil { return false, errors.New("cache client is nil") } @@ -53,7 +40,8 @@ func IsCloudEventSent(cacheClient *lru.Cache, event *cloudevents.Event) (bool, e if err != nil { return false, err } - return cacheClient.Contains(eventKey), nil + isPresent, _ := cacheClient.ContainsOrAdd(eventKey, nil) + return isPresent, nil } // EventKey defines whether an event is considered different from another diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloud_event_controller.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloud_event_controller.go index 5072708477..8e166002f4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloud_event_controller.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent/cloud_event_controller.go @@ -32,7 +32,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/clock" controller "knative.dev/pkg/controller" "knative.dev/pkg/logging" ) @@ -149,7 +149,7 @@ func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error logger.Debugf("Sending cloudevent of type %q", event.Type()) // In case of Run event, check cache if cloudevent is already sent if isRun { - cloudEventSent, err := cache.IsCloudEventSent(cacheClient, event) + cloudEventSent, err := cache.ContainsOrAddCloudEvent(cacheClient, event) if err != nil { logger.Errorf("error while checking cache: %s", err) } @@ -163,15 +163,10 @@ func SendCloudEventWithRetries(ctx context.Context, object runtime.Object) error recorder := controller.GetEventRecorder(ctx) if recorder == nil { logger.Warnf("No recorder in context, cannot emit error event") + return } recorder.Event(object, corev1.EventTypeWarning, "Cloud Event Failure", result.Error()) } - // In case of Run event, add to the cache to avoid duplicate events - if isRun { - if err := cache.AddEventSentToCache(cacheClient, event); err != nil { - logger.Errorf("error
while adding sent event to cache: %s", err) - } - } }() return <-wasIn diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag/dag.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag/dag.go index 52d39eeac3..14da300559 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag/dag.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag/dag.go @@ -19,6 +19,7 @@ package dag import ( "errors" "fmt" + "sort" "strings" "github.com/tektoncd/pipeline/pkg/list" @@ -38,8 +39,8 @@ type Tasks interface { // Node represents a Task in a pipeline. type Node struct { - // Task represent the PipelineTask in Pipeline - Task Task + // Key represents a unique name of the node in a graph + Key string // Prev represent all the Previous task Nodes for the current Task Prev []*Node // Next represent all the Next task Nodes for the current Task @@ -62,7 +63,7 @@ func (g *Graph) addPipelineTask(t Task) (*Node, error) { return nil, errors.New("duplicate pipeline task") } newNode := &Node{ - Task: t, + Key: t.HashKey(), } g.Nodes[t.HashKey()] = newNode return newNode, nil @@ -79,6 +80,11 @@ func Build(tasks Tasks, deps map[string][]string) (*Graph, error) { } } + // Ensure no cycles in the graph + if err := findCyclesInDependencies(deps); err != nil { + return nil, fmt.Errorf("cycle detected; %w", err) + } + // Process all from and runAfter constraints to add task dependency for pt, taskDeps := range deps { for _, previousTask := range taskDeps { @@ -102,8 +108,8 @@ func GetCandidateTasks(g *Graph, doneTasks ...string) (sets.String, error) { visited := sets.NewString() for _, root := range roots { schedulable := findSchedulable(root, visited, tm) - for _, task := range schedulable { - d.Insert(task.HashKey()) + for _, taskName := range schedulable { + d.Insert(taskName) } } @@ -120,41 +126,72 @@ func GetCandidateTasks(g *Graph, doneTasks ...string) (sets.String, error) { return d, nil } -func linkPipelineTasks(prev *Node, next *Node) error { - // Check for self cycle - if prev.Task.HashKey() == next.Task.HashKey() { - return fmt.Errorf("cycle detected; task %q depends on itself", next.Task.HashKey()) - } - // Check if we are adding cycles. - path := []string{next.Task.HashKey(), prev.Task.HashKey()} - if err := lookForNode(prev.Prev, path, next.Task.HashKey()); err != nil { - return fmt.Errorf("cycle detected: %w", err) - } +func linkPipelineTasks(prev *Node, next *Node) { next.Prev = append(next.Prev, prev) prev.Next = append(prev.Next, next) - return nil } -func lookForNode(nodes []*Node, path []string, next string) error { - for _, n := range nodes { - path = append(path, n.Task.HashKey()) - if n.Task.HashKey() == next { - return errors.New(getVisitedPath(path)) +// findCyclesInDependencies uses Kahn's algorithm to detect cycles in the dependency map +func findCyclesInDependencies(deps map[string][]string) error { + independentTasks := sets.NewString() + dag := make(map[string]sets.String, len(deps)) + childMap := make(map[string]sets.String, len(deps)) + for task, taskDeps := range deps { + if len(taskDeps) == 0 { + continue } - if err := lookForNode(n.Prev, path, next); err != nil { - return err + dag[task] = sets.NewString(taskDeps...)
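+ // Seed the worklist with dependencies that have no dependencies of their own, and record the reverse edge dep -> task in childMap so dependents can be unblocked as their parents are popped.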
+ for _, dep := range taskDeps { + if len(deps[dep]) == 0 { + independentTasks.Insert(dep) + } + if children, ok := childMap[dep]; ok { + children.Insert(task) + } else { + childMap[dep] = sets.NewString(task) + } } } - return nil + + for { + parent, ok := independentTasks.PopAny() + if !ok { + break + } + children := childMap[parent] + for { + child, ok := children.PopAny() + if !ok { + break + } + dag[child].Delete(parent) + if dag[child].Len() == 0 { + independentTasks.Insert(child) + delete(dag, child) + } + } + } + + return getInterdependencyError(dag) } -func getVisitedPath(path []string) string { - // Reverse the path since we traversed the Graph using prev pointers. - for i := len(path)/2 - 1; i >= 0; i-- { - opp := len(path) - 1 - i - path[i], path[opp] = path[opp], path[i] +func getInterdependencyError(dag map[string]sets.String) error { + if len(dag) == 0 { + return nil } - return strings.Join(path, " -> ") + firstChild := "" + for task := range dag { + if firstChild == "" || firstChild > task { + firstChild = task + } + } + deps := dag[firstChild].List() + depNames := make([]string, 0, len(deps)) + sort.Strings(deps) + for _, dep := range deps { + depNames = append(depNames, fmt.Sprintf("%q", dep)) + } + return fmt.Errorf("task %q depends on %s", firstChild, strings.Join(depNames, ", ")) } func addLink(pt string, previousTask string, nodes map[string]*Node) error { @@ -163,9 +200,7 @@ func addLink(pt string, previousTask string, nodes map[string]*Node) error { return fmt.Errorf("task %s depends on %s but %s wasn't present in Pipeline", pt, previousTask, previousTask) } next := nodes[pt] - if err := linkPipelineTasks(prev, next); err != nil { - return fmt.Errorf("couldn't create link from %s to %s: %w", prev.Task.HashKey(), next.Task.HashKey(), err) - } + linkPipelineTasks(prev, next) return nil } @@ -179,16 +214,16 @@ func getRoots(g *Graph) []*Node { return n } -func findSchedulable(n *Node, visited sets.String, doneTasks sets.String) []Task { - if visited.Has(n.Task.HashKey()) { - return []Task{} +func findSchedulable(n *Node, visited sets.String, doneTasks sets.String) []string { + if visited.Has(n.Key) { + return []string{} } - visited.Insert(n.Task.HashKey()) - if doneTasks.Has(n.Task.HashKey()) { - schedulable := []Task{} + visited.Insert(n.Key) + if doneTasks.Has(n.Key) { + schedulable := []string{} // This one is done! Take note of it and look at the next candidate for _, next := range n.Next { - if _, ok := visited[next.Task.HashKey()]; !ok { + if _, ok := visited[next.Key]; !ok { schedulable = append(schedulable, findSchedulable(next, visited, doneTasks)...) } } @@ -197,10 +232,10 @@ func findSchedulable(n *Node, visited sets.String, doneTasks sets.String) []Task // This one isn't done! 
Return it if it's schedulable if isSchedulable(doneTasks, n.Prev) { // FIXME(vdemeester) - return []Task{n.Task} + return []string{n.Key} } // This one isn't done, but it also isn't ready to schedule - return []Task{} + return []string{} } func isSchedulable(doneTasks sets.String, prevs []*Node) bool { @@ -209,8 +244,8 @@ func isSchedulable(doneTasks sets.String, prevs []*Node) bool { } collected := []string{} for _, n := range prevs { - if doneTasks.Has(n.Task.HashKey()) { - collected = append(collected, n.Task.HashKey()) + if doneTasks.Has(n.Key) { + collected = append(collected, n.Key) } } return len(collected) == len(prevs) diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/annotations.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/annotations.go similarity index 84% rename from vendor/github.com/tektoncd/resolution/pkg/common/annotations.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/annotations.go index f55b4f3340..888e382169 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/common/annotations.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/annotations.go @@ -16,8 +16,10 @@ limitations under the License. package common +import "github.com/tektoncd/pipeline/pkg/apis/resolution" + const ( // AnnotationKeyContentType is the annotation key passed back // with a resolved resource's content type. - AnnotationKeyContentType = "content-type" + AnnotationKeyContentType = resolution.GroupName + "/content-type" ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/context.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/context.go new file mode 100644 index 0000000000..814d4d2c36 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/context.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import "context" + +// contextKey is a unique type to map common request-scoped +// context information. +type contextKey struct{} + +// requestNamespaceContextKey is the key stored in a context alongside +// the string namespace of a resolution request. +var requestNamespaceContextKey = contextKey{} + +// InjectRequestNamespace returns a new context with a request-scoped +// namespace. This value may only be set once per request; subsequent +// calls with the same context or a derived context will be ignored. +func InjectRequestNamespace(ctx context.Context, namespace string) context.Context { + // Once set don't allow the value to be overwritten. + if val := ctx.Value(requestNamespaceContextKey); val != nil { + return ctx + } + return context.WithValue(ctx, requestNamespaceContextKey, namespace) +} + +// RequestNamespace returns the namespace of the resolution request +// currently being processed or an empty string if the request somehow +// does not originate from a namespaced location. 
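+// Both helpers follow the standard Go context-key pattern: the unexported
+// contextKey type guarantees that values stored under requestNamespaceContextKey
+// cannot collide with context values set by other packages.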
+func RequestNamespace(ctx context.Context) string { + if val := ctx.Value(requestNamespaceContextKey); val != nil { + if str, ok := val.(string); ok { + return str + } + } + return "" +} diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/doc.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/common/doc.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/doc.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/errors.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/errors.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/common/errors.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/errors.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/labels.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/labels.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/common/labels.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/labels.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/messages.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/messages.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/common/messages.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/messages.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/common/statuses.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/common/statuses.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/common/statuses.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/common/statuses.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/resource/crd_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/crd_resource.go similarity index 94% rename from vendor/github.com/tektoncd/resolution/pkg/resource/crd_resource.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/crd_resource.go index e231c884b6..814f8845c8 100644 --- a/vendor/github.com/tektoncd/resolution/pkg/resource/crd_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/crd_resource.go @@ -22,10 +22,10 @@ import ( "errors" "fmt" - "github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1" - rrclient "github.com/tektoncd/resolution/pkg/client/clientset/versioned" - rrlisters "github.com/tektoncd/resolution/pkg/client/listers/resolution/v1alpha1" - resolutioncommon "github.com/tektoncd/resolution/pkg/common" + "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" + rrclient "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned" + rrlisters "github.com/tektoncd/pipeline/pkg/client/resolution/listers/resolution/v1alpha1" + resolutioncommon "github.com/tektoncd/pipeline/pkg/resolution/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) diff --git a/vendor/github.com/tektoncd/resolution/pkg/resource/name.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/name.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/resource/name.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/name.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/resource/request.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/request.go similarity index 100% rename from 
vendor/github.com/tektoncd/resolution/pkg/resource/request.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/request.go diff --git a/vendor/github.com/tektoncd/resolution/pkg/resource/resource.go b/vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/resource.go similarity index 100% rename from vendor/github.com/tektoncd/resolution/pkg/resource/resource.go rename to vendor/github.com/tektoncd/pipeline/pkg/resolution/resource/resource.go diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index f98d03569b..78807fc33a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -50,7 +50,7 @@ var intIndexRegex = regexp.MustCompile(intIndex) // ValidateVariable makes sure all variables in the provided string are known func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := extractVariablesFromString(value, prefix); present { + if vs, present, _ := ExtractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if !vars.Has(v) { @@ -66,7 +66,7 @@ func ValidateVariable(name, value, prefix, locationName, path string, vars sets. // ValidateVariableP makes sure all variables for a parameter in the provided string are known func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present, errString := extractVariablesFromString(value, prefix); present { + if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, @@ -90,7 +90,7 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError // ValidateVariableProhibited verifies that variables matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibited(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { - if vs, present, _ := extractVariablesFromString(value, prefix); present { + if vs, present, _ := ExtractVariablesFromString(value, prefix); present { for _, v := range vs { v = strings.TrimSuffix(v, "[*]") if vars.Has(v) { @@ -106,7 +106,7 @@ func ValidateVariableProhibited(name, value, prefix, locationName, path string, // ValidateVariableProhibitedP verifies that variables for a parameter matching the relevant string expressions do not reference any of the names present in vars. func ValidateVariableProhibitedP(value, prefix string, vars sets.String) *apis.FieldError { - if vs, present, errString := extractVariablesFromString(value, prefix); present { + if vs, present, errString := ExtractVariablesFromString(value, prefix); present { if errString != "" { return &apis.FieldError{ Message: errString, @@ -155,7 +155,7 @@ func ValidateEntireVariableProhibitedP(value, prefix string, vars sets.String) * // ValidateVariableIsolated verifies that variables matching the relevant string expressions are completely isolated if present. 
 func ValidateVariableIsolated(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError {
-	if vs, present, _ := extractVariablesFromString(value, prefix); present {
+	if vs, present, _ := ExtractVariablesFromString(value, prefix); present {
 		firstMatch, _ := extractExpressionFromString(value, prefix)
 		for _, v := range vs {
 			v = strings.TrimSuffix(v, "[*]")
@@ -174,7 +174,7 @@ func ValidateVariableIsolated(name, value, prefix, locationName, path string, va
 
 // ValidateVariableIsolatedP verifies that variables matching the relevant string expressions are completely isolated if present.
 func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.FieldError {
-	if vs, present, errString := extractVariablesFromString(value, prefix); present {
+	if vs, present, errString := ExtractVariablesFromString(value, prefix); present {
 		if errString != "" {
 			return &apis.FieldError{
 				Message: errString,
@@ -222,7 +222,7 @@ func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, v
 	return false, nil
 }
 
-// Extract a the first full string expressions found (e.g "$(input.params.foo)"). Return
+// extractExpressionFromString extracts the first full string expression found (e.g. "$(input.params.foo)"). Returns
 // "" and false if nothing is found.
 func extractExpressionFromString(s, prefix string) (string, bool) {
 	pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution)
@@ -234,11 +234,16 @@ func extractExpressionFromString(s, prefix string) (string, bool) {
 	return match[0], true
 }
 
-func extractVariablesFromString(s, prefix string) ([]string, bool, string) {
+// ExtractVariablesFromString extracts variables from an input string s with the given prefix via regex matching.
+// It returns a slice of strings containing the extracted variables, a bool flag indicating whether any matches were found,
+// and an error string if the referencing of parameters is invalid.
+// If the string does not contain the input prefix, the returned slice of strings is empty.
+func ExtractVariablesFromString(s, prefix string) ([]string, bool, string) {
 	pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution)
 	re := regexp.MustCompile(pattern)
 	matches := re.FindAllStringSubmatch(s, -1)
 	errString := ""
+	// The input string does not contain the prefix, so no matches are found.
 	if len(matches) == 0 {
 		return []string{}, false, ""
 	}
diff --git a/vendor/github.com/tektoncd/pipeline/test/README.md b/vendor/github.com/tektoncd/pipeline/test/README.md
index 46bb6967a1..4b4e469232 100644
--- a/vendor/github.com/tektoncd/pipeline/test/README.md
+++ b/vendor/github.com/tektoncd/pipeline/test/README.md
@@ -2,20 +2,36 @@
 
 To run tests:
 
-```shell
-# Land the latest codes
-ko apply -R -f ./config/
-
+[Unit tests](#unit-tests) and build tests (those run by [presubmits](#presubmit-tests)) run against your Pipelines clone:
+```sh
 # Unit tests
 go test ./...
+# Build tests
+./test/presubmit-tests.sh --build-tests
+```
+
+[E2E tests](#end-to-end-tests) run test cases in your local Pipelines clone
+against the Pipelines installation on your current kube cluster.
+To ensure your local changes are reflected on your cluster, you must first build
+and install them with `ko apply -R -f ./config/`.
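For a quick sense of how the newly exported `ExtractVariablesFromString` (see the substitution.go hunk above) behaves, here is a minimal sketch; the input value, the "params" prefix, and the exact shape of the printed output are illustrative assumptions rather than part of the vendored change:

```go
package main

import (
	"fmt"

	"github.com/tektoncd/pipeline/pkg/substitution"
)

func main() {
	// Hypothetical field value referencing two params via the "params" prefix.
	value := "echo $(params.foo) $(params.bar[*])"
	vars, present, errString := substitution.ExtractVariablesFromString(value, "params")
	// Expect the extracted variable names, present == true, and an empty
	// errString for a well-formed reference.
	fmt.Println(vars, present, errString)
}
```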
-# Integration tests (against your current kube cluster)
+```shell
+# Integration tests
 go test -v -count=1 -tags=e2e -timeout=20m ./test
 
-# Conformance tests (against your current kube cluster)
+# Conformance tests
 go test -v -count=1 -tags=conformance -timeout=10m ./test
 ```
 
+Running the commands above starts the tests on the cluster of the `current-context`
+in your local kubeconfig file (`~/.kube/config` by default).
+
+> Sometimes local tests pass but presubmit tests fail; one possible reason
+is a difference in the running environments. The environments that our presubmit
+tests use are stored in `./*.env` files. Specifically:
+> - e2e-tests-kind-prow-alpha.env for [`pull-tekton-pipeline-alpha-integration-tests`](https://github.com/tektoncd/plumbing/blob/d2c8ccb63d02c6e72c62def788af32d63ff1981a/prow/config.yaml#L1304)
+> - e2e-tests-kind-prow.env for [`pull-tekton-pipeline-integration-tests`](https://github.com/tektoncd/plumbing/blob/d2c8ccb63d02c6e72c62def788af32d63ff1981a/prow/config.yaml#L1249)
+
 ## Unit tests
 
 Unit tests live side by side with the code they are testing and can be run with:
 
diff --git a/vendor/github.com/tektoncd/pipeline/test/clients.go b/vendor/github.com/tektoncd/pipeline/test/clients.go
index 3e0510e784..e8ea88d149 100644
--- a/vendor/github.com/tektoncd/pipeline/test/clients.go
+++ b/vendor/github.com/tektoncd/pipeline/test/clients.go
@@ -45,6 +45,8 @@ import (
 	"github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 	"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1"
 	"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1"
+	resolutionversioned "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned"
+	resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/typed/resolution/v1alpha1"
 	resourceversioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned"
 	resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1"
 	"k8s.io/client-go/kubernetes"
@@ -55,13 +57,14 @@ import (
 
 type clients struct {
 	KubeClient kubernetes.Interface
 
-	PipelineClient         v1beta1.PipelineInterface
-	ClusterTaskClient      v1beta1.ClusterTaskInterface
-	TaskClient             v1beta1.TaskInterface
-	TaskRunClient          v1beta1.TaskRunInterface
-	PipelineRunClient      v1beta1.PipelineRunInterface
-	PipelineResourceClient resourcev1alpha1.PipelineResourceInterface
-	RunClient              v1alpha1.RunInterface
+	PipelineClient          v1beta1.PipelineInterface
+	ClusterTaskClient       v1beta1.ClusterTaskInterface
+	TaskClient              v1beta1.TaskInterface
+	TaskRunClient           v1beta1.TaskRunInterface
+	PipelineRunClient       v1beta1.PipelineRunInterface
+	PipelineResourceClient  resourcev1alpha1.PipelineResourceInterface
+	RunClient               v1alpha1.RunInterface
+	ResolutionRequestclient resolutionv1alpha1.ResolutionRequestInterface
 }
 
 // newClients instantiates and returns several clientsets required for making requests to the
@@ -89,7 +92,11 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client
 	}
 	rcs, err := resourceversioned.NewForConfig(cfg)
 	if err != nil {
-		t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err)
+		t.Fatalf("failed to create pipeline resource clientset from config file at %s: %s", configPath, err)
+	}
+	rrcs, err := resolutionversioned.NewForConfig(cfg)
+	if err != nil {
+		t.Fatalf("failed to create resolution clientset from config file at %s: %s", configPath, err)
 	}
 	c.PipelineClient =
cs.TektonV1beta1().Pipelines(namespace) c.ClusterTaskClient = cs.TektonV1beta1().ClusterTasks() @@ -98,5 +105,6 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client c.PipelineRunClient = cs.TektonV1beta1().PipelineRuns(namespace) c.PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) c.RunClient = cs.TektonV1alpha1().Runs(namespace) + c.ResolutionRequestclient = rrcs.ResolutionV1alpha1().ResolutionRequests(namespace) return c } diff --git a/vendor/github.com/tektoncd/pipeline/test/controller.go b/vendor/github.com/tektoncd/pipeline/test/controller.go index 5c996054b5..309ab715b2 100644 --- a/vendor/github.com/tektoncd/pipeline/test/controller.go +++ b/vendor/github.com/tektoncd/pipeline/test/controller.go @@ -25,6 +25,7 @@ import ( // Link in the fakes so they get injected into injection.Fake "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resolutionv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resolution/v1alpha1" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" @@ -36,15 +37,15 @@ import ( fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake" faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task/fake" faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake" + fakeresolutionclientset "github.com/tektoncd/pipeline/pkg/client/resolution/clientset/versioned/fake" + resolutioninformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resolution/informers/externalversions/resolution/v1alpha1" + fakeresolutionrequestclient "github.com/tektoncd/pipeline/pkg/client/resolution/injection/client/fake" + fakeresolutionrequestinformer "github.com/tektoncd/pipeline/pkg/client/resolution/injection/informers/resolution/v1alpha1/resolutionrequest/fake" fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/fake" cloudeventclient "github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" - fakeresolutionclientset "github.com/tektoncd/resolution/pkg/client/clientset/versioned/fake" - resolutioninformersv1alpha1 "github.com/tektoncd/resolution/pkg/client/informers/externalversions/resolution/v1alpha1" - fakeresolutionrequestclient "github.com/tektoncd/resolution/pkg/client/injection/client/fake" - fakeresolutionrequestinformer "github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest/fake" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -67,18 +68,19 @@ import ( // Data represents the desired state of the system (i.e. existing resources) to seed controllers // with. 
type Data struct { - PipelineRuns []*v1beta1.PipelineRun - Pipelines []*v1beta1.Pipeline - TaskRuns []*v1beta1.TaskRun - Tasks []*v1beta1.Task - ClusterTasks []*v1beta1.ClusterTask - PipelineResources []*resourcev1alpha1.PipelineResource - Runs []*v1alpha1.Run - Pods []*corev1.Pod - Namespaces []*corev1.Namespace - ConfigMaps []*corev1.ConfigMap - ServiceAccounts []*corev1.ServiceAccount - LimitRange []*corev1.LimitRange + PipelineRuns []*v1beta1.PipelineRun + Pipelines []*v1beta1.Pipeline + TaskRuns []*v1beta1.TaskRun + Tasks []*v1beta1.Task + ClusterTasks []*v1beta1.ClusterTask + PipelineResources []*resourcev1alpha1.PipelineResource + Runs []*v1alpha1.Run + Pods []*corev1.Pod + Namespaces []*corev1.Namespace + ConfigMaps []*corev1.ConfigMap + ServiceAccounts []*corev1.ServiceAccount + LimitRange []*corev1.LimitRange + ResolutionRequests []*resolutionv1alpha1.ResolutionRequest } // Clients holds references to clients which are useful for reconciler tests. @@ -269,6 +271,12 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers } } c.ResolutionRequests.PrependReactor("*", "resolutionrequests", AddToInformer(t, i.ResolutionRequest.Informer().GetIndexer())) + for _, rr := range d.ResolutionRequests { + rr := rr.DeepCopy() // Avoid assumptions that the informer's copy is modified. + if _, err := c.ResolutionRequests.ResolutionV1alpha1().ResolutionRequests(rr.Namespace).Create(ctx, rr, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + } c.Pipeline.ClearActions() c.Kube.ClearActions() c.ResolutionRequests.ClearActions() diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh index 585a96c0f5..1106d64fd2 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh @@ -21,13 +21,19 @@ source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scri function install_pipeline_crd() { echo ">> Deploying Tekton Pipelines" local ko_target="$(mktemp)" - ko resolve -R -f config/ > "${ko_target}" || fail_test "Pipeline image resolve failed" + ko resolve -l 'app.kubernetes.io/component!=resolvers' -R -f config/ > "${ko_target}" || fail_test "Pipeline image resolve failed" cat "${ko_target}" | sed -e 's%"level": "info"%"level": "debug"%' \ | sed -e 's%loglevel.controller: "info"%loglevel.controller: "debug"%' \ | sed -e 's%loglevel.webhook: "info"%loglevel.webhook: "debug"%' \ | kubectl apply -R -f - || fail_test "Build pipeline installation failed" + verify_pipeline_installation + if [ "${PIPELINE_FEATURE_GATE}" == "alpha" ]; then + ko apply -f config/resolvers || fail_test "Resolvers installation failed" + verify_resolvers_installation + fi + export SYSTEM_NAMESPACE=tekton-pipelines } @@ -35,6 +41,11 @@ function install_pipeline_crd() { function install_pipeline_crd_version() { echo ">> Deploying Tekton Pipelines of Version $1" kubectl apply -f "https://github.com/tektoncd/pipeline/releases/download/$1/release.yaml" || fail_test "Build pipeline installation failed of Version $1" + + if [ "${PIPELINE_FEATURE_GATE}" == "alpha" ]; then + kubectl apply -f "https://github.com/tektoncd/pipeline/releases/download/$1/resolvers.yaml" || fail_test "Resolvers installation failed of Version $1" + fi + verify_pipeline_installation } @@ -46,6 +57,14 @@ function verify_pipeline_installation() { wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up" } +function verify_resolvers_installation() { + # 
Make sure that everything is cleaned up in the current namespace. + delete_resolvers_resources + + # Wait for pods to be running in the namespaces we are deploying to + wait_until_pods_running tekton-pipelines-resolvers || fail_test "Tekton Pipeline Resolvers did not come up" +} + function uninstall_pipeline_crd() { echo ">> Uninstalling Tekton Pipelines" ko delete --ignore-not-found=true -R -f config/ @@ -58,6 +77,10 @@ function uninstall_pipeline_crd_version() { echo ">> Uninstalling Tekton Pipelines of version $1" kubectl delete --ignore-not-found=true -f "https://github.com/tektoncd/pipeline/releases/download/$1/release.yaml" + if [ "${PIPELINE_FEATURE_GATE}" == "alpha" ]; then + kubectl delete --ignore-not-found=true -f "https://github.com/tektoncd/pipeline/releases/download/$1/resolvers.yaml" + fi + # Make sure that everything is cleaned up in the current namespace. delete_pipeline_resources } @@ -67,3 +90,7 @@ function delete_pipeline_resources() { kubectl delete --ignore-not-found=true ${res}.tekton.dev --all done } + +function delete_resolvers_resources() { + kubectl delete --ignore-not-found=true resolutionrequests.resolution.tekton.dev --all +} diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh index 7a23a8016e..44c1c2ed94 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh @@ -43,14 +43,24 @@ failed=0 function set_feature_gate() { local gate="$1" + local resolver="false" if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then printf "Invalid gate %s\n" ${gate} exit 255 fi + if [ "$gate" == "alpha" ]; then + resolver="true" + fi printf "Setting feature gate to %s\n", ${gate} jsonpatch=$(printf "{\"data\": {\"enable-api-fields\": \"%s\"}}" $1) echo "feature-flags ConfigMap patch: ${jsonpatch}" kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch" + if [ "$gate" == "alpha" ]; then + printf "enabling resolvers\n" + jsonpatch=$(printf "{\"data\": {\"enable-git-resolver\": \"true\", \"enable-hub-resolver\": \"true\", \"enable-bundles-resolver\": \"true\", \"enable-cluster-resolver\": \"true\"}}") + echo "resolvers-feature-flags ConfigMap patch: ${jsonpatch}" + kubectl patch configmap resolvers-feature-flags -n tekton-pipelines-resolvers -p "$jsonpatch" + fi } function set_embedded_status() { diff --git a/vendor/github.com/tektoncd/pipeline/test/featureflags.go b/vendor/github.com/tektoncd/pipeline/test/featureflags.go index e901e2fba8..59fb1d767f 100644 --- a/vendor/github.com/tektoncd/pipeline/test/featureflags.go +++ b/vendor/github.com/tektoncd/pipeline/test/featureflags.go @@ -6,10 +6,11 @@ import ( "strings" "testing" - "k8s.io/client-go/kubernetes" - "github.com/tektoncd/pipeline/pkg/apis/config" + resolverconfig "github.com/tektoncd/pipeline/pkg/apis/config/resolver" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "knative.dev/pkg/system" ) @@ -23,15 +24,67 @@ func requireAnyGate(gates map[string]string) func(context.Context, *testing.T, * if err != nil { t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err) } + resolverFeatureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(resolverconfig.ResolversNamespace(system.Namespace())). 
+		Get(ctx, resolverconfig.GetFeatureFlagsConfigName(), metav1.GetOptions{})
+	if err != nil && !errors.IsNotFound(err) {
+		t.Fatalf("Failed to get ConfigMap `%s`: %s", resolverconfig.GetFeatureFlagsConfigName(), err)
+	}
+	resolverMap := make(map[string]string)
+	if resolverFeatureFlagsCM != nil {
+		resolverMap = resolverFeatureFlagsCM.Data
+	}
 	pairs := []string{}
 	for name, value := range gates {
 		actual, ok := featureFlagsCM.Data[name]
 		if ok && value == actual {
 			return
 		}
+		actual, ok = resolverMap[name]
+		if ok && value == actual {
+			return
+		}
 		pairs = append(pairs, fmt.Sprintf("%q: %q", name, value))
 	}
-	t.Skipf("No feature flag in namespace %q matching %s\nExisting feature flag: %#v", system.Namespace(), strings.Join(pairs, " or "), featureFlagsCM.Data)
+	t.Skipf("No feature flag in namespace %q matching %s\nExisting feature flag: %#v\nExisting resolver feature flag (in namespace %q): %#v",
+		system.Namespace(), strings.Join(pairs, " or "), featureFlagsCM.Data,
+		resolverconfig.ResolversNamespace(system.Namespace()), resolverMap)
+	}
+}
+
+// requireAllGates returns a setup func that will skip the current
+// test unless all of the feature-flags in the given map match
+// what's in the feature-flags ConfigMap. It will fatally fail
+// the test if it cannot get the feature-flags ConfigMap.
+func requireAllGates(gates map[string]string) func(context.Context, *testing.T, *clients, string) {
+	return func(ctx context.Context, t *testing.T, c *clients, namespace string) {
+		featureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(system.Namespace()).Get(ctx, config.GetFeatureFlagsConfigName(), metav1.GetOptions{})
+		if err != nil {
+			t.Fatalf("Failed to get ConfigMap `%s`: %s", config.GetFeatureFlagsConfigName(), err)
+		}
+		resolverFeatureFlagsCM, err := c.KubeClient.CoreV1().ConfigMaps(resolverconfig.ResolversNamespace(system.Namespace())).
+			Get(ctx, resolverconfig.GetFeatureFlagsConfigName(), metav1.GetOptions{})
+		if err != nil && !errors.IsNotFound(err) {
+			t.Fatalf("Failed to get ConfigMap `%s`: %s", resolverconfig.GetFeatureFlagsConfigName(), err)
+		}
+		resolverMap := make(map[string]string)
+		if resolverFeatureFlagsCM != nil {
+			resolverMap = resolverFeatureFlagsCM.Data
+		}
+		pairs := []string{}
+		for name, value := range gates {
+			actual, ok := featureFlagsCM.Data[name]
+			if !ok {
+				actual, ok = resolverMap[name]
+				if !ok || value != actual {
+					pairs = append(pairs, fmt.Sprintf("%q is %q, want %s", name, actual, value))
+				}
+			} else if value != actual {
+				pairs = append(pairs, fmt.Sprintf("%q is %q, want %s", name, actual, value))
+			}
+		}
+		if len(pairs) > 0 {
+			t.Skipf("One or more feature flags not matching required: %s", strings.Join(pairs, "; "))
+		}
 	}
 }
diff --git a/vendor/github.com/tektoncd/pipeline/test/kubectl.go b/vendor/github.com/tektoncd/pipeline/test/kubectl.go
new file mode 100644
index 0000000000..9cbce494da
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/test/kubectl.go
@@ -0,0 +1,34 @@
+/*
+ Copyright 2022 The Tekton Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ +*/ + +package test + +import ( + "bytes" + "os/exec" + "regexp" +) + +var ( + defaultNamespaceRE = regexp.MustCompile("namespace: default") +) + +func kubectlCreate(input []byte, namespace string) ([]byte, error) { + cmd := exec.Command("kubectl", "create", "-n", namespace, "-f", "-") + cmd.Stdin = bytes.NewReader(input) + return cmd.CombinedOutput() +} diff --git a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh index 3cb616f20c..76cb5b0a88 100644 --- a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh @@ -61,7 +61,7 @@ function ko_resolve() { header "Running `ko resolve`" cat < .ko.yaml - defaultBaseImage: ghcr.io/distroless/static + defaultBaseImage: distroless.dev/static baseImageOverrides: # Use the combined base image for images that should include Windows support. # NOTE: Make sure this list of images to use the combined base image is in sync with what's in tekton/publish.yaml's 'create-ko-yaml' Task. @@ -69,10 +69,11 @@ function ko_resolve() { github.com/tektoncd/pipeline/cmd/nop: gcr.io/tekton-releases/github.com/tektoncd/pipeline/combined-base-image:latest github.com/tektoncd/pipeline/cmd/workingdirinit: gcr.io/tekton-releases/github.com/tektoncd/pipeline/combined-base-image:latest - github.com/tektoncd/pipeline/cmd/git-init: ghcr.io/distroless/git + github.com/tektoncd/pipeline/cmd/git-init: distroless.dev/git EOF - KO_DOCKER_REPO=example.com ko resolve --platform=all --push=false -R -f config 1>/dev/null + KO_DOCKER_REPO=example.com ko resolve -l 'app.kubernetes.io/component!=resolvers' --platform=all --push=false -R -f config 1>/dev/null + KO_DOCKER_REPO=example.com ko resolve --platform=all --push=false -f config/resolvers 1>/dev/null } function post_build_tests() { diff --git a/vendor/github.com/tektoncd/pipeline/test/resolution.go b/vendor/github.com/tektoncd/pipeline/test/resolution.go index 026ab99e23..4238963330 100644 --- a/vendor/github.com/tektoncd/pipeline/test/resolution.go +++ b/vendor/github.com/tektoncd/pipeline/test/resolution.go @@ -2,8 +2,11 @@ package test import ( "context" + "errors" + "fmt" + "strings" - resolution "github.com/tektoncd/resolution/pkg/resource" + resolution "github.com/tektoncd/pipeline/pkg/resolution/resource" ) var _ resolution.Requester = &Requester{} @@ -37,12 +40,32 @@ type Requester struct { ResolvedResource resolution.ResolvedResource // An error to return when a request is submitted. SubmitErr error + // Params that should match those on the request in order to return the resolved resource + Params map[string]string } // Submit implements resolution.Requester, accepting the name of a // resolver and a request for a specific remote file, and then returns // whatever mock data was provided on initialization. 
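+// If Params are set on the Requester, Submit returns the mock data only when
+// the incoming request's params match them; otherwise it returns an error
+// listing each mismatched param.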
func (r *Requester) Submit(ctx context.Context, resolverName resolution.ResolverName, req resolution.Request) (resolution.ResolvedResource, error) { + if len(r.Params) == 0 { + return r.ResolvedResource, r.SubmitErr + } + reqParams := make(map[string]string) + for k, v := range req.Params() { + reqParams[k] = v + } + + var wrongParams []string + for k, v := range r.Params { + if reqValue, ok := reqParams[k]; !ok || reqValue != v { + wrongParams = append(wrongParams, fmt.Sprintf("expected %s param to be %s, but was %s", k, v, reqValue)) + } + } + if len(wrongParams) > 0 { + return nil, errors.New(strings.Join(wrongParams, "; ")) + } + return r.ResolvedResource, r.SubmitErr } diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/doc.go b/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/doc.go deleted file mode 100644 index 4c05a6de94..0000000000 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +groupName=resolution.tekton.dev -package v1alpha1 diff --git a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go b/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go deleted file mode 100644 index ab59814297..0000000000 --- a/vendor/github.com/tektoncd/resolution/pkg/apis/resolution/v1alpha1/resolution_request_defaults.go +++ /dev/null @@ -1,14 +0,0 @@ -package v1alpha1 - -import "context" - -// SetDefaults walks a ResolutionRequest object and sets any default -// values that are required to be set before a reconciler sees it. -func (rr *ResolutionRequest) SetDefaults(ctx context.Context) { - if rr.TypeMeta.Kind == "" { - rr.TypeMeta.Kind = "ResolutionRequest" - } - if rr.TypeMeta.APIVersion == "" { - rr.TypeMeta.APIVersion = "resolution.tekton.dev/v1alpha1" - } -} diff --git a/vendor/github.com/theupdateframework/go-tuf/.gitattributes b/vendor/github.com/theupdateframework/go-tuf/.gitattributes new file mode 100644 index 0000000000..2fad43c96a --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/.gitattributes @@ -0,0 +1,5 @@ +# go enforces lf line endings +*.go eol=lf + +# testdata should not be mangled by git +*.json binary diff --git a/vendor/github.com/theupdateframework/go-tuf/.golangci.yml b/vendor/github.com/theupdateframework/go-tuf/.golangci.yml new file mode 100644 index 0000000000..6e8bf3c86a --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/.golangci.yml @@ -0,0 +1,15 @@ +run: + # Lint using Go 1.17, since some linters are disabled by default for Go 1.18 + # until generics are supported. 
+  # See https://github.com/golangci/golangci-lint/issues/2649
+  go: '1.17'
+
+linters:
+  disable-all: true
+  enable:
+    - staticcheck
+    - gofmt
+    - govet
+    - gosimple
+    - unused
+    - typecheck
diff --git a/vendor/github.com/theupdateframework/go-tuf/ALUMNI b/vendor/github.com/theupdateframework/go-tuf/ALUMNI
deleted file mode 100644
index 294df41397..0000000000
--- a/vendor/github.com/theupdateframework/go-tuf/ALUMNI
+++ /dev/null
@@ -1,4 +0,0 @@
-Christian Rebischke (github: shibumi)
-Erick Tryzelaar (github: erickt)
-Jonathan Rudenberg (github: titanous)
-Lewis Marshall (github: lmars)
\ No newline at end of file
diff --git a/vendor/github.com/theupdateframework/go-tuf/CONTRIBUTING.md b/vendor/github.com/theupdateframework/go-tuf/CONTRIBUTING.md
deleted file mode 100644
index 71b39ce267..0000000000
--- a/vendor/github.com/theupdateframework/go-tuf/CONTRIBUTING.md
+++ /dev/null
@@ -1 +0,0 @@
-See the [Flynn contributing guide](https://flynn.io/docs/contributing).
diff --git a/vendor/github.com/theupdateframework/go-tuf/MAINTAINERS b/vendor/github.com/theupdateframework/go-tuf/MAINTAINERS
deleted file mode 100644
index d3443b1ab4..0000000000
--- a/vendor/github.com/theupdateframework/go-tuf/MAINTAINERS
+++ /dev/null
@@ -1,6 +0,0 @@
-Asra Ali (github: asraa)
-Trishank Karthik Kuppusamy (github: trishankatdatadog)
-Joshua Lock (github: joshuagl)
-Ethan Lownman (github: ethan-lowman-dd)
-Marina Moore (github: mnm678)
-Hossein Siadati (github: hosseinsia)
diff --git a/vendor/github.com/theupdateframework/go-tuf/README.md b/vendor/github.com/theupdateframework/go-tuf/README.md
index 2ba0a7b764..b1a4b6eabb 100644
--- a/vendor/github.com/theupdateframework/go-tuf/README.md
+++ b/vendor/github.com/theupdateframework/go-tuf/README.md
@@ -32,7 +32,7 @@ The directories contain the following files:
 
 ### Install
 
-`go-tuf` is tested on Go versions 1.16 and 1.17.
+`go-tuf` is tested on Go version 1.18.
 
 ```bash
 go get github.com/theupdateframework/go-tuf/cmd/tuf
@@ -130,6 +130,32 @@ Changes the passphrase for given role keys file.
 
 The CLI supports reading both the existing and the new passphrase via the
 following environment variables - `TUF_{{ROLE}}_PASSPHRASE` and respectively
 `TUF_NEW_{{ROLE}}_PASSPHRASE`
 
+#### `tuf payload <metadata>`
+
+Outputs the metadata file for a role in a ready-to-sign (canonicalized) format.
+
+See also `tuf sign-payload` and `tuf add-signatures`.
+
+#### `tuf sign-payload --role=<role> <path>`
+
+Sign a file (outside of the TUF repo) using keys (in the TUF keys database,
+typically produced by `tuf gen-key`) for the given `role` (from the TUF repo).
+
+Typically, `path` will be a file containing the output of `tuf payload`.
+
+See also `tuf add-signatures`.
+
+#### `tuf add-signatures --signatures <sig_file> <metadata>`
+
+Adds signatures (the output of `tuf sign-payload`) to the given role metadata file.
+
+If the signature does not verify, it will not be added.
+
+#### `tuf status --valid-at <date> <role>`
+
+Check if the role's metadata will be expired on the given date.
+
 #### Usage of environment variables
 
 The `tuf` CLI supports receiving passphrases via environment variables in
@@ -229,6 +255,46 @@ Enter root keys passphrase:
 
 The staged `root.json` can now be copied back to the repo box ready to be
 committed alongside other metadata files.
 
+#### Alternate signing flow
+
+Instead of manually copying `root.json` into the TUF repository on the root box,
+you can use the `tuf payload`, `tuf sign-payload`, `tuf add-signatures` flow.
+ +On the repo box, get the `root.json` payload in a canonical format: + +``` bash +$ tuf payload root.json > root.json.payload +``` + +Copy `root.json.payload` to the root box and sign it: + + +``` bash +$ tuf sign-payload --role=root root.json.payload > root.json.sigs +Enter root keys passphrase: +``` + +Copy `root.json.sigs` back to the repo box and import the signatures: + +``` bash +$ tuf add-signatures --signatures root.json.sigs root.json +``` + +This achieves the same state as the above flow for the repo box: + +```bash +$ tree . +. +├── keys +│   ├── snapshot.json +│   ├── targets.json +│   └── timestamp.json +├── repository +└── staged + ├── root.json + └── targets +``` + #### Add a target file Assuming a staged, signed `root` metadata file and the file to add exists at @@ -543,12 +609,14 @@ For the client package, see https://godoc.org/github.com/theupdateframework/go-t For the client CLI, see https://github.com/theupdateframework/go-tuf/tree/master/cmd/tuf-client. -## Development +## Contributing and Development -For local development, `go-tuf` requires Go version 1.16 or 1.17. +For local development, `go-tuf` requires Go version 1.18. The [Python interoperability tests](client/python_interop/) require Python 3 (available as `python` on the `$PATH`) and the [`python-tuf` package](https://github.com/theupdateframework/python-tuf) installed (`pip install tuf`). To update the data for these tests requires Docker and make (see test data [README.md](client/python_interop/testdata/README.md) for details). + +Please see [CONTRIBUTING.md](docs/CONTRIBUTING.md) for contribution guidelines before making your first contribution! diff --git a/vendor/github.com/theupdateframework/go-tuf/client/client.go b/vendor/github.com/theupdateframework/go-tuf/client/client.go index 893e1610a1..17ddc98058 100644 --- a/vendor/github.com/theupdateframework/go-tuf/client/client.go +++ b/vendor/github.com/theupdateframework/go-tuf/client/client.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "io" - "io/ioutil" "github.com/theupdateframework/go-tuf/data" "github.com/theupdateframework/go-tuf/util" @@ -69,10 +68,10 @@ type Client struct { // The following four fields represent the versions of metatdata either // from local storage or from recently downloaded metadata - rootVer int - targetsVer int - snapshotVer int - timestampVer int + rootVer int64 + targetsVer int64 + snapshotVer int64 + timestampVer int64 // targets is the list of available targets, either from local storage // or from recently downloaded targets metadata @@ -106,56 +105,13 @@ func NewClient(local LocalStore, remote RemoteStore) *Client { } } -// Init initializes a local repository. -// -// The latest root.json is fetched from remote storage, verified using rootKeys -// and threshold, and then saved in local storage. It is expected that rootKeys -// were securely distributed with the software being updated. -// -// Deprecated: Use c.InitLocal and c.Update to initialize a local repository. -func (c *Client) Init(rootKeys []*data.PublicKey, threshold int) error { - if len(rootKeys) < threshold { - return ErrInsufficientKeys - } - rootJSON, err := c.downloadMetaUnsafe("root.json", defaultRootDownloadLimit) - if err != nil { - return err - } - - // create a new key database, and add all the public `rootKeys` to it. 
- c.db = verify.NewDB() - rootKeyIDs := make([]string, 0, len(rootKeys)) - for _, key := range rootKeys { - for _, id := range key.IDs() { - rootKeyIDs = append(rootKeyIDs, id) - if err := c.db.AddKey(id, key); err != nil { - return err - } - } - } - - // add a mock "root" role that trusts the passed in key ids. These keys - // will be used to verify the `root.json` we just fetched. - role := &data.Role{Threshold: threshold, KeyIDs: rootKeyIDs} - if err := c.db.AddRole("root", role); err != nil { - return err - } - - // verify that the new root is valid. - if err := c.decodeRoot(rootJSON); err != nil { - return err - } - - return c.local.SetMeta("root.json", rootJSON) -} - -// InitLocal initializes a local repository from root metadata. +// Init initializes a local repository from root metadata. // // The root's keys are extracted from the root and saved in local storage. // Root expiration is not checked. // It is expected that rootJSON was securely distributed with the software // being updated. -func (c *Client) InitLocal(rootJSON []byte) error { +func (c *Client) Init(rootJSON []byte) error { err := c.loadAndVerifyRootMeta(rootJSON, true /*ignoreExpiredCheck*/) if err != nil { return err @@ -177,33 +133,38 @@ func (c *Client) Update() (data.TargetFiles, error) { return nil, err } - // Get timestamp.json, extract snapshot.json file meta and save the - // timestamp.json locally + // Load trusted metadata files, if any, and verify them against the latest root + c.getLocalMeta() + + // 5.4.1 - Download the timestamp metadata timestampJSON, err := c.downloadMetaUnsafe("timestamp.json", defaultTimestampDownloadLimit) if err != nil { return nil, err } + // 5.4.(2,3 and 4) - Verify timestamp against various attacks + // Returns the extracted snapshot metadata snapshotMeta, err := c.decodeTimestamp(timestampJSON) if err != nil { return nil, err } + // 5.4.5 - Persist the timestamp metadata if err := c.local.SetMeta("timestamp.json", timestampJSON); err != nil { return nil, err } - // Get snapshot.json, then extract file metas. 
-	// root.json meta should not be stored in the snapshot, if it is,
-	// the root will be checked, re-downloaded
+	// 5.5.1 - Download snapshot metadata
+	// 5.5.2 and 5.5.4 - Check against timestamp role's snapshot hash and version
 	snapshotJSON, err := c.downloadMetaFromTimestamp("snapshot.json", snapshotMeta)
 	if err != nil {
 		return nil, err
 	}
+	// 5.5.(3,5 and 6) - Verify snapshot against various attacks
+	// Returns the extracted metadata files
 	snapshotMetas, err := c.decodeSnapshot(snapshotJSON)
 	if err != nil {
 		return nil, err
 	}
-
-	// Save the snapshot.json
+	// 5.5.7 - Persist snapshot metadata
 	if err := c.local.SetMeta("snapshot.json", snapshotJSON); err != nil {
 		return nil, err
 	}
@@ -213,14 +174,18 @@ func (c *Client) Update() (data.TargetFiles, error) {
 	var updatedTargets data.TargetFiles
 	targetsMeta := snapshotMetas["targets.json"]
 	if !c.hasMetaFromSnapshot("targets.json", targetsMeta) {
+		// 5.6.1 - Download the top-level targets metadata file
+		// 5.6.2 and 5.6.4 - Check against snapshot role's targets hash and version
 		targetsJSON, err := c.downloadMetaFromSnapshot("targets.json", targetsMeta)
 		if err != nil {
 			return nil, err
 		}
+		// 5.6.(3 and 5) - Verify signatures and check against freeze attack
 		updatedTargets, err = c.decodeTargets(targetsJSON)
 		if err != nil {
 			return nil, err
 		}
+		// 5.6.6 - Persist targets metadata
 		if err := c.local.SetMeta("targets.json", targetsJSON); err != nil {
 			return nil, err
 		}
@@ -393,44 +358,69 @@ func (c *Client) UpdateRoots() error {
 // getLocalMeta decodes and verifies metadata from local storage.
 // The verification of local files is purely for consistency, if an attacker
 // has compromised the local storage, there is no guarantee it can be trusted.
+// Before trying to load the metadata files, it clears the in-memory copy of the local metadata.
+// This is to ensure that all of the loaded metadata files at the end are indeed verified by the latest root.
+// If some of the metadata files fail to load, it proceeds with trying to load the rest,
+// but still returns an error at the end if any failure occurred. Otherwise it returns nil.
 func (c *Client) getLocalMeta() error {
+	var retErr error
+	loadFailed := false
+	// Clear the in-memory copy of the local metadata. The goal is to reload and take into account
+	// only the metadata files that are verified by the latest root. Otherwise, their content should
+	// be ignored.
+ c.localMeta = make(map[string]json.RawMessage) + + // Load the latest root meta if err := c.loadAndVerifyLocalRootMeta( /*ignoreExpiredCheck=*/ false); err != nil { return err } + // Load into memory the existing meta, if any, from the local storage meta, err := c.local.GetMeta() if err != nil { return nil } + // Verify the top-level metadata (timestamp, snapshot and targets) against the latest root and load it, if okay if timestampJSON, ok := meta["timestamp.json"]; ok { timestamp := &data.Timestamp{} if err := c.db.UnmarshalTrusted(timestampJSON, timestamp, "timestamp"); err != nil { - return err + loadFailed = true + retErr = err + } else { + c.localMeta["timestamp.json"] = meta["timestamp.json"] + c.timestampVer = timestamp.Version } - c.timestampVer = timestamp.Version } if snapshotJSON, ok := meta["snapshot.json"]; ok { snapshot := &data.Snapshot{} if err := c.db.UnmarshalTrusted(snapshotJSON, snapshot, "snapshot"); err != nil { - return err + loadFailed = true + retErr = err + } else { + c.localMeta["snapshot.json"] = meta["snapshot.json"] + c.snapshotVer = snapshot.Version } - c.snapshotVer = snapshot.Version } if targetsJSON, ok := meta["targets.json"]; ok { targets := &data.Targets{} if err := c.db.UnmarshalTrusted(targetsJSON, targets, "targets"); err != nil { - return err + loadFailed = true + retErr = err + } else { + c.localMeta["targets.json"] = meta["targets.json"] + c.targetsVer = targets.Version + // FIXME(TUF-0.9) temporarily support files with leading path separators. + // c.targets = targets.Targets + c.loadTargets(targets.Targets) } - c.targetsVer = targets.Version - // FIXME(TUF-0.9) temporarily support files with leading path separators. - // c.targets = targets.Targets - c.loadTargets(targets.Targets) } - - c.localMeta = meta + if loadFailed { + // If any of the metadata failed to be verified, return the reason for that failure + return retErr + } return nil } @@ -465,15 +455,7 @@ func (c *Client) loadAndVerifyRootMeta(rootJSON []byte, ignoreExpiredCheck bool) ndb := verify.NewDB() for id, k := range root.Keys { if err := ndb.AddKey(id, k); err != nil { - // TUF is considering in TAP-12 removing the - // requirement that the keyid hash algorithm be derived - // from the public key. So to be forwards compatible, - // we ignore `ErrWrongID` errors. - // - // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return err - } + return err } } for name, role := range root.Roles { @@ -521,15 +503,7 @@ func (c *Client) verifyRoot(aJSON []byte, bJSON []byte) (*data.Root, error) { ndb := verify.NewDB() for id, k := range aRoot.Keys { if err := ndb.AddKey(id, k); err != nil { - // TUF is considering in TAP-12 removing the - // requirement that the keyid hash algorithm be derived - // from the public key. So to be forwards compatible, - // we ignore `ErrWrongID` errors. 
- // - // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return nil, err - } + return nil, err } } for name, role := range aRoot.Roles { @@ -576,7 +550,7 @@ func (c *Client) downloadMetaUnsafe(name string, maxMetaSize int64) ([]byte, err // although the size has been checked above, use a LimitReader in case // the reported size is inaccurate, or size is -1 which indicates an // unknown length - return ioutil.ReadAll(io.LimitReader(r, maxMetaSize)) + return io.ReadAll(io.LimitReader(r, maxMetaSize)) } // remoteGetFunc is the type of function the download method uses to download @@ -612,7 +586,7 @@ func (c *Client) downloadTarget(file string, get remoteGetFunc, hashes data.Hash // downloadVersionedMeta downloads top-level metadata from remote storage and // verifies it using the given file metadata. -func (c *Client) downloadMeta(name string, version int, m data.FileMeta) ([]byte, error) { +func (c *Client) downloadMeta(name string, version int64, m data.FileMeta) ([]byte, error) { r, size, err := func() (io.ReadCloser, int64, error) { if c.consistentSnapshot { path := util.VersionedPath(name, version) @@ -647,60 +621,96 @@ func (c *Client) downloadMeta(name string, version int, m data.FileMeta) ([]byte stream = r } - return ioutil.ReadAll(stream) + return io.ReadAll(stream) } func (c *Client) downloadMetaFromSnapshot(name string, m data.SnapshotFileMeta) ([]byte, error) { - b, err := c.downloadMeta(name, m.Version, m.FileMeta) + b, err := c.downloadMeta(name, m.Version, data.FileMeta{Length: m.Length, Hashes: m.Hashes}) if err != nil { return nil, err } - meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) + // 5.6.2 – Check length and hashes of fetched bytes *before* parsing metadata + if err := util.BytesMatchLenAndHashes(b, m.Length, m.Hashes); err != nil { + return nil, ErrDownloadFailed{name, err} + } + + meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...) if err != nil { return nil, err } - if err := util.SnapshotFileMetaEqual(meta, m); err != nil { + + // 5.6.4 - Check against snapshot role's version + if err := util.VersionEqual(meta.Version, m.Version); err != nil { return nil, ErrDownloadFailed{name, err} } + return b, nil } func (c *Client) downloadMetaFromTimestamp(name string, m data.TimestampFileMeta) ([]byte, error) { - b, err := c.downloadMeta(name, m.Version, m.FileMeta) + b, err := c.downloadMeta(name, m.Version, data.FileMeta{Length: m.Length, Hashes: m.Hashes}) if err != nil { return nil, err } - meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...) + // 5.2.2. – Check length and hashes of fetched bytes *before* parsing metadata + if err := util.BytesMatchLenAndHashes(b, m.Length, m.Hashes); err != nil { + return nil, ErrDownloadFailed{name, err} + } + + meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...) if err != nil { return nil, err } - if err := util.TimestampFileMetaEqual(meta, m); err != nil { + + // 5.5.4 - Check against timestamp role's version + if err := util.VersionEqual(meta.Version, m.Version); err != nil { return nil, ErrDownloadFailed{name, err} } - return b, nil -} -// decodeRoot decodes and verifies root metadata. 
-func (c *Client) decodeRoot(b json.RawMessage) error { - root := &data.Root{} - if err := c.db.Unmarshal(b, root, "root", c.rootVer); err != nil { - return ErrDecodeFailed{"root.json", err} - } - c.rootVer = root.Version - c.consistentSnapshot = root.ConsistentSnapshot - return nil + return b, nil } // decodeSnapshot decodes and verifies snapshot metadata, and returns the new // root and targets file meta. func (c *Client) decodeSnapshot(b json.RawMessage) (data.SnapshotFiles, error) { snapshot := &data.Snapshot{} + // 5.5.(3 and 6) - Verify it's signed correctly and it's not expired if err := c.db.Unmarshal(b, snapshot, "snapshot", c.snapshotVer); err != nil { return data.SnapshotFiles{}, ErrDecodeFailed{"snapshot.json", err} } - c.snapshotVer = snapshot.Version + // 5.5.5 - Check for top-level targets rollback attack + // Verify explicitly that current targets meta version is less than or equal to the new one + if snapshot.Meta["targets.json"].Version < c.targetsVer { + return data.SnapshotFiles{}, verify.ErrLowVersion{Actual: snapshot.Meta["targets.json"].Version, Current: c.targetsVer} + } + + // 5.5.5 - Get the local/trusted snapshot metadata, if any, and check all target metafiles against rollback attack + // In case the local snapshot metadata was not verified by the keys in the latest root during getLocalMeta(), + // snapshot.json won't be present in c.localMeta and thus this check will not be processed. + if snapshotJSON, ok := c.localMeta["snapshot.json"]; ok { + currentSnapshot := &data.Snapshot{} + if err := c.db.UnmarshalTrusted(snapshotJSON, currentSnapshot, "snapshot"); err != nil { + return data.SnapshotFiles{}, err + } + // 5.5.5 - Check for rollback attacks in both top-level and delegated targets roles (note that the Meta object includes both) + for path, local := range currentSnapshot.Meta { + if newMeta, ok := snapshot.Meta[path]; ok { + // 5.5.5 - Check for rollback attack + if newMeta.Version < local.Version { + return data.SnapshotFiles{}, verify.ErrLowVersion{Actual: newMeta.Version, Current: local.Version} + } + } else { + // 5.5.5 - Abort the update if a target file has been removed from the new snapshot file + return data.SnapshotFiles{}, verify.ErrMissingTargetFile + } + } + } + // At this point we can trust the new snapshot, the top-level targets, and any delegated targets versions it refers to + // so we can update the client's trusted versions and proceed with persisting the new snapshot metadata + // c.snapshotVer was already set when we verified the timestamp metadata + c.targetsVer = snapshot.Meta["targets.json"].Version return snapshot.Meta, nil } @@ -708,9 +718,11 @@ func (c *Client) decodeSnapshot(b json.RawMessage) (data.SnapshotFiles, error) { // returns updated targets. func (c *Client) decodeTargets(b json.RawMessage) (data.TargetFiles, error) { targets := &data.Targets{} + // 5.6.(3 and 5) - Verify signatures and check against freeze attack if err := c.db.Unmarshal(b, targets, "targets", c.targetsVer); err != nil { return nil, ErrDecodeFailed{"targets.json", err} } + // Generate a list with the updated targets updatedTargets := make(data.TargetFiles) for path, meta := range targets.Targets { if local, ok := c.targets[path]; ok { @@ -720,7 +732,7 @@ func (c *Client) decodeTargets(b json.RawMessage) (data.TargetFiles, error) { } updatedTargets[path] = meta } - c.targetsVer = targets.Version + // c.targetsVer was already updated when we verified the snapshot metadata // FIXME(TUF-0.9) temporarily support files with leading path separators. 
// c.targets = targets.Targets
 	c.loadTargets(targets.Targets)
@@ -734,7 +746,15 @@ func (c *Client) decodeTimestamp(b json.RawMessage) (data.TimestampFileMeta, err
 	if err := c.db.Unmarshal(b, timestamp, "timestamp", c.timestampVer); err != nil {
 		return data.TimestampFileMeta{}, ErrDecodeFailed{"timestamp.json", err}
 	}
+	// 5.4.3.2 - Check for snapshot rollback attack
+	// Verify that the current snapshot meta version is less than or equal to the new one
+	if timestamp.Meta["snapshot.json"].Version < c.snapshotVer {
+		return data.TimestampFileMeta{}, verify.ErrLowVersion{Actual: timestamp.Meta["snapshot.json"].Version, Current: c.snapshotVer}
+	}
+	// At this point we can trust the new timestamp and the snapshot version it refers to
+	// so we can update the client's trusted versions and proceed with persisting the new timestamp
 	c.timestampVer = timestamp.Version
+	c.snapshotVer = timestamp.Meta["snapshot.json"].Version
 	return timestamp.Meta["snapshot.json"], nil
 }
 
@@ -750,7 +770,7 @@ func (c *Client) localMetaFromSnapshot(name string, m data.SnapshotFileMeta) (js
 	if !ok {
 		return nil, false
 	}
-	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
+	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.Hashes.HashAlgorithms()...)
 	if err != nil {
 		return nil, false
 	}
@@ -758,36 +778,6 @@ func (c *Client) localMetaFromSnapshot(name string, m data.SnapshotFileMeta) (js
 	return b, err == nil
 }
 
-// hasTargetsMeta checks whether local metadata has the given snapshot meta
-//lint:ignore U1000 unused
-func (c *Client) hasTargetsMeta(m data.SnapshotFileMeta) bool {
-	b, ok := c.localMeta["targets.json"]
-	if !ok {
-		return false
-	}
-	meta, err := util.GenerateSnapshotFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
-	if err != nil {
-		return false
-	}
-	err = util.SnapshotFileMetaEqual(meta, m)
-	return err == nil
-}
-
-// hasSnapshotMeta checks whether local metadata has the given meta
-//lint:ignore U1000 unused
-func (c *Client) hasMetaFromTimestamp(name string, m data.TimestampFileMeta) bool {
-	b, ok := c.localMeta[name]
-	if !ok {
-		return false
-	}
-	meta, err := util.GenerateTimestampFileMeta(bytes.NewReader(b), m.HashAlgorithms()...)
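// The Download contract below is why Destination carries a Delete method: on
// any verification failure the partially written destination is removed
// rather than left on disk. A minimal file-backed Destination, assuming only
// the interface as declared in this file:
//
//	type fileDestination struct{ *os.File }
//
//	func (d fileDestination) Delete() error {
//		d.Close()
//		return os.Remove(d.Name())
//	}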
- if err != nil { - return false - } - err = util.TimestampFileMetaEqual(meta, m) - return err == nil -} - type Destination interface { io.Writer Delete() error @@ -797,11 +787,11 @@ type Destination interface { // // dest will be deleted and an error returned in the following situations: // -// * The target does not exist in the local targets.json -// * Failed to fetch the chain of delegations accessible from local snapshot.json -// * The target does not exist in any targets -// * Metadata cannot be generated for the downloaded data -// * Generated metadata does not match local metadata for the given file +// - The target does not exist in the local targets.json +// - Failed to fetch the chain of delegations accessible from local snapshot.json +// - The target does not exist in any targets +// - Metadata cannot be generated for the downloaded data +// - Generated metadata does not match local metadata for the given file func (c *Client) Download(name string, dest Destination) (err error) { // delete dest if there is an error defer func() { diff --git a/vendor/github.com/theupdateframework/go-tuf/client/delegations.go b/vendor/github.com/theupdateframework/go-tuf/client/delegations.go index cecab3ad90..de3e6647c0 100644 --- a/vendor/github.com/theupdateframework/go-tuf/client/delegations.go +++ b/vendor/github.com/theupdateframework/go-tuf/client/delegations.go @@ -20,7 +20,11 @@ func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) { // - filter delegations with paths or path_hash_prefixes matching searched target // - 5.6.7.1 cycles protection // - 5.6.7.2 terminations - delegations := targets.NewDelegationsIterator(target) + delegations, err := targets.NewDelegationsIterator(target, c.db) + if err != nil { + return data.TargetFileMeta{}, err + } + for i := 0; i < c.MaxDelegations; i++ { d, ok := delegations.Next() if !ok { @@ -28,7 +32,7 @@ func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) { } // covers 5.6.{1,2,3,4,5,6} - targets, err := c.loadDelegatedTargets(snapshot, d.Delegatee.Name, d.Verifier) + targets, err := c.loadDelegatedTargets(snapshot, d.Delegatee.Name, d.DB) if err != nil { return data.TargetFileMeta{}, err } @@ -39,11 +43,11 @@ func (c *Client) getTargetFileMeta(target string) (data.TargetFileMeta, error) { } if targets.Delegations != nil { - delegationsVerifier, err := verify.NewDelegationsVerifier(targets.Delegations) + delegationsDB, err := verify.NewDBFromDelegations(targets.Delegations) if err != nil { return data.TargetFileMeta{}, err } - err = delegations.Add(targets.Delegations.Roles, d.Delegatee.Name, delegationsVerifier) + err = delegations.Add(targets.Delegations.Roles, d.Delegatee.Name, delegationsDB) if err != nil { return data.TargetFileMeta{}, err } @@ -75,7 +79,7 @@ func (c *Client) loadLocalSnapshot() (*data.Snapshot, error) { } // loadDelegatedTargets downloads, decodes, verifies and stores targets -func (c *Client) loadDelegatedTargets(snapshot *data.Snapshot, role string, verifier verify.DelegationsVerifier) (*data.Targets, error) { +func (c *Client) loadDelegatedTargets(snapshot *data.Snapshot, role string, db *verify.DB) (*data.Targets, error) { var err error fileName := role + ".json" fileMeta, ok := snapshot.Meta[fileName] @@ -98,11 +102,7 @@ func (c *Client) loadDelegatedTargets(snapshot *data.Snapshot, role string, veri // 5.6.3 verify signature with parent public keys // 5.6.5 verify that the targets is not expired // role "targets" is a top role verified by root keys loaded in the client 
db - if role == "targets" { - err = c.db.Unmarshal(raw, targets, role, fileMeta.Version) - } else { - err = verifier.Unmarshal(raw, targets, role, fileMeta.Version) - } + err = db.Unmarshal(raw, targets, role, fileMeta.Version) if err != nil { return nil, ErrDecodeFailed{fileName, err} } diff --git a/vendor/github.com/theupdateframework/go-tuf/client/errors.go b/vendor/github.com/theupdateframework/go-tuf/client/errors.go index 4806ba4d7c..3e7a5dcc4d 100644 --- a/vendor/github.com/theupdateframework/go-tuf/client/errors.go +++ b/vendor/github.com/theupdateframework/go-tuf/client/errors.go @@ -3,8 +3,6 @@ package client import ( "errors" "fmt" - - "github.com/theupdateframework/go-tuf/verify" ) var ( @@ -42,27 +40,13 @@ func (e ErrDecodeFailed) Error() string { type ErrMaxDelegations struct { Target string MaxDelegations int - SnapshotVersion int + SnapshotVersion int64 } func (e ErrMaxDelegations) Error() string { return fmt.Sprintf("tuf: max delegation of %d reached searching for %s with snapshot version %d", e.MaxDelegations, e.Target, e.SnapshotVersion) } -//lint:ignore U1000 unused -func isDecodeFailedWithErrRoleThreshold(err error) bool { - e, ok := err.(ErrDecodeFailed) - if !ok { - return false - } - return isErrRoleThreshold(e.Err) -} - -func isErrRoleThreshold(err error) bool { - _, ok := err.(verify.ErrRoleThreshold) - return ok -} - type ErrNotFound struct { File string } @@ -86,22 +70,9 @@ func (e ErrWrongSize) Error() string { return fmt.Sprintf("tuf: unexpected file size: %s (expected %d bytes, got %d bytes)", e.File, e.Expected, e.Actual) } -type ErrLatestSnapshot struct { - Version int -} - -func (e ErrLatestSnapshot) Error() string { - return fmt.Sprintf("tuf: the local snapshot version (%d) is the latest", e.Version) -} - -func IsLatestSnapshot(err error) bool { - _, ok := err.(ErrLatestSnapshot) - return ok -} - type ErrUnknownTarget struct { Name string - SnapshotVersion int + SnapshotVersion int64 } func (e ErrUnknownTarget) Error() string { @@ -128,7 +99,7 @@ func (e ErrInvalidURL) Error() string { type ErrRoleNotInSnapshot struct { Role string - SnapshotVersion int + SnapshotVersion int64 } func (e ErrRoleNotInSnapshot) Error() string { diff --git a/vendor/github.com/theupdateframework/go-tuf/client/leveldbstore/leveldbstore.go b/vendor/github.com/theupdateframework/go-tuf/client/leveldbstore/leveldbstore.go index 578023ab11..d9390494bd 100644 --- a/vendor/github.com/theupdateframework/go-tuf/client/leveldbstore/leveldbstore.go +++ b/vendor/github.com/theupdateframework/go-tuf/client/leveldbstore/leveldbstore.go @@ -4,6 +4,7 @@ import ( "encoding/json" "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/storage" tuf_client "github.com/theupdateframework/go-tuf/client" @@ -16,6 +17,10 @@ func FileLocalStore(path string) (tuf_client.LocalStore, error) { } db, err := leveldb.Open(fd, nil) + if err != nil && errors.IsCorrupted(err) { + db, err = leveldb.Recover(fd, nil) + } + return &fileLocalStore{fd: fd, db: db}, err } diff --git a/vendor/github.com/theupdateframework/go-tuf/data/types.go b/vendor/github.com/theupdateframework/go-tuf/data/types.go index db60c16ff8..d051b76246 100644 --- a/vendor/github.com/theupdateframework/go-tuf/data/types.go +++ b/vendor/github.com/theupdateframework/go-tuf/data/types.go @@ -1,12 +1,13 @@ package data import ( + "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" - "path/filepath" + "path" "strings" "sync" "time" @@ -14,18 +15,29 @@ import ( 
"github.com/secure-systems-lab/go-securesystemslib/cjson" ) +type KeyType string + +type KeyScheme string + +type HashAlgorithm string + const ( - KeyIDLength = sha256.Size * 2 - KeyTypeEd25519 = "ed25519" - KeyTypeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" - KeySchemeEd25519 = "ed25519" - KeySchemeECDSA_SHA2_P256 = "ecdsa-sha2-nistp256" - KeyTypeRSASSA_PSS_SHA256 = "rsa" - KeySchemeRSASSA_PSS_SHA256 = "rsassa-pss-sha256" + KeyIDLength = sha256.Size * 2 + + KeyTypeEd25519 KeyType = "ed25519" + KeyTypeECDSA_SHA2_P256 KeyType = "ecdsa-sha2-nistp256" + KeyTypeRSASSA_PSS_SHA256 KeyType = "rsa" + + KeySchemeEd25519 KeyScheme = "ed25519" + KeySchemeECDSA_SHA2_P256 KeyScheme = "ecdsa-sha2-nistp256" + KeySchemeRSASSA_PSS_SHA256 KeyScheme = "rsassa-pss-sha256" + + HashAlgorithmSHA256 HashAlgorithm = "sha256" + HashAlgorithmSHA512 HashAlgorithm = "sha512" ) var ( - HashAlgorithms = []string{"sha256", "sha512"} + HashAlgorithms = []HashAlgorithm{HashAlgorithmSHA256, HashAlgorithmSHA512} ErrPathsAndPathHashesSet = errors.New("tuf: failed validation of delegated target: paths and path_hash_prefixes are both set") ) @@ -40,9 +52,9 @@ type Signature struct { } type PublicKey struct { - Type string `json:"keytype"` - Scheme string `json:"scheme"` - Algorithms []string `json:"keyid_hash_algorithms,omitempty"` + Type KeyType `json:"keytype"` + Scheme KeyScheme `json:"scheme"` + Algorithms []HashAlgorithm `json:"keyid_hash_algorithms,omitempty"` Value json.RawMessage `json:"keyval"` ids []string @@ -50,9 +62,9 @@ type PublicKey struct { } type PrivateKey struct { - Type string `json:"keytype"` - Scheme string `json:"scheme,omitempty"` - Algorithms []string `json:"keyid_hash_algorithms,omitempty"` + Type KeyType `json:"keytype"` + Scheme KeyScheme `json:"scheme,omitempty"` + Algorithms []HashAlgorithm `json:"keyid_hash_algorithms,omitempty"` Value json.RawMessage `json:"keyval"` } @@ -96,7 +108,7 @@ func DefaultExpires(role string) time.Time { type Root struct { Type string `json:"_type"` SpecVersion string `json:"spec_version"` - Version int `json:"version"` + Version int64 `json:"version"` Expires time.Time `json:"expires"` Keys map[string]*PublicKey `json:"keys"` Roles map[string]*Role `json:"roles"` @@ -147,35 +159,32 @@ func (r *Role) AddKeyIDs(ids []string) bool { return changed } -type Files map[string]FileMeta - -type FileMeta struct { - Length int64 `json:"length,omitempty"` - Hashes Hashes `json:"hashes,omitempty"` - Custom *json.RawMessage `json:"custom,omitempty"` -} +type Files map[string]TargetFileMeta type Hashes map[string]HexBytes -func (f FileMeta) HashAlgorithms() []string { - funcs := make([]string, 0, len(f.Hashes)) - for name := range f.Hashes { +func (f Hashes) HashAlgorithms() []string { + funcs := make([]string, 0, len(f)) + for name := range f { funcs = append(funcs, name) } return funcs } -type SnapshotFileMeta struct { - FileMeta - Version int `json:"version"` +type metapathFileMeta struct { + Length int64 `json:"length,omitempty"` + Hashes Hashes `json:"hashes,omitempty"` + Version int64 `json:"version"` } +type SnapshotFileMeta metapathFileMeta + type SnapshotFiles map[string]SnapshotFileMeta type Snapshot struct { Type string `json:"_type"` SpecVersion string `json:"spec_version"` - Version int `json:"version"` + Version int64 `json:"version"` Expires time.Time `json:"expires"` Meta SnapshotFiles `json:"meta"` Custom *json.RawMessage `json:"custom,omitempty"` @@ -190,20 +199,26 @@ func NewSnapshot() *Snapshot { } } +type FileMeta struct { + Length int64 `json:"length"` + Hashes 
Hashes `json:"hashes"` +} + type TargetFiles map[string]TargetFileMeta type TargetFileMeta struct { FileMeta + Custom *json.RawMessage `json:"custom,omitempty"` } func (f TargetFileMeta) HashAlgorithms() []string { - return f.FileMeta.HashAlgorithms() + return f.FileMeta.Hashes.HashAlgorithms() } type Targets struct { Type string `json:"_type"` SpecVersion string `json:"spec_version"` - Version int `json:"version"` + Version int64 `json:"version"` Expires time.Time `json:"expires"` Targets TargetFiles `json:"targets"` Delegations *Delegations `json:"delegations,omitempty"` @@ -237,7 +252,7 @@ func (d *DelegatedRole) MatchesPath(file string) (bool, error) { } for _, pattern := range d.Paths { - if matched, _ := filepath.Match(pattern, file); matched { + if matched, _ := path.Match(pattern, file); matched { return true, nil } } @@ -284,7 +299,11 @@ func (d *DelegatedRole) MarshalJSON() ([]byte, error) { func (d *DelegatedRole) UnmarshalJSON(b []byte) error { type delegatedRoleAlias DelegatedRole - if err := json.Unmarshal(b, (*delegatedRoleAlias)(d)); err != nil { + // Prepare decoder + dec := json.NewDecoder(bytes.NewReader(b)) + + // Unmarshal delegated role + if err := dec.Decode((*delegatedRoleAlias)(d)); err != nil { return err } @@ -300,17 +319,14 @@ func NewTargets() *Targets { } } -type TimestampFileMeta struct { - FileMeta - Version int `json:"version"` -} +type TimestampFileMeta metapathFileMeta type TimestampFiles map[string]TimestampFileMeta type Timestamp struct { Type string `json:"_type"` SpecVersion string `json:"spec_version"` - Version int `json:"version"` + Version int64 `json:"version"` Expires time.Time `json:"expires"` Meta TimestampFiles `json:"meta"` Custom *json.RawMessage `json:"custom,omitempty"` diff --git a/vendor/github.com/theupdateframework/go-tuf/errors.go b/vendor/github.com/theupdateframework/go-tuf/errors.go index 33d67c4bcf..0051c4391c 100644 --- a/vendor/github.com/theupdateframework/go-tuf/errors.go +++ b/vendor/github.com/theupdateframework/go-tuf/errors.go @@ -17,7 +17,7 @@ type ErrMissingMetadata struct { } func (e ErrMissingMetadata) Error() string { - return fmt.Sprintf("tuf: missing metadata %s", e.Name) + return fmt.Sprintf("tuf: missing metadata file %s", e.Name) } type ErrFileNotFound struct { @@ -28,12 +28,12 @@ func (e ErrFileNotFound) Error() string { return fmt.Sprintf("tuf: file not found %s", e.Path) } -type ErrInsufficientKeys struct { +type ErrNoKeys struct { Name string } -func (e ErrInsufficientKeys) Error() string { - return fmt.Sprintf("tuf: insufficient keys to sign %s", e.Name) +func (e ErrNoKeys) Error() string { + return fmt.Sprintf("tuf: no keys available to sign %s", e.Name) } type ErrInsufficientSignatures struct { @@ -46,11 +46,12 @@ func (e ErrInsufficientSignatures) Error() string { } type ErrInvalidRole struct { - Role string + Role string + Reason string } func (e ErrInvalidRole) Error() string { - return fmt.Sprintf("tuf: invalid role %s", e.Role) + return fmt.Sprintf("tuf: invalid role %s: %s", e.Role, e.Reason) } type ErrInvalidExpires struct { @@ -87,3 +88,11 @@ type ErrPassphraseRequired struct { func (e ErrPassphraseRequired) Error() string { return fmt.Sprintf("tuf: a passphrase is required to access the encrypted %s keys file", e.Role) } + +type ErrNoDelegatedTarget struct { + Path string +} + +func (e ErrNoDelegatedTarget) Error() string { + return fmt.Sprintf("tuf: no delegated target for path %s", e.Path) +} diff --git a/vendor/github.com/theupdateframework/go-tuf/internal/sets/strings.go 
b/vendor/github.com/theupdateframework/go-tuf/internal/sets/strings.go
new file mode 100644
index 0000000000..7eee57d094
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/internal/sets/strings.go
@@ -0,0 +1,24 @@
+package sets
+
+func StringSliceToSet(items []string) map[string]struct{} {
+	s := map[string]struct{}{}
+	for _, item := range items {
+		s[item] = struct{}{}
+	}
+	return s
+}
+
+func StringSetToSlice(items map[string]struct{}) []string {
+	ret := []string{}
+
+	for k := range items {
+		ret = append(ret, k)
+	}
+
+	return ret
+}
+
+func DeduplicateStrings(items []string) []string {
+	s := StringSliceToSet(items)
+	return StringSetToSlice(s)
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/local_store.go b/vendor/github.com/theupdateframework/go-tuf/local_store.go
index 139f436f5c..34038f3508 100644
--- a/vendor/github.com/theupdateframework/go-tuf/local_store.go
+++ b/vendor/github.com/theupdateframework/go-tuf/local_store.go
@@ -3,29 +3,61 @@ package tuf
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"strings"
 
 	"github.com/theupdateframework/go-tuf/data"
 	"github.com/theupdateframework/go-tuf/encrypted"
+	"github.com/theupdateframework/go-tuf/internal/sets"
 	"github.com/theupdateframework/go-tuf/pkg/keys"
 	"github.com/theupdateframework/go-tuf/util"
 )
 
-func signers(privateKeys []*data.PrivateKey) []keys.Signer {
-	res := make([]keys.Signer, 0, len(privateKeys))
-	for _, k := range privateKeys {
-		signer, err := keys.GetSigner(k)
-		if err != nil {
-			continue
-		}
-		res = append(res, signer)
-	}
-	return res
+type LocalStore interface {
+	// GetMeta returns a map from metadata file names (e.g. root.json) to their raw JSON payload or an error.
+	GetMeta() (map[string]json.RawMessage, error)
+
+	// SetMeta is used to update a metadata file name with a JSON payload.
+	SetMeta(name string, meta json.RawMessage) error
+
+	// WalkStagedTargets calls targetsFn for each staged target file in paths.
+	// If paths is empty, all staged target files will be walked.
+	WalkStagedTargets(paths []string, targetsFn TargetsWalkFunc) error
+
+	// FileIsStaged determines if a metadata file is currently staged, to avoid incrementing
+	// version numbers repeatedly while staged.
+	FileIsStaged(filename string) bool
+
+	// Commit is used to publish staged files to the repository.
+	//
+	// This will also reset the staged meta to signal incrementing version numbers.
+	// TUF 1.0 requires that the root metadata version numbers in the repository do not
+	// have gaps. To avoid this, we will only increment the number once until we commit.
+	Commit(bool, map[string]int64, map[string]data.Hashes) error
+
+	// GetSigners returns a list of signers for a role.
+	// This may include revoked keys, so the signers should not
+	// be used without filtering.
+	GetSigners(role string) ([]keys.Signer, error)
+
+	// SaveSigner adds a signer to a role.
+	SaveSigner(role string, signer keys.Signer) error
+
+	// SignersForKeyIDs returns a list of signers for the given key IDs.
+	SignersForKeyIDs(keyIDs []string) []keys.Signer
+
+	// Clean is used to remove all staged manifests.
+	Clean() error
+}
+
+type PassphraseChanger interface {
+	// ChangePassphrase changes the passphrase for a role keys file.
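	// The internal/sets helpers added earlier in this diff exist mostly so
	// that key ID lists can be merged without duplicates when the same
	// signer is saved more than once. A small illustration, assuming the
	// sets package above:
	//
	//	ids := []string{"abc", "def", "abc"}
	//	ids = sets.DeduplicateStrings(ids) // len(ids) == 2; order is unspecified (map iteration)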
+ ChangePassphrase(string) error } func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) LocalStore { @@ -33,10 +65,11 @@ func MemoryStore(meta map[string]json.RawMessage, files map[string][]byte) Local meta = make(map[string]json.RawMessage) } return &memoryStore{ - meta: meta, - stagedMeta: make(map[string]json.RawMessage), - files: files, - signers: make(map[string][]keys.Signer), + meta: meta, + stagedMeta: make(map[string]json.RawMessage), + files: files, + signerForKeyID: make(map[string]keys.Signer), + keyIDsForRole: make(map[string][]string), } } @@ -44,7 +77,9 @@ type memoryStore struct { meta map[string]json.RawMessage stagedMeta map[string]json.RawMessage files map[string][]byte - signers map[string][]keys.Signer + + signerForKeyID map[string]keys.Signer + keyIDsForRole map[string][]string } func (m *memoryStore) GetMeta() (map[string]json.RawMessage, error) { @@ -90,7 +125,7 @@ func (m *memoryStore) WalkStagedTargets(paths []string, targetsFn TargetsWalkFun return nil } -func (m *memoryStore) Commit(consistentSnapshot bool, versions map[string]int, hashes map[string]data.Hashes) error { +func (m *memoryStore) Commit(consistentSnapshot bool, versions map[string]int64, hashes map[string]data.Hashes) error { for name, meta := range m.stagedMeta { paths := computeMetadataPaths(consistentSnapshot, name, versions) for _, path := range paths { @@ -105,14 +140,53 @@ func (m *memoryStore) Commit(consistentSnapshot bool, versions map[string]int, h } func (m *memoryStore) GetSigners(role string) ([]keys.Signer, error) { - return m.signers[role], nil + keyIDs, ok := m.keyIDsForRole[role] + if ok { + return m.SignersForKeyIDs(keyIDs), nil + } + + return nil, nil } func (m *memoryStore) SaveSigner(role string, signer keys.Signer) error { - m.signers[role] = append(m.signers[role], signer) + keyIDs := signer.PublicData().IDs() + + for _, keyID := range keyIDs { + m.signerForKeyID[keyID] = signer + } + + mergedKeyIDs := sets.DeduplicateStrings(append(m.keyIDsForRole[role], keyIDs...)) + m.keyIDsForRole[role] = mergedKeyIDs return nil } +func (m *memoryStore) SignersForKeyIDs(keyIDs []string) []keys.Signer { + signers := []keys.Signer{} + keyIDsSeen := map[string]struct{}{} + + for _, keyID := range keyIDs { + signer, ok := m.signerForKeyID[keyID] + if !ok { + continue + } + addSigner := false + + for _, skid := range signer.PublicData().IDs() { + if _, seen := keyIDsSeen[skid]; !seen { + addSigner = true + } + + keyIDsSeen[skid] = struct{}{} + } + + if addSigner { + signers = append(signers, signer) + } + } + + return signers +} + func (m *memoryStore) Clean() error { return nil } @@ -126,7 +200,8 @@ func FileSystemStore(dir string, p util.PassphraseFunc) LocalStore { return &fileSystemStore{ dir: dir, passphraseFunc: p, - signers: make(map[string][]keys.Signer), + signerForKeyID: make(map[string]keys.Signer), + keyIDsForRole: make(map[string][]string), } } @@ -134,8 +209,8 @@ type fileSystemStore struct { dir string passphraseFunc util.PassphraseFunc - // signers is a cache of persisted keys to avoid decrypting multiple times - signers map[string][]keys.Signer + signerForKeyID map[string]keys.Signer + keyIDsForRole map[string][]string } func (f *fileSystemStore) repoDir() string { @@ -146,25 +221,65 @@ func (f *fileSystemStore) stagedDir() string { return filepath.Join(f.dir, "staged") } +func isMetaFile(e os.DirEntry) (bool, error) { + if e.IsDir() || filepath.Ext(e.Name()) != ".json" { + return false, nil + } + + info, err := e.Info() + if err != nil { + return false, 
err + } + + return info.Mode().IsRegular(), nil +} + func (f *fileSystemStore) GetMeta() (map[string]json.RawMessage, error) { - meta := make(map[string]json.RawMessage) - var err error - notExists := func(path string) bool { - _, err := os.Stat(path) - return os.IsNotExist(err) - } - for _, name := range topLevelMetadata { - path := filepath.Join(f.stagedDir(), name) - if notExists(path) { - path = filepath.Join(f.repoDir(), name) - if notExists(path) { - continue - } + // Build a map of metadata names (e.g. root.json) to their full paths + // (whether in the committed repo dir, or in the staged repo dir). + metaPaths := map[string]string{} + + rd := f.repoDir() + committed, err := os.ReadDir(f.repoDir()) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("could not list repo dir: %w", err) + } + + for _, e := range committed { + imf, err := isMetaFile(e) + if err != nil { + return nil, err } - meta[name], err = ioutil.ReadFile(path) + if imf { + name := e.Name() + metaPaths[name] = filepath.Join(rd, name) + } + } + + sd := f.stagedDir() + staged, err := os.ReadDir(sd) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return nil, fmt.Errorf("could not list staged dir: %w", err) + } + + for _, e := range staged { + imf, err := isMetaFile(e) if err != nil { return nil, err } + if imf { + name := e.Name() + metaPaths[name] = filepath.Join(sd, name) + } + } + + meta := make(map[string]json.RawMessage) + for name, path := range metaPaths { + f, err := os.ReadFile(path) + if err != nil { + return nil, err + } + meta[name] = f } return meta, nil } @@ -195,44 +310,44 @@ func (f *fileSystemStore) createDirs() error { func (f *fileSystemStore) WalkStagedTargets(paths []string, targetsFn TargetsWalkFunc) error { if len(paths) == 0 { - walkFunc := func(path string, info os.FileInfo, err error) error { + walkFunc := func(fpath string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() || !info.Mode().IsRegular() { return nil } - rel, err := filepath.Rel(filepath.Join(f.stagedDir(), "targets"), path) + rel, err := filepath.Rel(filepath.Join(f.stagedDir(), "targets"), fpath) if err != nil { return err } - file, err := os.Open(path) + file, err := os.Open(fpath) if err != nil { return err } defer file.Close() - return targetsFn(rel, file) + return targetsFn(filepath.ToSlash(rel), file) } return filepath.Walk(filepath.Join(f.stagedDir(), "targets"), walkFunc) } // check all the files exist before processing any files for _, path := range paths { - realPath := filepath.Join(f.stagedDir(), "targets", path) - if _, err := os.Stat(realPath); err != nil { + realFilepath := filepath.Join(f.stagedDir(), "targets", path) + if _, err := os.Stat(realFilepath); err != nil { if os.IsNotExist(err) { - return ErrFileNotFound{realPath} + return ErrFileNotFound{realFilepath} } return err } } for _, path := range paths { - realPath := filepath.Join(f.stagedDir(), "targets", path) - file, err := os.Open(realPath) + realFilepath := filepath.Join(f.stagedDir(), "targets", path) + file, err := os.Open(realFilepath) if err != nil { if os.IsNotExist(err) { - return ErrFileNotFound{realPath} + return ErrFileNotFound{realFilepath} } return err } @@ -253,27 +368,28 @@ func (f *fileSystemStore) createRepoFile(path string) (*os.File, error) { return os.Create(dst) } -func (f *fileSystemStore) Commit(consistentSnapshot bool, versions map[string]int, hashes map[string]data.Hashes) error { +func (f *fileSystemStore) Commit(consistentSnapshot bool, versions map[string]int64, 
hashes map[string]data.Hashes) error { isTarget := func(path string) bool { return strings.HasPrefix(path, "targets/") } - copyToRepo := func(path string, info os.FileInfo, err error) error { + copyToRepo := func(fpath string, info os.FileInfo, err error) error { if err != nil { return err } if info.IsDir() || !info.Mode().IsRegular() { return nil } - rel, err := filepath.Rel(f.stagedDir(), path) + rel, err := filepath.Rel(f.stagedDir(), fpath) if err != nil { return err } + relpath := filepath.ToSlash(rel) var paths []string - if isTarget(rel) { - paths = computeTargetPaths(consistentSnapshot, rel, hashes) + if isTarget(relpath) { + paths = computeTargetPaths(consistentSnapshot, relpath, hashes) } else { - paths = computeMetadataPaths(consistentSnapshot, rel, versions) + paths = computeMetadataPaths(consistentSnapshot, relpath, versions) } var files []io.Writer for _, path := range paths { @@ -284,7 +400,7 @@ func (f *fileSystemStore) Commit(consistentSnapshot bool, versions map[string]in defer file.Close() files = append(files, file) } - staged, err := os.Open(path) + staged, err := os.Open(fpath) if err != nil { return err } @@ -294,32 +410,50 @@ func (f *fileSystemStore) Commit(consistentSnapshot bool, versions map[string]in } return nil } - needsRemoval := func(path string) bool { + // Checks if target file should be deleted + needsRemoval := func(fpath string) bool { if consistentSnapshot { // strip out the hash - name := strings.SplitN(filepath.Base(path), ".", 2) + name := strings.SplitN(filepath.Base(fpath), ".", 2) if len(name) != 2 || name[1] == "" { return false } - path = filepath.Join(filepath.Dir(path), name[1]) + fpath = filepath.Join(filepath.Dir(fpath), name[1]) } - _, ok := hashes[path] + _, ok := hashes[filepath.ToSlash(fpath)] return !ok } - removeFile := func(path string, info os.FileInfo, err error) error { + // Checks if folder is empty + folderNeedsRemoval := func(fpath string) bool { + f, err := os.Open(fpath) + if err != nil { + return false + } + defer f.Close() + _, err = f.Readdirnames(1) + return err == io.EOF + } + removeFile := func(fpath string, info os.FileInfo, err error) error { if err != nil { return err } - rel, err := filepath.Rel(f.repoDir(), path) + rel, err := filepath.Rel(f.repoDir(), fpath) if err != nil { return err } - if !info.IsDir() && isTarget(rel) && needsRemoval(rel) { - //lint:ignore SA9003 empty branch - if err := os.Remove(path); err != nil { - // TODO: log / handle error + relpath := filepath.ToSlash(rel) + if !info.IsDir() && isTarget(relpath) && needsRemoval(rel) { + // Delete the target file + if err := os.Remove(fpath); err != nil { + return err + } + // Delete the target folder too if it's empty + targetFolder := filepath.Dir(fpath) + if folderNeedsRemoval(targetFolder) { + if err := os.Remove(targetFolder); err != nil { + return err + } } - // TODO: remove empty directory } return nil } @@ -333,18 +467,63 @@ func (f *fileSystemStore) Commit(consistentSnapshot bool, versions map[string]in } func (f *fileSystemStore) GetSigners(role string) ([]keys.Signer, error) { - if keys, ok := f.signers[role]; ok { - return keys, nil + keyIDs, ok := f.keyIDsForRole[role] + if ok { + return f.SignersForKeyIDs(keyIDs), nil } - keys, _, err := f.loadPrivateKeys(role) + + privKeys, _, err := f.loadPrivateKeys(role) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } - f.signers[role] = signers(keys) - return f.signers[role], nil + + signers := []keys.Signer{} + for _, key := range privKeys { + signer, err := 
keys.GetSigner(key) + if err != nil { + return nil, err + } + + // Cache the signers. + for _, keyID := range signer.PublicData().IDs() { + f.keyIDsForRole[role] = append(f.keyIDsForRole[role], keyID) + f.signerForKeyID[keyID] = signer + } + signers = append(signers, signer) + } + + return signers, nil +} + +func (f *fileSystemStore) SignersForKeyIDs(keyIDs []string) []keys.Signer { + signers := []keys.Signer{} + keyIDsSeen := map[string]struct{}{} + + for _, keyID := range keyIDs { + signer, ok := f.signerForKeyID[keyID] + if !ok { + continue + } + + addSigner := false + + for _, skid := range signer.PublicData().IDs() { + if _, seen := keyIDsSeen[skid]; !seen { + addSigner = true + } + + keyIDsSeen[skid] = struct{}{} + } + + if addSigner { + signers = append(signers, signer) + } + } + + return signers } // ChangePassphrase changes the passphrase for a role keys file. Implements @@ -391,7 +570,7 @@ func (f *fileSystemStore) SaveSigner(role string, signer keys.Signer) error { } // add the key to the existing keys (if any) - keys, pass, err := f.loadPrivateKeys(role) + privKeys, pass, err := f.loadPrivateKeys(role) if err != nil && !os.IsNotExist(err) { return err } @@ -399,7 +578,7 @@ func (f *fileSystemStore) SaveSigner(role string, signer keys.Signer) error { if err != nil { return err } - keys = append(keys, key) + privKeys = append(privKeys, key) // if loadPrivateKeys didn't return a passphrase (because no keys yet exist) // and passphraseFunc is set, get the passphrase so the keys file can @@ -414,13 +593,13 @@ func (f *fileSystemStore) SaveSigner(role string, signer keys.Signer) error { pk := &persistedKeys{} if pass != nil { - pk.Data, err = encrypted.Marshal(keys, pass) + pk.Data, err = encrypted.Marshal(privKeys, pass) if err != nil { return err } pk.Encrypted = true } else { - pk.Data, err = json.MarshalIndent(keys, "", "\t") + pk.Data, err = json.MarshalIndent(privKeys, "", "\t") if err != nil { return err } @@ -432,7 +611,27 @@ func (f *fileSystemStore) SaveSigner(role string, signer keys.Signer) error { if err := util.AtomicallyWriteFile(f.keysPath(role), append(data, '\n'), 0600); err != nil { return err } - f.signers[role] = append(f.signers[role], signer) + + // Merge privKeys into f.keyIDsForRole and register signers with + // f.signerForKeyID. + keyIDsForRole := f.keyIDsForRole[role] + for _, key := range privKeys { + signer, err := keys.GetSigner(key) + if err != nil { + return err + } + + keyIDs := signer.PublicData().IDs() + + for _, keyID := range keyIDs { + f.signerForKeyID[keyID] = signer + } + + keyIDsForRole = append(keyIDsForRole, keyIDs...) 
+	}
+
+	f.keyIDsForRole[role] = sets.DeduplicateStrings(keyIDsForRole)
+
 	return nil
 }
 
@@ -502,7 +701,7 @@ func computeTargetPaths(consistentSnapshot bool, name string, hashes map[string]
 	}
 }
 
-func computeMetadataPaths(consistentSnapshot bool, name string, versions map[string]int) []string {
+func computeMetadataPaths(consistentSnapshot bool, name string, versions map[string]int64) []string {
 	copyVersion := false
 
 	switch name {
diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa/set_ecdsa.go b/vendor/github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa/set_ecdsa.go
new file mode 100644
index 0000000000..de3771e3ae
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa/set_ecdsa.go
@@ -0,0 +1,23 @@
+package set_ecdsa
+
+import (
+	"errors"
+
+	"github.com/theupdateframework/go-tuf/data"
+	"github.com/theupdateframework/go-tuf/pkg/keys"
+)
+
+/*
+	Importing this package will allow support for both hex-encoded ECDSA
+	verifiers and PEM-encoded ECDSA verifiers.
+	Note that this package imports "github.com/theupdateframework/go-tuf/pkg/keys"
+	and overrides the ECDSA verifier loaded at init time in that package.
+*/
+
+func init() {
+	_, ok := keys.VerifierMap.Load(data.KeyTypeECDSA_SHA2_P256)
+	if !ok {
+		panic(errors.New("expected to override previously loaded PEM-only ECDSA verifier"))
+	}
+	keys.VerifierMap.Store(data.KeyTypeECDSA_SHA2_P256, keys.NewDeprecatedEcdsaVerifier)
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/deprecated_ecdsa.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/deprecated_ecdsa.go
new file mode 100644
index 0000000000..4a8f151ea6
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/deprecated_ecdsa.go
@@ -0,0 +1,103 @@
+package keys
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/theupdateframework/go-tuf/data"
+)
+
+func NewDeprecatedEcdsaVerifier() Verifier {
+	return &ecdsaVerifierWithDeprecatedSupport{}
+}
+
+type ecdsaVerifierWithDeprecatedSupport struct {
+	key *data.PublicKey
+	// This will switch based on whether this is a PEM-encoded key
+	// or a deprecated hex-encoded key.
+	Verifier
+}
+
+func (p *ecdsaVerifierWithDeprecatedSupport) UnmarshalPublicKey(key *data.PublicKey) error {
+	p.key = key
+	pemVerifier := &EcdsaVerifier{}
+	if err := pemVerifier.UnmarshalPublicKey(key); err != nil {
+		// Try the deprecated hex-encoded verifier
+		hexVerifier := &deprecatedP256Verifier{}
+		if err := hexVerifier.UnmarshalPublicKey(key); err != nil {
+			return err
+		}
+		p.Verifier = hexVerifier
+		return nil
+	}
+	p.Verifier = pemVerifier
+	return nil
+}
+
+/*
+	Deprecated ecdsaVerifier that used hex-encoded public keys.
+	This MAY be used to verify existing metadata that used this
+	old format. This will be deprecated soon; ensure that repositories
+	are re-signed and clients receive a fully compliant root.
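	A client opts in to this legacy format with a blank import of the
	set_ecdsa package introduced above, relying on its init() override, e.g.:

		import _ "github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa"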
+*/
+
+type deprecatedP256Verifier struct {
+	PublicKey data.HexBytes `json:"public"`
+	key       *data.PublicKey
+}
+
+func (p *deprecatedP256Verifier) Public() string {
+	return p.PublicKey.String()
+}
+
+func (p *deprecatedP256Verifier) Verify(msg, sigBytes []byte) error {
+	x, y := elliptic.Unmarshal(elliptic.P256(), p.PublicKey)
+	k := &ecdsa.PublicKey{
+		Curve: elliptic.P256(),
+		X:     x,
+		Y:     y,
+	}
+
+	hash := sha256.Sum256(msg)
+
+	if !ecdsa.VerifyASN1(k, hash[:], sigBytes) {
+		return errors.New("tuf: deprecated ecdsa signature verification failed")
+	}
+	return nil
+}
+
+func (p *deprecatedP256Verifier) MarshalPublicKey() *data.PublicKey {
+	return p.key
+}
+
+func (p *deprecatedP256Verifier) UnmarshalPublicKey(key *data.PublicKey) error {
+	// Prepare decoder limited to 512Kb
+	dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize))
+
+	// Unmarshal key value
+	if err := dec.Decode(p); err != nil {
+		if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+			return fmt.Errorf("tuf: the public key is truncated or too large: %w", err)
+		}
+		return err
+	}
+
+	curve := elliptic.P256()
+
+	// Parse as uncompressed marshalled point.
+	x, _ := elliptic.Unmarshal(curve, p.PublicKey)
+	if x == nil {
+		return errors.New("tuf: invalid ecdsa public key point")
+	}
+
+	p.key = key
+	fmt.Fprintln(os.Stderr, "tuf: warning using deprecated ecdsa hex-encoded keys")
+	return nil
+}
diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
index 1471235b68..ee93e33007 100644
--- a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
+++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ecdsa.go
@@ -1,71 +1,171 @@
 package keys
 
 import (
+	"bytes"
 	"crypto/ecdsa"
 	"crypto/elliptic"
+	"crypto/rand"
 	"crypto/sha256"
-	"encoding/asn1"
+	"crypto/x509"
 	"encoding/json"
+	"encoding/pem"
 	"errors"
-	"math/big"
+	"fmt"
+	"io"
 
 	"github.com/theupdateframework/go-tuf/data"
 )
 
 func init() {
-	VerifierMap.Store(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier)
+	// Note: we use LoadOrStore here to prevent accidentally overriding
+	// an explicitly loaded deprecated ECDSA verifier.
+	// TODO: When deprecated ECDSA is removed, this can switch back to Store.
+	VerifierMap.LoadOrStore(data.KeyTypeECDSA_SHA2_P256, NewEcdsaVerifier)
+	SignerMap.Store(data.KeyTypeECDSA_SHA2_P256, newEcdsaSigner)
 }
 
 func NewEcdsaVerifier() Verifier {
-	return &p256Verifier{}
+	return &EcdsaVerifier{}
 }
 
-type ecdsaSignature struct {
-	R, S *big.Int
+func newEcdsaSigner() Signer {
+	return &ecdsaSigner{}
 }
 
-type p256Verifier struct {
-	PublicKey data.HexBytes `json:"public"`
+type EcdsaVerifier struct {
+	PublicKey *PKIXPublicKey `json:"public"`
+	ecdsaKey  *ecdsa.PublicKey
 	key       *data.PublicKey
 }
 
-func (p *p256Verifier) Public() string {
-	return p.PublicKey.String()
+func (p *EcdsaVerifier) Public() string {
+	// This is already verified to succeed when unmarshalling a public key.
+	r, err := x509.MarshalPKIXPublicKey(p.ecdsaKey)
+	if err != nil {
+		// TODO: Gracefully handle these errors.
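		// For context, the two key value encodings that
		// ecdsaVerifierWithDeprecatedSupport switches between look roughly
		// like this (illustrative values only):
		//
		//	{"public": "04af12..."}                       // deprecated: hex-encoded point
		//	{"public": "-----BEGIN PUBLIC KEY-----\n..."} // current: PKIX PEM string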
+ // See https://github.com/theupdateframework/go-tuf/issues/363 + panic(err) + } + return string(r) } -func (p *p256Verifier) Verify(msg, sigBytes []byte) error { - x, y := elliptic.Unmarshal(elliptic.P256(), p.PublicKey) - k := &ecdsa.PublicKey{ - Curve: elliptic.P256(), - X: x, - Y: y, +func (p *EcdsaVerifier) Verify(msg, sigBytes []byte) error { + hash := sha256.Sum256(msg) + + if !ecdsa.VerifyASN1(p.ecdsaKey, hash[:], sigBytes) { + return errors.New("tuf: ecdsa signature verification failed") } + return nil +} + +func (p *EcdsaVerifier) MarshalPublicKey() *data.PublicKey { + return p.key +} + +func (p *EcdsaVerifier) UnmarshalPublicKey(key *data.PublicKey) error { + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) - var sig ecdsaSignature - if _, err := asn1.Unmarshal(sigBytes, &sig); err != nil { + // Unmarshal key value + if err := dec.Decode(p); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } return err } - hash := sha256.Sum256(msg) + ecdsaKey, ok := p.PublicKey.PublicKey.(*ecdsa.PublicKey) + if !ok { + return fmt.Errorf("invalid public key") + } - if !ecdsa.Verify(k, hash[:], sig.R, sig.S) { - return errors.New("tuf: ecdsa signature verification failed") + if _, err := x509.MarshalPKIXPublicKey(ecdsaKey); err != nil { + return fmt.Errorf("marshalling to PKIX key: invalid public key") } + + p.ecdsaKey = ecdsaKey + p.key = key return nil } -func (p *p256Verifier) MarshalPublicKey() *data.PublicKey { - return p.key +type ecdsaSigner struct { + *ecdsa.PrivateKey +} + +type ecdsaPrivateKeyValue struct { + Private string `json:"private"` + Public *PKIXPublicKey `json:"public"` } -func (p *p256Verifier) UnmarshalPublicKey(key *data.PublicKey) error { - if err := json.Unmarshal(key.Value, p); err != nil { +func (s *ecdsaSigner) PublicData() *data.PublicKey { + // This uses a trusted public key JSON format with a trusted Public value. 
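	// End to end, the new signer and verifier compose as in this sketch
	// (assuming GenerateEcdsaKey below, errors elided):
	//
	//	signer, _ := GenerateEcdsaKey()
	//	sig, _ := signer.SignMessage([]byte("payload"))
	//	v := NewEcdsaVerifier()
	//	_ = v.UnmarshalPublicKey(signer.PublicData())
	//	err := v.Verify([]byte("payload"), sig) // nil on success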
+ keyValBytes, _ := json.Marshal(EcdsaVerifier{PublicKey: &PKIXPublicKey{PublicKey: s.Public()}}) + return &data.PublicKey{ + Type: data.KeyTypeECDSA_SHA2_P256, + Scheme: data.KeySchemeECDSA_SHA2_P256, + Algorithms: data.HashAlgorithms, + Value: keyValBytes, + } +} + +func (s *ecdsaSigner) SignMessage(message []byte) ([]byte, error) { + hash := sha256.Sum256(message) + return ecdsa.SignASN1(rand.Reader, s.PrivateKey, hash[:]) +} + +func (s *ecdsaSigner) MarshalPrivateKey() (*data.PrivateKey, error) { + priv, err := x509.MarshalECPrivateKey(s.PrivateKey) + if err != nil { + return nil, err + } + pemKey := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: priv}) + val, err := json.Marshal(ecdsaPrivateKeyValue{ + Private: string(pemKey), + Public: &PKIXPublicKey{PublicKey: s.Public()}, + }) + if err != nil { + return nil, err + } + return &data.PrivateKey{ + Type: data.KeyTypeECDSA_SHA2_P256, + Scheme: data.KeySchemeECDSA_SHA2_P256, + Algorithms: data.HashAlgorithms, + Value: val, + }, nil +} + +func (s *ecdsaSigner) UnmarshalPrivateKey(key *data.PrivateKey) error { + val := ecdsaPrivateKeyValue{} + if err := json.Unmarshal(key.Value, &val); err != nil { return err } - x, _ := elliptic.Unmarshal(elliptic.P256(), p.PublicKey) - if x == nil { - return errors.New("tuf: invalid ecdsa public key point") + block, _ := pem.Decode([]byte(val.Private)) + if block == nil { + return errors.New("invalid PEM value") } - p.key = key + if block.Type != "EC PRIVATE KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + k, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return err + } + if k.Curve != elliptic.P256() { + return errors.New("unsupported ecdsa curve") + } + if _, err := json.Marshal(EcdsaVerifier{ + PublicKey: &PKIXPublicKey{PublicKey: k.Public()}}); err != nil { + return fmt.Errorf("invalid public key: %s", err) + } + + s.PrivateKey = k return nil } + +func GenerateEcdsaKey() (*ecdsaSigner, error) { + privkey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + return &ecdsaSigner{privkey}, nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ed25519.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ed25519.go index 88f6f86439..1e4c66ccc7 100644 --- a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ed25519.go +++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/ed25519.go @@ -1,25 +1,29 @@ package keys import ( + "bytes" "crypto" "crypto/ed25519" "crypto/rand" + "crypto/subtle" "encoding/json" "errors" + "fmt" + "io" "github.com/theupdateframework/go-tuf/data" ) func init() { - SignerMap.Store(data.KeySchemeEd25519, NewP256Signer) - VerifierMap.Store(data.KeySchemeEd25519, NewP256Verifier) + SignerMap.Store(data.KeyTypeEd25519, NewEd25519Signer) + VerifierMap.Store(data.KeyTypeEd25519, NewEd25519Verifier) } -func NewP256Signer() Signer { +func NewEd25519Signer() Signer { return &ed25519Signer{} } -func NewP256Verifier() Verifier { +func NewEd25519Verifier() Verifier { return &ed25519Verifier{} } @@ -45,11 +49,19 @@ func (e *ed25519Verifier) MarshalPublicKey() *data.PublicKey { func (e *ed25519Verifier) UnmarshalPublicKey(key *data.PublicKey) error { e.key = key - if err := json.Unmarshal(key.Value, e); err != nil { + + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(e); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { 
+ return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } return err } - if len(e.PublicKey) != ed25519.PublicKeySize { - return errors.New("tuf: unexpected public key length for ed25519 key") + if n := len(e.PublicKey); n != ed25519.PublicKeySize { + return fmt.Errorf("tuf: unexpected public key length for ed25519 key, expected %d, got %d", ed25519.PublicKeySize, n) } return nil } @@ -61,10 +73,6 @@ type Ed25519PrivateKeyValue struct { type ed25519Signer struct { ed25519.PrivateKey - - keyType string - keyScheme string - keyAlgorithms []string } func GenerateEd25519Key() (*ed25519Signer, error) { @@ -76,19 +84,13 @@ func GenerateEd25519Key() (*ed25519Signer, error) { return nil, err } return &ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(private)), - keyType: data.KeyTypeEd25519, - keyScheme: data.KeySchemeEd25519, - keyAlgorithms: data.HashAlgorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(private)), }, nil } -func NewEd25519Signer(keyValue Ed25519PrivateKeyValue) *ed25519Signer { +func NewEd25519SignerFromKey(keyValue Ed25519PrivateKeyValue) *ed25519Signer { return &ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), - keyType: data.KeyTypeEd25519, - keyScheme: data.KeySchemeEd25519, - keyAlgorithms: data.HashAlgorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), } } @@ -105,23 +107,45 @@ func (e *ed25519Signer) MarshalPrivateKey() (*data.PrivateKey, error) { return nil, err } return &data.PrivateKey{ - Type: e.keyType, - Scheme: e.keyScheme, - Algorithms: e.keyAlgorithms, + Type: data.KeyTypeEd25519, + Scheme: data.KeySchemeEd25519, + Algorithms: data.HashAlgorithms, Value: valueBytes, }, nil } func (e *ed25519Signer) UnmarshalPrivateKey(key *data.PrivateKey) error { keyValue := &Ed25519PrivateKeyValue{} - if err := json.Unmarshal(key.Value, keyValue); err != nil { - return err + + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(keyValue); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the private key is truncated or too large: %w", err) + } + } + + // Check private key length + if n := len(keyValue.Private); n != ed25519.PrivateKeySize { + return fmt.Errorf("tuf: invalid ed25519 private key length, expected %d, got %d", ed25519.PrivateKeySize, n) + } + + // Generate public key from private key + pub, _, err := ed25519.GenerateKey(bytes.NewReader(keyValue.Private)) + if err != nil { + return fmt.Errorf("tuf: unable to derive public key from private key: %w", err) } + + // Compare keys + if subtle.ConstantTimeCompare(keyValue.Public, pub) != 1 { + return errors.New("tuf: public and private keys don't match") + } + + // Prepare signer *e = ed25519Signer{ - PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), - keyType: key.Type, - keyScheme: key.Scheme, - keyAlgorithms: key.Algorithms, + PrivateKey: ed25519.PrivateKey(data.HexBytes(keyValue.Private)), } return nil } @@ -129,9 +153,9 @@ func (e *ed25519Signer) UnmarshalPrivateKey(key *data.PrivateKey) error { func (e *ed25519Signer) PublicData() *data.PublicKey { keyValBytes, _ := json.Marshal(ed25519Verifier{PublicKey: []byte(e.PrivateKey.Public().(ed25519.PublicKey))}) return &data.PublicKey{ - Type: e.keyType, - Scheme: e.keyScheme, - Algorithms: e.keyAlgorithms, + Type: data.KeyTypeEd25519, + Scheme: data.KeySchemeEd25519, + Algorithms: 
data.HashAlgorithms, Value: keyValBytes, } } diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/keys.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/keys.go index 702c420c03..dc5f3ea2c3 100644 --- a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/keys.go +++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/keys.go @@ -8,6 +8,9 @@ import ( "github.com/theupdateframework/go-tuf/data" ) +// MaxJSONKeySize defines the maximum length of a JSON payload. +const MaxJSONKeySize = 512 * 1024 // 512Kb + // SignerMap stores mapping between key type strings and signer constructors. var SignerMap sync.Map diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/pkix.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/pkix.go new file mode 100644 index 0000000000..e58d4c9f83 --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/pkix.go @@ -0,0 +1,56 @@ +package keys + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +type PKIXPublicKey struct { + crypto.PublicKey +} + +func (p *PKIXPublicKey) MarshalJSON() ([]byte, error) { + bytes, err := x509.MarshalPKIXPublicKey(p.PublicKey) + if err != nil { + return nil, err + } + pemBytes := pem.EncodeToMemory(&pem.Block{ + Type: "PUBLIC KEY", + Bytes: bytes, + }) + return json.Marshal(string(pemBytes)) +} + +func (p *PKIXPublicKey) UnmarshalJSON(b []byte) error { + var pemValue string + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(b), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(&pemValue); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } + return err + } + + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return errors.New("invalid PEM value") + } + if block.Type != "PUBLIC KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + p.PublicKey = pub + return nil +} diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/rsa.go b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/rsa.go index 28c82d14e0..618f104ee4 100644 --- a/vendor/github.com/theupdateframework/go-tuf/pkg/keys/rsa.go +++ b/vendor/github.com/theupdateframework/go-tuf/pkg/keys/rsa.go @@ -1,6 +1,7 @@ package keys import ( + "bytes" "crypto" "crypto/rand" "crypto/rsa" @@ -9,36 +10,38 @@ import ( "encoding/json" "encoding/pem" "errors" + "fmt" + "io" "github.com/theupdateframework/go-tuf/data" ) func init() { - VerifierMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaVerifier) - SignerMap.Store(data.KeyTypeRSASSA_PSS_SHA256, NewRsaSigner) + VerifierMap.Store(data.KeyTypeRSASSA_PSS_SHA256, newRsaVerifier) + SignerMap.Store(data.KeyTypeRSASSA_PSS_SHA256, newRsaSigner) } -func NewRsaVerifier() Verifier { +func newRsaVerifier() Verifier { return &rsaVerifier{} } -func NewRsaSigner() Signer { +func newRsaSigner() Signer { return &rsaSigner{} } type rsaVerifier struct { - PublicKey string `json:"public"` + PublicKey *PKIXPublicKey `json:"public"` rsaKey *rsa.PublicKey key *data.PublicKey } func (p *rsaVerifier) Public() string { - // Unique public key identifier, use a uniform encodng + // This is already verified to succeed when unmarshalling a public key. 
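	// PKIXPublicKey (pkix.go above) is what makes this safe: its
	// UnmarshalJSON only accepts a well-formed "PUBLIC KEY" PEM block, so any
	// key that unmarshalled will also re-marshal. A sketch of the round trip,
	// assuming an arbitrary crypto.PublicKey pub:
	//
	//	wrapped := &PKIXPublicKey{PublicKey: pub}
	//	b, _ := json.Marshal(wrapped) // PEM text wrapped in a JSON string
	//	var out PKIXPublicKey
	//	_ = json.Unmarshal(b, &out)   // out.PublicKey holds the same key material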
r, err := x509.MarshalPKIXPublicKey(p.rsaKey) if err != nil { - // This shouldn't happen with a valid rsa key, but fallback on the - // JSON public key string - return string(p.PublicKey) + // TODO: Gracefully handle these errors. + // See https://github.com/theupdateframework/go-tuf/issues/363 + panic(err) } return string(r) } @@ -54,56 +57,42 @@ func (p *rsaVerifier) MarshalPublicKey() *data.PublicKey { } func (p *rsaVerifier) UnmarshalPublicKey(key *data.PublicKey) error { - if err := json.Unmarshal(key.Value, p); err != nil { - return err - } - var err error - p.rsaKey, err = parseKey(p.PublicKey) - if err != nil { + // Prepare decoder limited to 512Kb + dec := json.NewDecoder(io.LimitReader(bytes.NewReader(key.Value), MaxJSONKeySize)) + + // Unmarshal key value + if err := dec.Decode(p); err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + return fmt.Errorf("tuf: the public key is truncated or too large: %w", err) + } return err } - p.key = key - return nil -} -// parseKey tries to parse a PEM []byte slice by attempting PKCS1 and PKIX in order. -func parseKey(data string) (*rsa.PublicKey, error) { - block, _ := pem.Decode([]byte(data)) - if block == nil { - return nil, errors.New("tuf: pem decoding public key failed") + rsaKey, ok := p.PublicKey.PublicKey.(*rsa.PublicKey) + if !ok { + return fmt.Errorf("invalid public key") } - rsaPub, err := x509.ParsePKCS1PublicKey(block.Bytes) - if err == nil { - return rsaPub, nil - } - key, err := x509.ParsePKIXPublicKey(block.Bytes) - if err == nil { - rsaPub, ok := key.(*rsa.PublicKey) - if !ok { - return nil, errors.New("tuf: invalid rsa key") - } - return rsaPub, nil + + if _, err := x509.MarshalPKIXPublicKey(rsaKey); err != nil { + return fmt.Errorf("marshalling to PKIX key: invalid public key") } - return nil, errors.New("tuf: error unmarshalling rsa key") + + p.rsaKey = rsaKey + p.key = key + return nil } type rsaSigner struct { *rsa.PrivateKey } -type rsaPublic struct { - // PEM encoded public key. 
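	// With parseKey removed above, PKCS#1 "RSA PUBLIC KEY" PEM blocks are no
	// longer accepted; only PKIX blocks survive. Roughly:
	//
	//	-----BEGIN RSA PUBLIC KEY-----   // previously parsed via ParsePKCS1PublicKey
	//	-----BEGIN PUBLIC KEY-----       // the only form PKIXPublicKey now parses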
- PublicKey string `json:"public"` +type rsaPrivateKeyValue struct { + Private string `json:"private"` + Public *PKIXPublicKey `json:"public"` } func (s *rsaSigner) PublicData() *data.PublicKey { - pub, _ := x509.MarshalPKIXPublicKey(s.Public().(*rsa.PublicKey)) - pubBytes := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: pub, - }) - - keyValBytes, _ := json.Marshal(rsaPublic{PublicKey: string(pubBytes)}) + keyValBytes, _ := json.Marshal(rsaVerifier{PublicKey: &PKIXPublicKey{PublicKey: s.Public()}}) return &data.PublicKey{ Type: data.KeyTypeRSASSA_PSS_SHA256, Scheme: data.KeySchemeRSASSA_PSS_SHA256, @@ -122,11 +111,46 @@ func (s *rsaSigner) ContainsID(id string) bool { } func (s *rsaSigner) MarshalPrivateKey() (*data.PrivateKey, error) { - return nil, errors.New("not implemented for test") + priv := x509.MarshalPKCS1PrivateKey(s.PrivateKey) + pemKey := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: priv}) + val, err := json.Marshal(rsaPrivateKeyValue{ + Private: string(pemKey), + Public: &PKIXPublicKey{PublicKey: s.Public()}, + }) + if err != nil { + return nil, err + } + return &data.PrivateKey{ + Type: data.KeyTypeRSASSA_PSS_SHA256, + Scheme: data.KeySchemeRSASSA_PSS_SHA256, + Algorithms: data.HashAlgorithms, + Value: val, + }, nil } func (s *rsaSigner) UnmarshalPrivateKey(key *data.PrivateKey) error { - return errors.New("not implemented for test") + val := rsaPrivateKeyValue{} + if err := json.Unmarshal(key.Value, &val); err != nil { + return err + } + block, _ := pem.Decode([]byte(val.Private)) + if block == nil { + return errors.New("invalid PEM value") + } + if block.Type != "RSA PRIVATE KEY" { + return fmt.Errorf("invalid block type: %s", block.Type) + } + k, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return err + } + if _, err := json.Marshal(rsaVerifier{ + PublicKey: &PKIXPublicKey{PublicKey: k.Public()}}); err != nil { + return fmt.Errorf("invalid public key: %s", err) + } + + s.PrivateKey = k + return nil } func GenerateRsaKey() (*rsaSigner, error) { diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/targets/delegation.go b/vendor/github.com/theupdateframework/go-tuf/pkg/targets/delegation.go index 8e09c05c22..ccd52bae3c 100644 --- a/vendor/github.com/theupdateframework/go-tuf/pkg/targets/delegation.go +++ b/vendor/github.com/theupdateframework/go-tuf/pkg/targets/delegation.go @@ -1,14 +1,17 @@ package targets import ( + "errors" + "github.com/theupdateframework/go-tuf/data" + "github.com/theupdateframework/go-tuf/internal/sets" "github.com/theupdateframework/go-tuf/verify" ) type Delegation struct { Delegator string - Verifier verify.DelegationsVerifier Delegatee data.DelegatedRole + DB *verify.DB } type delegationsIterator struct { @@ -17,19 +20,31 @@ type delegationsIterator struct { visitedRoles map[string]struct{} } +var ErrTopLevelTargetsRoleMissing = errors.New("tuf: top level targets role missing from top level keys DB") + // NewDelegationsIterator initialises an iterator with a first step -// on top level targets -func NewDelegationsIterator(target string) *delegationsIterator { +// on top level targets. 
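//
// A typical traversal, assuming a *verify.DB seeded with the top-level root
// keys, looks like the following sketch (verification elided):
//
//	it, err := targets.NewDelegationsIterator("a/b.txt", db)
//	if err != nil {
//		return err
//	}
//	for {
//		d, ok := it.Next()
//		if !ok {
//			break // target not delegated anywhere
//		}
//		// verify d.Delegatee with d.DB, then it.Add(...) its delegations
//	}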
+func NewDelegationsIterator(target string, topLevelKeysDB *verify.DB) (*delegationsIterator, error) {
+	targetsRole := topLevelKeysDB.GetRole("targets")
+	if targetsRole == nil {
+		return nil, ErrTopLevelTargetsRoleMissing
+	}
+
 	i := &delegationsIterator{
 		target: target,
 		stack: []Delegation{
 			{
-				Delegatee: data.DelegatedRole{Name: "targets"},
+				Delegatee: data.DelegatedRole{
+					Name:      "targets",
+					KeyIDs:    sets.StringSetToSlice(targetsRole.KeyIDs),
+					Threshold: targetsRole.Threshold,
+				},
+				DB: topLevelKeysDB,
 			},
 		},
 		visitedRoles: make(map[string]struct{}),
 	}
-	return i
+	return i, nil
 }
 
@@ -57,7 +72,7 @@ func (d *delegationsIterator) Next() (value Delegation, ok bool) {
 	return delegation, true
 }
 
-func (d *delegationsIterator) Add(roles []data.DelegatedRole, delegator string, verifier verify.DelegationsVerifier) error {
+func (d *delegationsIterator) Add(roles []data.DelegatedRole, delegator string, db *verify.DB) error {
 	for i := len(roles) - 1; i >= 0; i-- {
 		// Push the roles onto the stack in reverse so we get a preorder traversal
 		// of the delegations graph.
@@ -70,7 +85,7 @@ func (d *delegationsIterator) Add(roles []data.DelegatedRole, delegator string,
 		delegation := Delegation{
 			Delegator: delegator,
 			Delegatee: r,
-			Verifier:  verifier,
+			DB:        db,
 		}
 		d.stack = append(d.stack, delegation)
 	}
diff --git a/vendor/github.com/theupdateframework/go-tuf/pkg/targets/hash_bins.go b/vendor/github.com/theupdateframework/go-tuf/pkg/targets/hash_bins.go
new file mode 100644
index 0000000000..95f4405d42
--- /dev/null
+++ b/vendor/github.com/theupdateframework/go-tuf/pkg/targets/hash_bins.go
@@ -0,0 +1,113 @@
+package targets
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const MinDelegationHashPrefixBitLen = 1
+const MaxDelegationHashPrefixBitLen = 32
+
+// hexEncode formats x as a hex string. The hex string is left padded with
+// zeros to padWidth, if necessary.
+func hexEncode(x uint64, padWidth int) string {
+	// Benchmarked to be more than 10x faster than padding with Sprintf.
+	s := strconv.FormatUint(x, 16)
+	if len(s) >= padWidth {
+		return s
+	}
+	return strings.Repeat("0", padWidth-len(s)) + s
+}
+
+const bitsPerHexDigit = 4
+
+// numHexDigits returns the number of hex digits required to encode the given
+// number of bits.
+func numHexDigits(numBits int) int {
+	// ceil(numBits / bitsPerHexDigit)
+	return ((numBits - 1) / bitsPerHexDigit) + 1
+}
+
+// HashBins represents an ordered list of hash bin target roles, which together
+// partition the space of target path hashes into equal-sized buckets based on
+// path hash prefix.
+type HashBins struct {
+	rolePrefix  string
+	bitLen      int
+	hexDigitLen int
+
+	numBins           uint64
+	numPrefixesPerBin uint64
+}
+
+// NewHashBins creates a HashBins partitioning with 2^bitLen buckets.
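//
// For example (illustrative), bitLen 1 splits one hex digit of prefix space
// into two bins:
//
//	hb, _ := targets.NewHashBins("bins_", 1)
//	hb.NumBins()            // 2
//	hb.GetBin(0).RoleName() // "bins_0-7"
//	hb.GetBin(1).RoleName() // "bins_8-f"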
+func NewHashBins(rolePrefix string, bitLen int) (*HashBins, error) { + if bitLen < MinDelegationHashPrefixBitLen || bitLen > MaxDelegationHashPrefixBitLen { + return nil, fmt.Errorf("bitLen is out of bounds, should be between %v and %v inclusive", MinDelegationHashPrefixBitLen, MaxDelegationHashPrefixBitLen) + } + + hexDigitLen := numHexDigits(bitLen) + numBins := uint64(1) << bitLen + + numPrefixesTotal := uint64(1) << (bitsPerHexDigit * hexDigitLen) + numPrefixesPerBin := numPrefixesTotal / numBins + + return &HashBins{ + rolePrefix: rolePrefix, + bitLen: bitLen, + hexDigitLen: hexDigitLen, + numBins: numBins, + numPrefixesPerBin: numPrefixesPerBin, + }, nil +} + +// NumBins returns the number of hash bin partitions. +func (hb *HashBins) NumBins() uint64 { + return hb.numBins +} + +// GetBin returns the HashBin at index i, or nil if i is out of bounds. +func (hb *HashBins) GetBin(i uint64) *HashBin { + if i >= hb.numBins { + return nil + } + + return &HashBin{ + rolePrefix: hb.rolePrefix, + hexDigitLen: hb.hexDigitLen, + first: i * hb.numPrefixesPerBin, + last: ((i + 1) * hb.numPrefixesPerBin) - 1, + } +} + +// HashBin represents a hex prefix range. First should be less than Last. +type HashBin struct { + rolePrefix string + hexDigitLen int + first uint64 + last uint64 +} + +// RoleName returns the name of the role that signs for the HashBin. +func (b *HashBin) RoleName() string { + if b.first == b.last { + return b.rolePrefix + hexEncode(b.first, b.hexDigitLen) + } + + return b.rolePrefix + hexEncode(b.first, b.hexDigitLen) + "-" + hexEncode(b.last, b.hexDigitLen) +} + +// HashPrefixes returns a slice of all hash prefixes in the bin. +func (b *HashBin) HashPrefixes() []string { + n := int(b.last - b.first + 1) + ret := make([]string, int(n)) + + x := b.first + for i := 0; i < n; i++ { + ret[i] = hexEncode(x, b.hexDigitLen) + x++ + } + + return ret +} diff --git a/vendor/github.com/theupdateframework/go-tuf/repo.go b/vendor/github.com/theupdateframework/go-tuf/repo.go index 482cdf00dd..603785f1e3 100644 --- a/vendor/github.com/theupdateframework/go-tuf/repo.go +++ b/vendor/github.com/theupdateframework/go-tuf/repo.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "encoding/json" + "errors" "fmt" "io" "path" @@ -14,13 +15,20 @@ import ( "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/theupdateframework/go-tuf/data" "github.com/theupdateframework/go-tuf/internal/roles" + "github.com/theupdateframework/go-tuf/internal/sets" "github.com/theupdateframework/go-tuf/internal/signer" "github.com/theupdateframework/go-tuf/pkg/keys" + "github.com/theupdateframework/go-tuf/pkg/targets" "github.com/theupdateframework/go-tuf/sign" "github.com/theupdateframework/go-tuf/util" "github.com/theupdateframework/go-tuf/verify" ) +const ( + // The maximum number of delegations to visit while traversing the delegations graph. + defaultMaxDelegations = 32 +) + // topLevelMetadata determines the order signatures are verified when committing. var topLevelMetadata = []string{ "root.json", @@ -34,44 +42,6 @@ var topLevelMetadata = []string{ // names and generate target file metadata with additional custom metadata. type TargetsWalkFunc func(path string, target io.Reader) error -type LocalStore interface { - // GetMeta returns a map from metadata file names (e.g. root.json) to their raw JSON payload or an error. - GetMeta() (map[string]json.RawMessage, error) - - // SetMeta is used to update a metadata file name with a JSON payload. 
- SetMeta(string, json.RawMessage) error - - // WalkStagedTargets calls targetsFn for each staged target file in paths. - // - // If paths is empty, all staged target files will be walked. - WalkStagedTargets(paths []string, targetsFn TargetsWalkFunc) error - - // FileIsStaged determines if a metadata file is currently staged, to avoid incrementing - // version numbers repeatedly while staged. - FileIsStaged(filename string) bool - - // Commit is used to publish staged files to the repository - // - // This will also reset the staged meta to signal incrementing version numbers. - // TUF 1.0 requires that the root metadata version numbers in the repository does not - // gaps. To avoid this, we will only increment the number once until we commit. - Commit(bool, map[string]int, map[string]data.Hashes) error - - // GetSigners return a list of signers for a role. - GetSigners(string) ([]keys.Signer, error) - - // SaveSigner adds a signer to a role. - SaveSigner(string, keys.Signer) error - - // Clean is used to remove all staged metadata files. - Clean() error -} - -type PassphraseChanger interface { - // ChangePassphrase changes the passphrase for a role keys file. - ChangePassphrase(string) error -} - type Repo struct { local LocalStore hashAlgorithms []string @@ -112,11 +82,17 @@ func (r *Repo) Init(consistentSnapshot bool) error { root.ConsistentSnapshot = consistentSnapshot // Set root version to 1 for a new root. root.Version = 1 - err = r.setTopLevelMeta("root.json", root) - if err == nil { - fmt.Println("Repository initialized") + if err = r.setMeta("root.json", root); err != nil { + return err } - return err + + t.Version = 1 + if err = r.setMeta("targets.json", t); err != nil { + return err + } + + fmt.Println("Repository initialized") + return nil } func (r *Repo) topLevelKeysDB() (*verify.DB, error) { @@ -127,15 +103,7 @@ func (r *Repo) topLevelKeysDB() (*verify.DB, error) { } for id, k := range root.Keys { if err := db.AddKey(id, k); err != nil { - // TUF is considering in TAP-12 removing the - // requirement that the keyid hash algorithm be derived - // from the public key. So to be forwards compatible, - // we ignore `ErrWrongID` errors. - // - // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md - if _, ok := err.(verify.ErrWrongID); !ok { - return nil, err - } + return nil, err } } for name, role := range root.Roles { @@ -178,7 +146,7 @@ func (r *Repo) snapshot() (*data.Snapshot, error) { return snapshot, nil } -func (r *Repo) RootVersion() (int, error) { +func (r *Repo) RootVersion() (int64, error) { root, err := r.root() if err != nil { return -1, err @@ -187,23 +155,28 @@ func (r *Repo) RootVersion() (int, error) { } func (r *Repo) GetThreshold(keyRole string) (int, error) { + if roles.IsDelegatedTargetsRole(keyRole) { + // The signature threshold for a delegated targets role + // depends on the incoming delegation edge. + return -1, ErrInvalidRole{keyRole, "only thresholds for top-level roles supported"} + } root, err := r.root() if err != nil { return -1, err } role, ok := root.Roles[keyRole] if !ok { - return -1, ErrInvalidRole{keyRole} + return -1, ErrInvalidRole{keyRole, "role missing from root metadata"} } return role.Threshold, nil } func (r *Repo) SetThreshold(keyRole string, t int) error { - if !roles.IsTopLevelRole(keyRole) { - // Delegations are not currently supported, so return an error if this is not a - // top-level metadata file. 
- return ErrInvalidRole{keyRole} + if roles.IsDelegatedTargetsRole(keyRole) { + // The signature threshold for a delegated targets role + // depends on the incoming delegation edge. + return ErrInvalidRole{keyRole, "only thresholds for top-level roles supported"} } root, err := r.root() if err != nil { @@ -211,7 +184,7 @@ func (r *Repo) SetThreshold(keyRole string, t int) error { } role, ok := root.Roles[keyRole] if !ok { - return ErrInvalidRole{keyRole} + return ErrInvalidRole{keyRole, "role missing from root metadata"} } if role.Threshold == t { // Change was a no-op. @@ -221,7 +194,7 @@ func (r *Repo) SetThreshold(keyRole string, t int) error { if !r.local.FileIsStaged("root.json") { root.Version++ } - return r.setTopLevelMeta("root.json", root) + return r.setMeta("root.json", root) } func (r *Repo) Targets() (data.TargetFiles, error) { @@ -232,16 +205,16 @@ func (r *Repo) Targets() (data.TargetFiles, error) { return targets.Targets, nil } -func (r *Repo) SetTargetsVersion(v int) error { +func (r *Repo) SetTargetsVersion(v int64) error { t, err := r.topLevelTargets() if err != nil { return err } t.Version = v - return r.setTopLevelMeta("targets.json", t) + return r.setMeta("targets.json", t) } -func (r *Repo) TargetsVersion() (int, error) { +func (r *Repo) TargetsVersion() (int64, error) { t, err := r.topLevelTargets() if err != nil { return -1, err @@ -249,16 +222,16 @@ func (r *Repo) TargetsVersion() (int, error) { return t.Version, nil } -func (r *Repo) SetTimestampVersion(v int) error { +func (r *Repo) SetTimestampVersion(v int64) error { ts, err := r.timestamp() if err != nil { return err } ts.Version = v - return r.setTopLevelMeta("timestamp.json", ts) + return r.setMeta("timestamp.json", ts) } -func (r *Repo) TimestampVersion() (int, error) { +func (r *Repo) TimestampVersion() (int64, error) { ts, err := r.timestamp() if err != nil { return -1, err @@ -266,17 +239,17 @@ func (r *Repo) TimestampVersion() (int, error) { return ts.Version, nil } -func (r *Repo) SetSnapshotVersion(v int) error { +func (r *Repo) SetSnapshotVersion(v int64) error { s, err := r.snapshot() if err != nil { return err } s.Version = v - return r.setTopLevelMeta("snapshot.json", s) + return r.setMeta("snapshot.json", s) } -func (r *Repo) SnapshotVersion() (int, error) { +func (r *Repo) SnapshotVersion() (int64, error) { s, err := r.snapshot() if err != nil { return -1, err @@ -285,17 +258,21 @@ func (r *Repo) SnapshotVersion() (int, error) { } func (r *Repo) topLevelTargets() (*data.Targets, error) { - targetsJSON, ok := r.meta["targets.json"] + return r.targets("targets") +} + +func (r *Repo) targets(metaName string) (*data.Targets, error) { + targetsJSON, ok := r.meta[metaName+".json"] if !ok { return data.NewTargets(), nil } s := &data.Signed{} if err := json.Unmarshal(targetsJSON, s); err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling for targets %q: %w", metaName, err) } targets := &data.Targets{} if err := json.Unmarshal(s.Signed, targets); err != nil { - return nil, err + return nil, fmt.Errorf("error unmarshalling signed data for targets %q: %w", metaName, err) } return targets, nil } @@ -317,10 +294,6 @@ func (r *Repo) timestamp() (*data.Timestamp, error) { } func (r *Repo) ChangePassphrase(keyRole string) error { - if !roles.IsTopLevelRole(keyRole) { - return ErrInvalidRole{keyRole} - } - if p, ok := r.local.(PassphraseChanger); ok { return p.ChangePassphrase(keyRole) } @@ -329,35 +302,63 @@ func (r *Repo) ChangePassphrase(keyRole string) error { } func (r *Repo) 
GenKey(role string) ([]string, error) { + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + return r.GenKeyWithExpires(role, data.DefaultExpires(role)) } func (r *Repo) GenKeyWithExpires(keyRole string, expires time.Time) (keyids []string, err error) { - signer, err := keys.GenerateEd25519Key() + return r.GenKeyWithSchemeAndExpires(keyRole, expires, data.KeySchemeEd25519) +} + +func (r *Repo) GenKeyWithSchemeAndExpires(role string, expires time.Time, keyScheme data.KeyScheme) ([]string, error) { + var signer keys.Signer + var err error + switch keyScheme { + case data.KeySchemeEd25519: + signer, err = keys.GenerateEd25519Key() + case data.KeySchemeECDSA_SHA2_P256: + signer, err = keys.GenerateEcdsaKey() + case data.KeySchemeRSASSA_PSS_SHA256: + signer, err = keys.GenerateRsaKey() + default: + return nil, errors.New("unknown key type") + } if err != nil { - return []string{}, err + return nil, err } - if err = r.AddPrivateKeyWithExpires(keyRole, signer, expires); err != nil { - return []string{}, err + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + + if err = r.AddPrivateKeyWithExpires(role, signer, expires); err != nil { + return nil, err } - keyids = signer.PublicData().IDs() - return + return signer.PublicData().IDs(), nil } func (r *Repo) AddPrivateKey(role string, signer keys.Signer) error { + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + return r.AddPrivateKeyWithExpires(role, signer, data.DefaultExpires(role)) } func (r *Repo) AddPrivateKeyWithExpires(keyRole string, signer keys.Signer, expires time.Time) error { - if !roles.IsTopLevelRole(keyRole) { - return ErrInvalidRole{keyRole} + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + + if roles.IsDelegatedTargetsRole(keyRole) { + return ErrInvalidRole{keyRole, "only support adding keys for top-level roles"} } if !validExpires(expires) { return ErrInvalidExpires{expires} } + // Must add signer before adding verification key, so + // root.json can be signed when a new root key is added. if err := r.local.SaveSigner(keyRole, signer); err != nil { return err } @@ -370,10 +371,27 @@ func (r *Repo) AddPrivateKeyWithExpires(keyRole string, signer keys.Signer, expi } func (r *Repo) AddVerificationKey(keyRole string, pk *data.PublicKey) error { + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + return r.AddVerificationKeyWithExpiration(keyRole, pk, data.DefaultExpires(keyRole)) } func (r *Repo) AddVerificationKeyWithExpiration(keyRole string, pk *data.PublicKey, expires time.Time) error { + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). 
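+	// Keys for delegated roles are instead attached when the delegation is
+	// created, e.g. via AddDelegatedRole.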
+ + if roles.IsDelegatedTargetsRole(keyRole) { + return ErrInvalidRole{ + Role: keyRole, + Reason: "only top-level targets roles are supported", + } + } + + if !validExpires(expires) { + return ErrInvalidExpires{expires} + } + root, err := r.root() if err != nil { return err @@ -402,7 +420,7 @@ func (r *Repo) AddVerificationKeyWithExpiration(keyRole string, pk *data.PublicK root.Version++ } - return r.setTopLevelMeta("root.json", root) + return r.setMeta("root.json", root) } func validExpires(expires time.Time) bool { @@ -444,12 +462,18 @@ func (r *Repo) RootKeys() ([]*data.PublicKey, error) { } func (r *Repo) RevokeKey(role, id string) error { + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + return r.RevokeKeyWithExpires(role, id, data.DefaultExpires("root")) } func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error { - if !roles.IsTopLevelRole(keyRole) { - return ErrInvalidRole{keyRole} + // Not compatible with delegated targets roles, since delegated targets keys + // are associated with a delegation (edge), not a role (node). + + if roles.IsDelegatedTargetsRole(keyRole) { + return ErrInvalidRole{keyRole, "only revocations for top-level roles supported"} } if !validExpires(expires) { @@ -507,37 +531,206 @@ func (r *Repo) RevokeKeyWithExpires(keyRole, id string, expires time.Time) error root.Version++ } - err = r.setTopLevelMeta("root.json", root) + err = r.setMeta("root.json", root) if err == nil { fmt.Println("Revoked", keyRole, "key with ID", id, "in root metadata") } return err } -func (r *Repo) jsonMarshal(v interface{}) ([]byte, error) { - b, err := cjson.EncodeCanonical(v) +// AddDelegatedRole is equivalent to AddDelegatedRoleWithExpires, but +// with a default expiration time. +func (r *Repo) AddDelegatedRole(delegator string, delegatedRole data.DelegatedRole, keys []*data.PublicKey) error { + return r.AddDelegatedRoleWithExpires(delegator, delegatedRole, keys, data.DefaultExpires("targets")) +} + +// AddDelegatedRoleWithExpires adds a delegation from the delegator to the +// role specified in the role argument. Key IDs referenced in role.KeyIDs +// should have corresponding Key entries in the keys argument. New metadata is +// written with the given expiration time. 
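+//
+// A minimal sketch of typical use (names below are illustrative):
+//
+//	role := data.DelegatedRole{
+//		Name:      "bin",
+//		KeyIDs:    pk.IDs(),
+//		Paths:     []string{"bin/*"},
+//		Threshold: 1,
+//	}
+//	err := repo.AddDelegatedRoleWithExpires("targets", role, []*data.PublicKey{pk}, expires)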
+func (r *Repo) AddDelegatedRoleWithExpires(delegator string, delegatedRole data.DelegatedRole, keys []*data.PublicKey, expires time.Time) error {
+	expires = expires.Round(time.Second)
+
+	t, err := r.targets(delegator)
+	if err != nil {
+		return fmt.Errorf("error getting delegator (%q) metadata: %w", delegator, err)
+	}
+
+	if t.Delegations == nil {
+		t.Delegations = &data.Delegations{}
+		t.Delegations.Keys = make(map[string]*data.PublicKey)
+	}
+
+	for _, keyID := range delegatedRole.KeyIDs {
+		for _, key := range keys {
+			if key.ContainsID(keyID) {
+				t.Delegations.Keys[keyID] = key
+				break
+			}
+		}
+	}
+
+	for _, r := range t.Delegations.Roles {
+		if r.Name == delegatedRole.Name {
+			return fmt.Errorf("role: %s is already delegated to by %s", delegatedRole.Name, delegator)
+		}
+	}
+	t.Delegations.Roles = append(t.Delegations.Roles, delegatedRole)
+	t.Expires = expires
+
+	delegatorFile := delegator + ".json"
+	if !r.local.FileIsStaged(delegatorFile) {
+		t.Version++
+	}
+
+	err = r.setMeta(delegatorFile, t)
 	if err != nil {
-		return []byte{}, err
+		return fmt.Errorf("error setting metadata for %q: %w", delegatorFile, err)
 	}
+	delegatee := delegatedRole.Name
+	dt, err := r.targets(delegatee)
+	if err != nil {
+		return fmt.Errorf("error getting delegatee (%q) metadata: %w", delegatee, err)
+	}
+	dt.Expires = expires
+
+	delegateeFile := delegatee + ".json"
+	if !r.local.FileIsStaged(delegateeFile) {
+		dt.Version++
+	}
+
+	err = r.setMeta(delegateeFile, dt)
+	if err != nil {
+		return fmt.Errorf("error setting metadata for %q: %w", delegateeFile, err)
+	}
+
+	return nil
+}
+
+// AddDelegatedRolesForPathHashBins is equivalent to
+// AddDelegatedRolesForPathHashBinsWithExpires, but with a default
+// expiration time.
+func (r *Repo) AddDelegatedRolesForPathHashBins(delegator string, bins *targets.HashBins, keys []*data.PublicKey, threshold int) error {
+	return r.AddDelegatedRolesForPathHashBinsWithExpires(delegator, bins, keys, threshold, data.DefaultExpires("targets"))
+}
+
+// AddDelegatedRolesForPathHashBinsWithExpires adds delegations to the
+// delegator role for the given hash bins configuration. New metadata is
+// written with the given expiration time.
+func (r *Repo) AddDelegatedRolesForPathHashBinsWithExpires(delegator string, bins *targets.HashBins, keys []*data.PublicKey, threshold int, expires time.Time) error {
+	keyIDs := []string{}
+	for _, key := range keys {
+		keyIDs = append(keyIDs, key.IDs()...)
+	}
+
+	n := bins.NumBins()
+	for i := uint64(0); i < n; i += 1 {
+		bin := bins.GetBin(i)
+		name := bin.RoleName()
+		err := r.AddDelegatedRoleWithExpires(delegator, data.DelegatedRole{
+			Name:             name,
+			KeyIDs:           sets.DeduplicateStrings(keyIDs),
+			PathHashPrefixes: bin.HashPrefixes(),
+			Threshold:        threshold,
+		}, keys, expires)
+		if err != nil {
+			return fmt.Errorf("error adding delegation from %v to %v: %w", delegator, name, err)
+		}
+	}
+
+	return nil
+}
+
+// ResetTargetsDelegations is equivalent to ResetTargetsDelegationsWithExpires
+// with a default expiration time.
+func (r *Repo) ResetTargetsDelegations(delegator string) error {
+	return r.ResetTargetsDelegationsWithExpires(delegator, data.DefaultExpires("targets"))
+}
+
+// ResetTargetsDelegationsWithExpires removes all targets delegations from the
+// given delegator role. New metadata is written with the given expiration
+// time.
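+// Signers kept in the local store are not touched; only the delegation edges
+// and delegated-key map recorded in the delegator's metadata are cleared.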
+func (r *Repo) ResetTargetsDelegationsWithExpires(delegator string, expires time.Time) error { + t, err := r.targets(delegator) + if err != nil { + return fmt.Errorf("error getting delegator (%q) metadata: %w", delegator, err) + } + + t.Delegations = &data.Delegations{} + t.Delegations.Keys = make(map[string]*data.PublicKey) + + t.Expires = expires.Round(time.Second) + + delegatorFile := delegator + ".json" + if !r.local.FileIsStaged(delegatorFile) { + t.Version++ + } + + err = r.setMeta(delegatorFile, t) + if err != nil { + return fmt.Errorf("error setting metadata for %q: %w", delegatorFile, err) + } + + return nil +} + +func (r *Repo) jsonMarshal(v interface{}) ([]byte, error) { if r.prefix == "" && r.indent == "" { - return b, nil + return json.Marshal(v) + } + return json.MarshalIndent(v, r.prefix, r.indent) +} + +func (r *Repo) dbsForRole(role string) ([]*verify.DB, error) { + dbs := []*verify.DB{} + + if roles.IsTopLevelRole(role) { + db, err := r.topLevelKeysDB() + if err != nil { + return nil, err + } + dbs = append(dbs, db) + } else { + ddbs, err := r.delegatorDBs(role) + if err != nil { + return nil, err + } + + dbs = append(dbs, ddbs...) + } + + return dbs, nil +} + +func (r *Repo) signersForRole(role string) ([]keys.Signer, error) { + dbs, err := r.dbsForRole(role) + if err != nil { + return nil, err } - var out bytes.Buffer - if err := json.Indent(&out, b, r.prefix, r.indent); err != nil { - return []byte{}, err + signers := []keys.Signer{} + for _, db := range dbs { + ss, err := r.getSignersInDB(role, db) + if err != nil { + return nil, err + } + + signers = append(signers, ss...) } - return out.Bytes(), nil + return signers, nil } -func (r *Repo) setTopLevelMeta(roleFilename string, meta interface{}) error { - keys, err := r.getSortedSigningKeys(strings.TrimSuffix(roleFilename, ".json")) +func (r *Repo) setMeta(roleFilename string, meta interface{}) error { + role := strings.TrimSuffix(roleFilename, ".json") + + signers, err := r.signersForRole(role) if err != nil { return err } - s, err := sign.Marshal(meta, keys...) + + s, err := sign.Marshal(meta, signers...) if err != nil { return err } @@ -549,36 +742,48 @@ func (r *Repo) setTopLevelMeta(roleFilename string, meta interface{}) error { return r.local.SetMeta(roleFilename, b) } -func (r *Repo) Sign(roleFilename string) error { - role := strings.TrimSuffix(roleFilename, ".json") - if !roles.IsTopLevelRole(role) { - return ErrInvalidRole{role} +// SignPayload signs the given payload using the key(s) associated with role. +// +// It returns the total number of keys used for signing, 0 (along with +// ErrNoKeys) if no keys were found, or -1 (along with an error) in error cases. 
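+//
+// Illustrative use (assumes payload was produced by SignedMeta):
+//
+//	n, err := repo.SignPayload("targets", payload)
+//	// on success, payload.Signatures now carries n additional signatures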
+func (r *Repo) SignPayload(role string, payload *data.Signed) (int, error) { + keys, err := r.signersForRole(role) + if err != nil { + return -1, err + } + if len(keys) == 0 { + return 0, ErrNoKeys{role} } + for _, k := range keys { + if err = sign.Sign(payload, k); err != nil { + return -1, err + } + } + return len(keys), nil +} - s, err := r.SignedMeta(roleFilename) +func (r *Repo) Sign(roleFilename string) error { + signed, err := r.SignedMeta(roleFilename) if err != nil { return err } - keys, err := r.getSortedSigningKeys(role) - if err != nil { + role := strings.TrimSuffix(roleFilename, ".json") + numKeys, err := r.SignPayload(role, signed) + if errors.Is(err, ErrNoKeys{role}) { + return ErrNoKeys{roleFilename} + } else if err != nil { return err } - if len(keys) == 0 { - return ErrInsufficientKeys{roleFilename} - } - for _, k := range keys { - sign.Sign(s, k) - } - b, err := r.jsonMarshal(s) + b, err := r.jsonMarshal(signed) if err != nil { return err } r.meta[roleFilename] = b err = r.local.SetMeta(roleFilename, b) if err == nil { - fmt.Println("Signed", roleFilename, "with", len(keys), "key(s)") + fmt.Println("Signed", roleFilename, "with", numKeys, "key(s)") } return err } @@ -587,20 +792,28 @@ func (r *Repo) Sign(roleFilename string) error { // The name must be a valid metadata file name, like root.json. func (r *Repo) AddOrUpdateSignature(roleFilename string, signature data.Signature) error { role := strings.TrimSuffix(roleFilename, ".json") - if !roles.IsTopLevelRole(role) { - return ErrInvalidRole{role} - } // Check key ID is in valid for the role. - db, err := r.topLevelKeysDB() + dbs, err := r.dbsForRole(role) if err != nil { return err } - roleData := db.GetRole(role) - if roleData == nil { - return ErrInvalidRole{role} + + if len(dbs) == 0 { + return ErrInvalidRole{role, "no trusted keys for role"} + } + + keyInDB := false + for _, db := range dbs { + roleData := db.GetRole(role) + if roleData == nil { + return ErrInvalidRole{role, "role is not in verifier DB"} + } + if roleData.ValidKey(signature.KeyID) { + keyInDB = true + } } - if !roleData.ValidKey(signature.KeyID) { + if !keyInDB { return verify.ErrInvalidKey } @@ -621,9 +834,11 @@ func (r *Repo) AddOrUpdateSignature(roleFilename string, signature data.Signatur // Check signature on signed meta. Ignore threshold errors as this may not be fully // signed. - if err := db.VerifySignatures(s, role); err != nil { - if _, ok := err.(verify.ErrRoleThreshold); !ok { - return err + for _, db := range dbs { + if err := db.VerifySignatures(s, role); err != nil { + if _, ok := err.(verify.ErrRoleThreshold); !ok { + return err + } } } @@ -636,46 +851,45 @@ func (r *Repo) AddOrUpdateSignature(roleFilename string, signature data.Signatur return r.local.SetMeta(roleFilename, b) } -// getSortedSigningKeys returns available signing keys, sorted by key ID. +// getSignersInDB returns available signing interfaces, sorted by key ID. // // Only keys contained in the keys db are returned (i.e. local keys which have // been revoked are omitted), except for the root role in which case all local // keys are returned (revoked root keys still need to sign new root metadata so // clients can verify the new root.json and update their keys db accordingly). 
-func (r *Repo) getSortedSigningKeys(name string) ([]keys.Signer, error) { - signingKeys, err := r.local.GetSigners(name) +func (r *Repo) getSignersInDB(roleName string, db *verify.DB) ([]keys.Signer, error) { + signers, err := r.local.GetSigners(roleName) if err != nil { return nil, err } - if name == "root" { - sorted := make([]keys.Signer, len(signingKeys)) - copy(sorted, signingKeys) + + if roleName == "root" { + sorted := make([]keys.Signer, len(signers)) + copy(sorted, signers) sort.Sort(signer.ByIDs(sorted)) return sorted, nil } - db, err := r.topLevelKeysDB() - if err != nil { - return nil, err - } - role := db.GetRole(name) + + role := db.GetRole(roleName) if role == nil { return nil, nil } if len(role.KeyIDs) == 0 { return nil, nil } - keys := make([]keys.Signer, 0, len(role.KeyIDs)) - for _, key := range signingKeys { - for _, id := range key.PublicData().IDs() { + + signersInDB := []keys.Signer{} + for _, s := range signers { + for _, id := range s.PublicData().IDs() { if _, ok := role.KeyIDs[id]; ok { - keys = append(keys, key) + signersInDB = append(signersInDB, s) } } } - sort.Sort(signer.ByIDs(keys)) + sort.Sort(signer.ByIDs(signersInDB)) - return keys, nil + return signersInDB, nil } // Used to retrieve the signable portion of the metadata when using an external signing tool. @@ -691,24 +905,143 @@ func (r *Repo) SignedMeta(roleFilename string) (*data.Signed, error) { return s, nil } +// delegatorDBs returns a list of key DBs for all incoming delegations. +func (r *Repo) delegatorDBs(delegateeRole string) ([]*verify.DB, error) { + delegatorDBs := []*verify.DB{} + for metaName := range r.meta { + if roles.IsTopLevelManifest(metaName) && metaName != "targets.json" { + continue + } + roleName := strings.TrimSuffix(metaName, ".json") + + t, err := r.targets(roleName) + if err != nil { + return nil, err + } + + if t.Delegations == nil { + continue + } + + delegatesToRole := false + for _, d := range t.Delegations.Roles { + if d.Name == delegateeRole { + delegatesToRole = true + break + } + } + if !delegatesToRole { + continue + } + + db, err := verify.NewDBFromDelegations(t.Delegations) + if err != nil { + return nil, err + } + + delegatorDBs = append(delegatorDBs, db) + } + + return delegatorDBs, nil +} + +// targetDelegationForPath finds the targets metadata for the role that should +// sign the given path. The final delegation that led to the returned target +// metadata is also returned. +// +// Since there may be multiple targets roles that are able to sign a specific +// path, we must choose which roles's metadata to return. If preferredRole is +// specified (non-empty string) and eligible to sign the given path by way of +// some delegation chain, targets metadata for that role is returned. If +// preferredRole is not specified (""), we return targets metadata for the +// final role visited in the depth-first delegation traversal. 
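+//
+// For example (illustrative): if "targets" delegates to "a", which in turn
+// delegates to "b", and all three are eligible to sign the path, then a
+// preferredRole of "a" returns "a"'s metadata, while an empty preferredRole
+// returns "b"'s, since "b" is the last role visited.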
+func (r *Repo) targetDelegationForPath(path string, preferredRole string) (*data.Targets, *targets.Delegation, error) { + topLevelKeysDB, err := r.topLevelKeysDB() + if err != nil { + return nil, nil, err + } + + iterator, err := targets.NewDelegationsIterator(path, topLevelKeysDB) + if err != nil { + return nil, nil, err + } + d, ok := iterator.Next() + if !ok { + return nil, nil, ErrNoDelegatedTarget{Path: path} + } + + for i := 0; i < defaultMaxDelegations; i++ { + targetsMeta, err := r.targets(d.Delegatee.Name) + if err != nil { + return nil, nil, err + } + + if preferredRole != "" && d.Delegatee.Name == preferredRole { + // The preferredRole is eligible to sign for the given path, and we've + // found its metadata. Return it. + return targetsMeta, &d, nil + } + + if targetsMeta.Delegations != nil && len(targetsMeta.Delegations.Roles) > 0 { + db, err := verify.NewDBFromDelegations(targetsMeta.Delegations) + if err != nil { + return nil, nil, err + } + + // Add delegations to the iterator that are eligible to sign for the + // given path (there may be none). + iterator.Add(targetsMeta.Delegations.Roles, d.Delegatee.Name, db) + } + + next, ok := iterator.Next() + if !ok { // No more roles to traverse. + if preferredRole == "" { + // No preferredRole was given, so return metadata for the final role in the traversal. + return targetsMeta, &d, nil + } else { + // There are no more roles to traverse, so preferredRole is either an + // invalid role, or not eligible to sign the given path. + return nil, nil, ErrNoDelegatedTarget{Path: path} + } + } + + d = next + } + + return nil, nil, ErrNoDelegatedTarget{Path: path} +} + func (r *Repo) AddTarget(path string, custom json.RawMessage) error { return r.AddTargets([]string{path}, custom) } +func (r *Repo) AddTargetToPreferredRole(path string, custom json.RawMessage, preferredRole string) error { + return r.AddTargetsToPreferredRole([]string{path}, custom, preferredRole) +} + func (r *Repo) AddTargets(paths []string, custom json.RawMessage) error { - return r.AddTargetsWithExpires(paths, custom, data.DefaultExpires("targets")) + return r.AddTargetsToPreferredRole(paths, custom, "") +} + +func (r *Repo) AddTargetsToPreferredRole(paths []string, custom json.RawMessage, preferredRole string) error { + return r.AddTargetsWithExpiresToPreferredRole(paths, custom, data.DefaultExpires("targets"), preferredRole) } func (r *Repo) AddTargetsWithDigest(digest string, digestAlg string, length int64, path string, custom json.RawMessage) error { + // TODO: Rename this to AddTargetWithDigest + // https://github.com/theupdateframework/go-tuf/issues/242 + expires := data.DefaultExpires("targets") + path = util.NormalizeTarget(path) - // TODO: support delegated targets - t, err := r.topLevelTargets() + targetsMeta, delegation, err := r.targetDelegationForPath(path, "") if err != nil { return err } + // This is the targets role that needs to sign the target file. 
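+	// Note that the supplied digest and length are recorded below as-is; the
+	// target file's contents are never read or re-hashed by this method.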
+ targetsRoleName := delegation.Delegatee.Name - meta := data.FileMeta{Length: length, Hashes: make(data.Hashes, 1)} + meta := data.TargetFileMeta{FileMeta: data.FileMeta{Length: length, Hashes: make(data.Hashes, 1)}} meta.Hashes[digestAlg], err = hex.DecodeString(digest) if err != nil { return err @@ -718,13 +1051,28 @@ func (r *Repo) AddTargetsWithDigest(digest string, digestAlg string, length int6 // metadata if len(custom) > 0 { meta.Custom = &custom - } else if t, ok := t.Targets[path]; ok { + } else if t, ok := targetsMeta.Targets[path]; ok { meta.Custom = t.Custom } - t.Targets[path] = data.TargetFileMeta{FileMeta: meta} + // What does G2 mean? Copying and pasting this comment from elsewhere in this file. + // G2 -> we no longer desire any readers to ever observe non-prefix targets. + delete(targetsMeta.Targets, "/"+path) + targetsMeta.Targets[path] = meta + + targetsMeta.Expires = expires.Round(time.Second) + + manifestName := targetsRoleName + ".json" + if !r.local.FileIsStaged(manifestName) { + targetsMeta.Version++ + } + + err = r.setMeta(manifestName, targetsMeta) + if err != nil { + return fmt.Errorf("error setting metadata for %q: %w", manifestName, err) + } - return r.writeTargetWithExpires(t, expires) + return nil } func (r *Repo) AddTargetWithExpires(path string, custom json.RawMessage, expires time.Time) error { @@ -732,57 +1080,98 @@ func (r *Repo) AddTargetWithExpires(path string, custom json.RawMessage, expires } func (r *Repo) AddTargetsWithExpires(paths []string, custom json.RawMessage, expires time.Time) error { + return r.AddTargetsWithExpiresToPreferredRole(paths, custom, expires, "") +} + +func (r *Repo) AddTargetWithExpiresToPreferredRole(path string, custom json.RawMessage, expires time.Time, preferredRole string) error { + return r.AddTargetsWithExpiresToPreferredRole([]string{path}, custom, expires, preferredRole) +} + +// AddTargetsWithExpiresToPreferredRole signs the staged targets at `paths`. +// +// If preferredRole is not the empty string, the target is added to the given +// role's manifest if delegations allow it. If delegations do not allow the +// preferredRole to sign the given path, an error is returned. +func (r *Repo) AddTargetsWithExpiresToPreferredRole(paths []string, custom json.RawMessage, expires time.Time, preferredRole string) error { if !validExpires(expires) { return ErrInvalidExpires{expires} } - t, err := r.topLevelTargets() - if err != nil { - return err - } normalizedPaths := make([]string, len(paths)) for i, path := range paths { normalizedPaths[i] = util.NormalizeTarget(path) } + + // As we iterate through staged targets files, we accumulate changes to their + // corresponding targets metadata. + updatedTargetsMeta := map[string]*data.Targets{} + if err := r.local.WalkStagedTargets(normalizedPaths, func(path string, target io.Reader) (err error) { - meta, err := util.GenerateTargetFileMeta(target, r.hashAlgorithms...) + originalMeta, delegation, err := r.targetDelegationForPath(path, preferredRole) if err != nil { return err } - path = util.NormalizeTarget(path) - // if we have custom metadata, set it, otherwise maintain + // This is the targets role that needs to sign the target file. + targetsRoleName := delegation.Delegatee.Name + + targetsMeta := originalMeta + if tm, ok := updatedTargetsMeta[targetsRoleName]; ok { + // Metadata in updatedTargetsMeta overrides staged/commited metadata. + targetsMeta = tm + } + + fileMeta, err := util.GenerateTargetFileMeta(target, r.hashAlgorithms...) 
+ if err != nil { + return err + } + + // If we have custom metadata, set it, otherwise maintain // existing metadata if present if len(custom) > 0 { - meta.Custom = &custom - } else if t, ok := t.Targets[path]; ok { - meta.Custom = t.Custom + fileMeta.Custom = &custom + } else if tf, ok := targetsMeta.Targets[path]; ok { + fileMeta.Custom = tf.Custom } // G2 -> we no longer desire any readers to ever observe non-prefix targets. - delete(t.Targets, "/"+path) - t.Targets[path] = meta + delete(targetsMeta.Targets, "/"+path) + targetsMeta.Targets[path] = fileMeta + + updatedTargetsMeta[targetsRoleName] = targetsMeta + return nil }); err != nil { return err } - return r.writeTargetWithExpires(t, expires) -} -func (r *Repo) writeTargetWithExpires(t *data.Targets, expires time.Time) error { - t.Expires = expires.Round(time.Second) - if !r.local.FileIsStaged("targets.json") { - t.Version++ + if len(updatedTargetsMeta) == 0 { + // This is potentially unexpected behavior kept for backwards compatibility. + // See https://github.com/theupdateframework/go-tuf/issues/243 + t, err := r.topLevelTargets() + if err != nil { + return err + } + + updatedTargetsMeta["targets"] = t } - err := r.setTopLevelMeta("targets.json", t) - if err == nil { - fmt.Println("Added/staged targets:") - for k := range t.Targets { - fmt.Println("*", k) + exp := expires.Round(time.Second) + for roleName, targetsMeta := range updatedTargetsMeta { + targetsMeta.Expires = exp + + manifestName := roleName + ".json" + if !r.local.FileIsStaged(manifestName) { + targetsMeta.Version++ + } + + err := r.setMeta(manifestName, targetsMeta) + if err != nil { + return fmt.Errorf("error setting metadata for %q: %w", manifestName, err) } } - return err + + return nil } func (r *Repo) RemoveTarget(path string) error { @@ -803,7 +1192,23 @@ func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error return ErrInvalidExpires{expires} } - t, err := r.topLevelTargets() + for metaName := range r.meta { + if metaName != "targets.json" && !roles.IsDelegatedTargetsManifest(metaName) { + continue + } + + err := r.removeTargetsWithExpiresFromMeta(metaName, paths, expires) + if err != nil { + return fmt.Errorf("could not remove %v from %v: %w", paths, metaName, err) + } + } + + return nil +} + +func (r *Repo) removeTargetsWithExpiresFromMeta(metaName string, paths []string, expires time.Time) error { + roleName := strings.TrimSuffix(metaName, ".json") + t, err := r.targets(roleName) if err != nil { return err } @@ -818,7 +1223,7 @@ func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error for _, path := range paths { path = util.NormalizeTarget(path) if _, ok := t.Targets[path]; !ok { - fmt.Println("The following target is not present:", path) + fmt.Printf("[%v] The following target is not present: %v\n", metaName, path) continue } removed = true @@ -832,23 +1237,23 @@ func (r *Repo) RemoveTargetsWithExpires(paths []string, expires time.Time) error } } t.Expires = expires.Round(time.Second) - if !r.local.FileIsStaged("targets.json") { + if !r.local.FileIsStaged(metaName) { t.Version++ } - err = r.setTopLevelMeta("targets.json", t) + err = r.setMeta(metaName, t) if err == nil { - fmt.Println("Removed targets:") + fmt.Printf("[%v] Removed targets:\n", metaName) for _, v := range removed_targets { fmt.Println("*", v) } if len(t.Targets) != 0 { - fmt.Println("Added/staged targets:") + fmt.Printf("[%v] Added/staged targets:\n", metaName) for k := range t.Targets { fmt.Println("*", k) } } else { - fmt.Println("There 
are no added/staged targets") + fmt.Printf("[%v] There are no added/staged targets\n", metaName) } } return err @@ -859,7 +1264,16 @@ func (r *Repo) Snapshot() error { } func (r *Repo) snapshotMetadata() []string { - return []string{"targets.json"} + ret := []string{"targets.json"} + + for name := range r.meta { + if !roles.IsVersionedManifest(name) && + roles.IsDelegatedTargetsManifest(name) { + ret = append(ret, name) + } + } + + return ret } func (r *Repo) SnapshotWithExpires(expires time.Time) error { @@ -871,13 +1285,14 @@ func (r *Repo) SnapshotWithExpires(expires time.Time) error { if err != nil { return err } - db, err := r.topLevelKeysDB() - if err != nil { + + // Verify root metadata before verifying signatures on role metadata. + if err := r.verifySignatures("root.json"); err != nil { return err } for _, metaName := range r.snapshotMetadata() { - if err := r.verifySignature(metaName, db); err != nil { + if err := r.verifySignatures(metaName); err != nil { return err } var err error @@ -890,7 +1305,7 @@ func (r *Repo) SnapshotWithExpires(expires time.Time) error { if !r.local.FileIsStaged("snapshot.json") { snapshot.Version++ } - err = r.setTopLevelMeta("snapshot.json", snapshot) + err = r.setMeta("snapshot.json", snapshot) if err == nil { fmt.Println("Staged snapshot.json metadata with expiration date:", snapshot.Expires) } @@ -906,11 +1321,7 @@ func (r *Repo) TimestampWithExpires(expires time.Time) error { return ErrInvalidExpires{expires} } - db, err := r.topLevelKeysDB() - if err != nil { - return err - } - if err := r.verifySignature("snapshot.json", db); err != nil { + if err := r.verifySignatures("snapshot.json"); err != nil { return err } timestamp, err := r.timestamp() @@ -926,56 +1337,103 @@ func (r *Repo) TimestampWithExpires(expires time.Time) error { timestamp.Version++ } - err = r.setTopLevelMeta("timestamp.json", timestamp) + err = r.setMeta("timestamp.json", timestamp) if err == nil { fmt.Println("Staged timestamp.json metadata with expiration date:", timestamp.Expires) } return err } -func (r *Repo) fileVersions() (map[string]int, error) { - root, err := r.root() - if err != nil { - return nil, err - } - targets, err := r.topLevelTargets() - if err != nil { - return nil, err - } - snapshot, err := r.snapshot() - if err != nil { - return nil, err +func (r *Repo) fileVersions() (map[string]int64, error) { + versions := make(map[string]int64) + + for fileName := range r.meta { + if roles.IsVersionedManifest(fileName) { + continue + } + + roleName := strings.TrimSuffix(fileName, ".json") + + var version int64 + + switch roleName { + case "root": + root, err := r.root() + if err != nil { + return nil, err + } + version = root.Version + case "snapshot": + snapshot, err := r.snapshot() + if err != nil { + return nil, err + } + version = snapshot.Version + case "timestamp": + continue + default: + // Targets or delegated targets manifest. 
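+			// Both are read via r.targets and carry their own version field.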
+ targets, err := r.targets(roleName) + if err != nil { + return nil, err + } + + version = targets.Version + } + + versions[fileName] = version } - versions := make(map[string]int) - versions["root.json"] = root.Version - versions["targets.json"] = targets.Version - versions["snapshot.json"] = snapshot.Version + return versions, nil } func (r *Repo) fileHashes() (map[string]data.Hashes, error) { hashes := make(map[string]data.Hashes) - timestamp, err := r.timestamp() - if err != nil { - return nil, err - } - snapshot, err := r.snapshot() - if err != nil { - return nil, err - } - if m, ok := snapshot.Meta["targets.json"]; ok { - hashes["targets.json"] = m.Hashes - } - if m, ok := timestamp.Meta["snapshot.json"]; ok { - hashes["snapshot.json"] = m.Hashes - } - t, err := r.topLevelTargets() - if err != nil { - return nil, err - } - for name, meta := range t.Targets { - hashes[path.Join("targets", name)] = meta.Hashes + + for fileName := range r.meta { + if roles.IsVersionedManifest(fileName) { + continue + } + + roleName := strings.TrimSuffix(fileName, ".json") + + switch roleName { + case "snapshot": + timestamp, err := r.timestamp() + if err != nil { + return nil, err + } + + if m, ok := timestamp.Meta[fileName]; ok { + hashes[fileName] = m.Hashes + } + case "timestamp": + continue + default: + snapshot, err := r.snapshot() + if err != nil { + return nil, err + } + if m, ok := snapshot.Meta[fileName]; ok { + hashes[fileName] = m.Hashes + } + + if roleName != "root" { + // Scalability issue: Commit/fileHashes loads all targets metadata into memory + // https://github.com/theupdateframework/go-tuf/issues/245 + t, err := r.targets(roleName) + if err != nil { + return nil, err + } + for name, m := range t.Targets { + hashes[path.Join("targets", name)] = m.Hashes + } + } + + } + } + return hashes, nil } @@ -1030,13 +1488,8 @@ func (r *Repo) Commit() error { return fmt.Errorf("tuf: invalid snapshot.json in timestamp.json: %s", err) } - // verify all signatures are correct - db, err := r.topLevelKeysDB() - if err != nil { - return err - } for _, name := range topLevelMetadata { - if err := r.verifySignature(name, db); err != nil { + if err := r.verifySignatures(name); err != nil { return err } } @@ -1065,15 +1518,25 @@ func (r *Repo) Clean() error { return err } -func (r *Repo) verifySignature(roleFilename string, db *verify.DB) error { - s, err := r.SignedMeta(roleFilename) +func (r *Repo) verifySignatures(metaFilename string) error { + s, err := r.SignedMeta(metaFilename) if err != nil { return err } - role := strings.TrimSuffix(roleFilename, ".json") - if err := db.Verify(s, role, 0); err != nil { - return ErrInsufficientSignatures{roleFilename, err} + + role := strings.TrimSuffix(metaFilename, ".json") + + dbs, err := r.dbsForRole(role) + if err != nil { + return err + } + + for _, db := range dbs { + if err := db.Verify(s, role, 0); err != nil { + return ErrInsufficientSignatures{metaFilename, err} + } } + return nil } @@ -1092,3 +1555,58 @@ func (r *Repo) timestampFileMeta(roleFilename string) (data.TimestampFileMeta, e } return util.GenerateTimestampFileMeta(bytes.NewReader(b), r.hashAlgorithms...) 
} + +func (r *Repo) Payload(roleFilename string) ([]byte, error) { + s, err := r.SignedMeta(roleFilename) + if err != nil { + return nil, err + } + + p, err := cjson.EncodeCanonical(s.Signed) + if err != nil { + return nil, err + } + + return p, nil +} + +func (r *Repo) CheckRoleUnexpired(role string, validAt time.Time) error { + var expires time.Time + switch role { + case "root": + root, err := r.root() + if err != nil { + return err + } + expires = root.Expires + case "snapshot": + snapshot, err := r.snapshot() + if err != nil { + return err + } + expires = snapshot.Expires + case "timestamp": + timestamp, err := r.timestamp() + if err != nil { + return err + } + expires = timestamp.Expires + case "targets": + targets, err := r.topLevelTargets() + if err != nil { + return err + } + expires = targets.Expires + default: + return fmt.Errorf("invalid role: %s", role) + } + if expires.Before(validAt) || expires.Equal(validAt) { + return fmt.Errorf("role expired on: %s", expires) + } + return nil +} + +// GetMeta returns the underlying meta file map from the store. +func (r *Repo) GetMeta() (map[string]json.RawMessage, error) { + return r.local.GetMeta() +} diff --git a/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt b/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt new file mode 100644 index 0000000000..a364c2d70a --- /dev/null +++ b/vendor/github.com/theupdateframework/go-tuf/requirements-test.txt @@ -0,0 +1,5 @@ +iso8601==1.0.2 +requests==2.28.1 +securesystemslib==0.22.0 +six==1.16.0 +tuf==1.0.0 \ No newline at end of file diff --git a/vendor/github.com/theupdateframework/go-tuf/sign/sign.go b/vendor/github.com/theupdateframework/go-tuf/sign/sign.go index 06886b5d7d..6b15b6b4f7 100644 --- a/vendor/github.com/theupdateframework/go-tuf/sign/sign.go +++ b/vendor/github.com/theupdateframework/go-tuf/sign/sign.go @@ -1,6 +1,8 @@ package sign import ( + "encoding/json" + "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/theupdateframework/go-tuf/data" "github.com/theupdateframework/go-tuf/pkg/keys" @@ -22,7 +24,12 @@ func Sign(s *data.Signed, k keys.Signer) error { } } - sig, err := k.SignMessage(s.Signed) + canonical, err := cjson.EncodeCanonical(s.Signed) + if err != nil { + return err + } + + sig, err := k.SignMessage(canonical) if err != nil { return err } @@ -39,7 +46,7 @@ func Sign(s *data.Signed, k keys.Signer) error { } func Marshal(v interface{}, keys ...keys.Signer) (*data.Signed, error) { - b, err := cjson.EncodeCanonical(v) + b, err := json.Marshal(v) if err != nil { return nil, err } diff --git a/vendor/github.com/theupdateframework/go-tuf/util/util.go b/vendor/github.com/theupdateframework/go-tuf/util/util.go index ac86761472..049169db1a 100644 --- a/vendor/github.com/theupdateframework/go-tuf/util/util.go +++ b/vendor/github.com/theupdateframework/go-tuf/util/util.go @@ -10,7 +10,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -30,8 +29,8 @@ func (e ErrWrongLength) Error() string { } type ErrWrongVersion struct { - Expected int - Actual int + Expected int64 + Actual int64 } func (e ErrWrongVersion) Error() string { @@ -86,6 +85,32 @@ func FileMetaEqual(actual data.FileMeta, expected data.FileMeta) error { return nil } +func BytesMatchLenAndHashes(fetched []byte, length int64, hashes data.Hashes) error { + flen := int64(len(fetched)) + if length != 0 && flen != length { + return ErrWrongLength{length, flen} + } + + for alg, expected := range hashes { + var h hash.Hash + switch alg { + case 
"sha256": + h = sha256.New() + case "sha512": + h = sha512.New() + default: + return ErrUnknownHashAlgorithm{alg} + } + h.Write(fetched) + hash := h.Sum(nil) + if !hmac.Equal(hash, expected) { + return ErrWrongHash{alg, expected, hash} + } + } + + return nil +} + func hashEqual(actual data.Hashes, expected data.Hashes) error { hashChecked := false for typ, hash := range expected { @@ -102,7 +127,7 @@ func hashEqual(actual data.Hashes, expected data.Hashes) error { return nil } -func versionEqual(actual int, expected int) error { +func VersionEqual(actual int64, expected int64) error { if actual != expected { return ErrWrongVersion{expected, actual} } @@ -118,14 +143,14 @@ func SnapshotFileMetaEqual(actual data.SnapshotFileMeta, expected data.SnapshotF if expected.Length != 0 && actual.Length != expected.Length { return ErrWrongLength{expected.Length, actual.Length} } - + // 5.6.2 - Check against snapshot role's targets hash if len(expected.Hashes) != 0 { if err := hashEqual(actual.Hashes, expected.Hashes); err != nil { return err } } - - if err := versionEqual(actual.Version, expected.Version); err != nil { + // 5.6.4 - Check against snapshot role's snapshot version + if err := VersionEqual(actual.Version, expected.Version); err != nil { return err } @@ -137,14 +162,19 @@ func TargetFileMetaEqual(actual data.TargetFileMeta, expected data.TargetFileMet } func TimestampFileMetaEqual(actual data.TimestampFileMeta, expected data.TimestampFileMeta) error { - // As opposed to snapshots, the length and hashes are still required in - // TUF-1.0. See: - // https://github.com/theupdateframework/specification/issues/38 - if err := FileMetaEqual(actual.FileMeta, expected.FileMeta); err != nil { - return err + // TUF no longer considers the length and hashes to be a required + // member of Timestamp. 
+ if expected.Length != 0 && actual.Length != expected.Length { + return ErrWrongLength{expected.Length, actual.Length} } - - if err := versionEqual(actual.Version, expected.Version); err != nil { + // 5.5.2 - Check against timestamp role's snapshot hash + if len(expected.Hashes) != 0 { + if err := hashEqual(actual.Hashes, expected.Hashes); err != nil { + return err + } + } + // 5.5.4 - Check against timestamp role's snapshot version + if err := VersionEqual(actual.Version, expected.Version); err != nil { return err } @@ -171,7 +201,7 @@ func GenerateFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, err hashes[hashAlgorithm] = h r = io.TeeReader(r, h) } - n, err := io.Copy(ioutil.Discard, r) + n, err := io.Copy(io.Discard, r) if err != nil { return data.FileMeta{}, err } @@ -183,11 +213,11 @@ func GenerateFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, err } type versionedMeta struct { - Version int `json:"version"` + Version int64 `json:"version"` } -func generateVersionedFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, int, error) { - b, err := ioutil.ReadAll(r) +func generateVersionedFileMeta(r io.Reader, hashAlgorithms ...string) (data.FileMeta, int64, error) { + b, err := io.ReadAll(r) if err != nil { return data.FileMeta{}, 0, err } @@ -216,8 +246,9 @@ func GenerateSnapshotFileMeta(r io.Reader, hashAlgorithms ...string) (data.Snaps return data.SnapshotFileMeta{}, err } return data.SnapshotFileMeta{ - FileMeta: m, - Version: v, + Length: m.Length, + Hashes: m.Hashes, + Version: v, }, nil } @@ -237,8 +268,9 @@ func GenerateTimestampFileMeta(r io.Reader, hashAlgorithms ...string) (data.Time return data.TimestampFileMeta{}, err } return data.TimestampFileMeta{ - FileMeta: m, - Version: v, + Length: m.Length, + Hashes: m.Hashes, + Version: v, }, nil } @@ -253,8 +285,8 @@ func NormalizeTarget(p string) string { return strings.TrimPrefix(path.Join("/", p), "/") } -func VersionedPath(p string, version int) string { - return path.Join(path.Dir(p), strconv.Itoa(version)+"."+path.Base(p)) +func VersionedPath(p string, version int64) string { + return path.Join(path.Dir(p), strconv.FormatInt(version, 10)+"."+path.Base(p)) } func HashedPaths(p string, hashes data.Hashes) []string { @@ -268,7 +300,7 @@ func HashedPaths(p string, hashes data.Hashes) []string { func AtomicallyWriteFile(filename string, data []byte, perm os.FileMode) error { dir, name := filepath.Split(filename) - f, err := ioutil.TempFile(dir, name) + f, err := os.CreateTemp(dir, name) if err != nil { return err } diff --git a/vendor/github.com/theupdateframework/go-tuf/verify/db.go b/vendor/github.com/theupdateframework/go-tuf/verify/db.go index f80949042d..04f5bf1c44 100644 --- a/vendor/github.com/theupdateframework/go-tuf/verify/db.go +++ b/vendor/github.com/theupdateframework/go-tuf/verify/db.go @@ -28,59 +28,53 @@ func NewDB() *DB { } } -type DelegationsVerifier struct { - DB *DB -} - -func (d *DelegationsVerifier) Unmarshal(b []byte, v interface{}, role string, minVersion int) error { - return d.DB.Unmarshal(b, v, role, minVersion) -} - -// NewDelegationsVerifier returns a DelegationsVerifier that verifies delegations -// of a given Targets. It reuses the DB struct to leverage verified keys, roles -// unmarshals. -func NewDelegationsVerifier(d *data.Delegations) (DelegationsVerifier, error) { +// NewDBFromDelegations returns a DB that verifies delegations +// of a given Targets. 
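+// It reuses the DB machinery so delegated roles and keys can be verified with
+// the same helpers as top-level roles.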
+func NewDBFromDelegations(d *data.Delegations) (*DB, error) { db := &DB{ roles: make(map[string]*Role, len(d.Roles)), verifiers: make(map[string]keys.Verifier, len(d.Keys)), } for _, r := range d.Roles { if _, ok := roles.TopLevelRoles[r.Name]; ok { - return DelegationsVerifier{}, ErrInvalidDelegatedRole + return nil, ErrInvalidDelegatedRole } role := &data.Role{Threshold: r.Threshold, KeyIDs: r.KeyIDs} - if err := db.addRole(r.Name, role); err != nil { - return DelegationsVerifier{}, err + if err := db.AddRole(r.Name, role); err != nil { + return nil, err } } for id, k := range d.Keys { if err := db.AddKey(id, k); err != nil { - return DelegationsVerifier{}, err + return nil, err } } - return DelegationsVerifier{db}, nil + return db, nil } func (db *DB) AddKey(id string, k *data.PublicKey) error { - if !k.ContainsID(id) { - return ErrWrongID{} - } verifier, err := keys.GetVerifier(k) if err != nil { - return ErrInvalidKey + return err // ErrInvalidKey + } + + // TUF is considering in TAP-12 removing the + // requirement that the keyid hash algorithm be derived + // from the public key. So to be forwards compatible, + // we allow any key ID, rather than checking k.ContainsID(id) + // + // AddKey should be idempotent, so we allow re-adding the same PublicKey. + // + // TAP-12: https://github.com/theupdateframework/taps/blob/master/tap12.md + if oldVerifier, exists := db.verifiers[id]; exists && oldVerifier.Public() != verifier.Public() { + return ErrRepeatID{id} } + db.verifiers[id] = verifier return nil } func (db *DB) AddRole(name string, r *data.Role) error { - if !roles.IsTopLevelRole(name) { - return ErrInvalidRole - } - return db.addRole(name, r) -} - -func (db *DB) addRole(name string, r *data.Role) error { if r.Threshold < 1 { return ErrInvalidThreshold } @@ -90,9 +84,6 @@ func (db *DB) addRole(name string, r *data.Role) error { Threshold: r.Threshold, } for _, id := range r.KeyIDs { - if len(id) != data.KeyIDLength { - return ErrInvalidKeyID - } role.KeyIDs[id] = struct{}{} } diff --git a/vendor/github.com/theupdateframework/go-tuf/verify/errors.go b/vendor/github.com/theupdateframework/go-tuf/verify/errors.go index c84f2166aa..f71d4bda94 100644 --- a/vendor/github.com/theupdateframework/go-tuf/verify/errors.go +++ b/vendor/github.com/theupdateframework/go-tuf/verify/errors.go @@ -18,12 +18,15 @@ var ( ErrInvalidDelegatedRole = errors.New("tuf: invalid delegated role") ErrInvalidKeyID = errors.New("tuf: invalid key id") ErrInvalidThreshold = errors.New("tuf: invalid role threshold") + ErrMissingTargetFile = errors.New("tuf: missing previously listed targets metadata file") ) -type ErrWrongID struct{} +type ErrRepeatID struct { + KeyID string +} -func (ErrWrongID) Error() string { - return "tuf: key id mismatch" +func (e ErrRepeatID) Error() string { + return fmt.Sprintf("tuf: duplicate key id (%s)", e.KeyID) } type ErrUnknownRole struct { @@ -43,8 +46,8 @@ func (e ErrExpired) Error() string { } type ErrLowVersion struct { - Actual int - Current int + Actual int64 + Current int64 } func (e ErrLowVersion) Error() string { @@ -52,8 +55,8 @@ func (e ErrLowVersion) Error() string { } type ErrWrongVersion struct { - Given int - Expected int + Given int64 + Expected int64 } func (e ErrWrongVersion) Error() string { diff --git a/vendor/github.com/theupdateframework/go-tuf/verify/verify.go b/vendor/github.com/theupdateframework/go-tuf/verify/verify.go index 2586a60aa6..f5675a250e 100644 --- a/vendor/github.com/theupdateframework/go-tuf/verify/verify.go +++ 
b/vendor/github.com/theupdateframework/go-tuf/verify/verify.go
@@ -13,10 +13,10 @@ import (
 type signedMeta struct {
 	Type    string    `json:"_type"`
 	Expires time.Time `json:"expires"`
-	Version int       `json:"version"`
+	Version int64     `json:"version"`
 }
 
-func (db *DB) VerifyIgnoreExpiredCheck(s *data.Signed, role string, minVersion int) error {
+func (db *DB) VerifyIgnoreExpiredCheck(s *data.Signed, role string, minVersion int64) error {
 	if err := db.VerifySignatures(s, role); err != nil {
 		return err
 	}
@@ -46,8 +46,8 @@ func (db *DB) VerifyIgnoreExpiredCheck(s *data.Signed, role string, minVersion i
 	return nil
 }
 
-func (db *DB) Verify(s *data.Signed, role string, minVersion int) error {
-
+func (db *DB) Verify(s *data.Signed, role string, minVersion int64) error {
+	// Verify signatures and versions
 	err := db.VerifyIgnoreExpiredCheck(s, role, minVersion)
 
 	if err != nil {
@@ -58,7 +58,7 @@ func (db *DB) Verify(s *data.Signed, role string, minVersion int) error {
 	if err := json.Unmarshal(s.Signed, sm); err != nil {
 		return err
 	}
-
+	// Verify expiration
 	if IsExpired(sm.Expires) {
 		return ErrExpired{sm.Expires}
 	}
@@ -92,8 +92,8 @@ func (db *DB) VerifySignatures(s *data.Signed, role string) error {
 	// Verify that a threshold of keys signed the data. Since keys can have
 	// multiple key ids, we need to protect against multiple attached
 	// signatures that just differ on the key id.
-	seen := make(map[string]struct{})
-	valid := 0
+	verifiedKeyIDs := make(map[string]struct{})
+	numVerifiedKeys := 0
 	for _, sig := range s.Signatures {
 		if !roleData.ValidKey(sig.KeyID) {
 			continue
@@ -104,27 +104,38 @@ func (db *DB) VerifySignatures(s *data.Signed, role string) error {
 		}
 
 		if err := verifier.Verify(msg, sig.Signature); err != nil {
+			// FIXME: don't err out on the 1st bad signature.
 			return ErrInvalid
 		}
 
 		// Only consider this key valid if we haven't seen any of its
 		// key ids before.
-		if _, ok := seen[sig.KeyID]; !ok {
-			for _, id := range verifier.MarshalPublicKey().IDs() {
-				seen[id] = struct{}{}
+		// Careful: we must not rely on the key IDs _declared in the file_,
+		// instead we get to decide what key IDs this key corresponds to.
+		// XXX dangerous; better stop supporting multiple key IDs altogether.
+		keyIDs := verifier.MarshalPublicKey().IDs()
+		wasKeySeen := false
+		for _, keyID := range keyIDs {
+			if _, present := verifiedKeyIDs[keyID]; present {
+				wasKeySeen = true
+			}
+		}
+		if !wasKeySeen {
+			for _, id := range keyIDs {
+				verifiedKeyIDs[id] = struct{}{}
 			}
-			valid++
+			numVerifiedKeys++
 		}
 	}
-	if valid < roleData.Threshold {
-		return ErrRoleThreshold{roleData.Threshold, valid}
+	if numVerifiedKeys < roleData.Threshold {
+		return ErrRoleThreshold{roleData.Threshold, numVerifiedKeys}
 	}
 	return nil
 }
 
-func (db *DB) Unmarshal(b []byte, v interface{}, role string, minVersion int) error {
+func (db *DB) Unmarshal(b []byte, v interface{}, role string, minVersion int64) error {
 	s := &data.Signed{}
 	if err := json.Unmarshal(b, s); err != nil {
 		return err
@@ -136,7 +147,7 @@ func (db *DB) Unmarshal(b []byte, v interface{}, role string, minVersion int) er
 	}
 
// UnmarshalIgnoreExpired is exactly like Unmarshal, except it ignores expiration errors.
-func (db *DB) UnmarshalIgnoreExpired(b []byte, v interface{}, role string, minVersion int) error { +func (db *DB) UnmarshalIgnoreExpired(b []byte, v interface{}, role string, minVersion int64) error { s := &data.Signed{} if err := json.Unmarshal(b, s); err != nil { return err diff --git a/vendor/github.com/tjfoc/gmsm/LICENSE b/vendor/github.com/tjfoc/gmsm/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/tjfoc/gmsm/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/tjfoc/gmsm/sm3/sm3.go b/vendor/github.com/tjfoc/gmsm/sm3/sm3.go new file mode 100644 index 0000000000..1a610b9932 --- /dev/null +++ b/vendor/github.com/tjfoc/gmsm/sm3/sm3.go @@ -0,0 +1,260 @@ +/* +Copyright Suzhou Tongji Fintech Research Institute 2017 All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sm3 + +import ( + "encoding/binary" + "hash" +) + +type SM3 struct { + digest [8]uint32 // digest represents the partial evaluation of V + length uint64 // length of the message, in bits + unhandleMsg []byte // buffered bytes that do not yet fill a complete block +} + +func (sm3 *SM3) ff0(x, y, z uint32) uint32 { return x ^ y ^ z } + +func (sm3 *SM3) ff1(x, y, z uint32) uint32 { return (x & y) | (x & z) | (y & z) } + +func (sm3 *SM3) gg0(x, y, z uint32) uint32 { return x ^ y ^ z } + +func (sm3 *SM3) gg1(x, y, z uint32) uint32 { return (x & y) | (^x & z) } + +func (sm3 *SM3) p0(x uint32) uint32 { return x ^ sm3.leftRotate(x, 9) ^ sm3.leftRotate(x, 17) } + +func (sm3 *SM3) p1(x uint32) uint32 { return x ^ sm3.leftRotate(x, 15) ^ sm3.leftRotate(x, 23) } + +func (sm3 *SM3) leftRotate(x uint32, i uint32) uint32 { return (x<<(i%32) | x>>(32-i%32)) } + +func (sm3 *SM3) pad() []byte { + msg := sm3.unhandleMsg + msg = append(msg, 0x80) // Append the '1' bit, as the byte 0x80 + blockSize := 64 // Append zeros until the message length (in bits) is congruent to 448 (mod 512) + for len(msg)%blockSize != 56 { + msg = append(msg, 0x00) + } + // Append the original message length, in bits, as a 64-bit big-endian integer + msg = append(msg, uint8(sm3.length>>56&0xff)) + msg = append(msg, uint8(sm3.length>>48&0xff)) + msg = append(msg, uint8(sm3.length>>40&0xff)) + msg = append(msg, uint8(sm3.length>>32&0xff)) + msg = append(msg, uint8(sm3.length>>24&0xff)) + msg = append(msg, uint8(sm3.length>>16&0xff)) + msg = append(msg, uint8(sm3.length>>8&0xff)) + msg = append(msg, uint8(sm3.length>>0&0xff)) + + if len(msg)%64 != 0 { + panic("sm3: pad: padded message length is not a multiple of the block size") + } + return msg +} + +func (sm3 *SM3) update(msg []byte, nblocks int) { + var w [68]uint32 + var w1 [64]uint32 + + a, b, c, d, e, f, g, h := sm3.digest[0], sm3.digest[1], sm3.digest[2], sm3.digest[3], sm3.digest[4], sm3.digest[5], sm3.digest[6], sm3.digest[7] + for len(msg) >= 64 { + for i := 0; i < 16; i++ { + w[i] = binary.BigEndian.Uint32(msg[4*i : 4*(i+1)]) + } + for
i := 16; i < 68; i++ { + w[i] = sm3.p1(w[i-16]^w[i-9]^sm3.leftRotate(w[i-3], 15)) ^ sm3.leftRotate(w[i-13], 7) ^ w[i-6] + } + for i := 0; i < 64; i++ { + w1[i] = w[i] ^ w[i+4] + } + A, B, C, D, E, F, G, H := a, b, c, d, e, f, g, h + for i := 0; i < 16; i++ { + SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x79cc4519, uint32(i)), 7) + SS2 := SS1 ^ sm3.leftRotate(A, 12) + TT1 := sm3.ff0(A, B, C) + D + SS2 + w1[i] + TT2 := sm3.gg0(E, F, G) + H + SS1 + w[i] + D = C + C = sm3.leftRotate(B, 9) + B = A + A = TT1 + H = G + G = sm3.leftRotate(F, 19) + F = E + E = sm3.p0(TT2) + } + for i := 16; i < 64; i++ { + SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x7a879d8a, uint32(i)), 7) + SS2 := SS1 ^ sm3.leftRotate(A, 12) + TT1 := sm3.ff1(A, B, C) + D + SS2 + w1[i] + TT2 := sm3.gg1(E, F, G) + H + SS1 + w[i] + D = C + C = sm3.leftRotate(B, 9) + B = A + A = TT1 + H = G + G = sm3.leftRotate(F, 19) + F = E + E = sm3.p0(TT2) + } + a ^= A + b ^= B + c ^= C + d ^= D + e ^= E + f ^= F + g ^= G + h ^= H + msg = msg[64:] + } + sm3.digest[0], sm3.digest[1], sm3.digest[2], sm3.digest[3], sm3.digest[4], sm3.digest[5], sm3.digest[6], sm3.digest[7] = a, b, c, d, e, f, g, h +} + +// update2 runs the same compression function as update, but returns the +// resulting digest instead of storing it, leaving the receiver's state +// untouched. It is used by Sum to finalize without corrupting the state. +func (sm3 *SM3) update2(msg []byte, nblocks int) [8]uint32 { + var w [68]uint32 + var w1 [64]uint32 + + a, b, c, d, e, f, g, h := sm3.digest[0], sm3.digest[1], sm3.digest[2], sm3.digest[3], sm3.digest[4], sm3.digest[5], sm3.digest[6], sm3.digest[7] + for len(msg) >= 64 { + for i := 0; i < 16; i++ { + w[i] = binary.BigEndian.Uint32(msg[4*i : 4*(i+1)]) + } + for i := 16; i < 68; i++ { + w[i] = sm3.p1(w[i-16]^w[i-9]^sm3.leftRotate(w[i-3], 15)) ^ sm3.leftRotate(w[i-13], 7) ^ w[i-6] + } + for i := 0; i < 64; i++ { + w1[i] = w[i] ^ w[i+4] + } + A, B, C, D, E, F, G, H := a, b, c, d, e, f, g, h + for i := 0; i < 16; i++ { + SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x79cc4519, uint32(i)), 7) + SS2 := SS1 ^ sm3.leftRotate(A, 12) + TT1 := sm3.ff0(A, B, C) + D + SS2 + w1[i] + TT2 := sm3.gg0(E, F, G) + H + SS1 + w[i] + D = C + C = sm3.leftRotate(B, 9) + B = A + A = TT1 + H = G + G = sm3.leftRotate(F, 19) + F = E + E = sm3.p0(TT2) + } + for i := 16; i < 64; i++ { + SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x7a879d8a, uint32(i)), 7) + SS2 := SS1 ^ sm3.leftRotate(A, 12) + TT1 := sm3.ff1(A, B, C) + D + SS2 + w1[i] + TT2 := sm3.gg1(E, F, G) + H + SS1 + w[i] + D = C + C = sm3.leftRotate(B, 9) + B = A + A = TT1 + H = G + G = sm3.leftRotate(F, 19) + F = E + E = sm3.p0(TT2) + } + a ^= A + b ^= B + c ^= C + d ^= D + e ^= E + f ^= F + g ^= G + h ^= H + msg = msg[64:] + } + var digest [8]uint32 + digest[0], digest[1], digest[2], digest[3], digest[4], digest[5], digest[6], digest[7] = a, b, c, d, e, f, g, h + return digest +} + +func New() hash.Hash { + var sm3 SM3 + + sm3.Reset() + return &sm3 +} + +// BlockSize, required by the hash.Hash interface. +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (sm3 *SM3) BlockSize() int { return 64 } + +// Size, required by the hash.Hash interface. +// Size returns the number of bytes Sum will return. +func (sm3 *SM3) Size() int { return 32 } + +// Reset, required by the hash.Hash interface. +// Reset restores the digest to the SM3 initialization vector and clears the +// buffered state. Note that the zero SM3 value is not a valid initial state, +// so New (and Sm3Sum) call Reset before use.
+func (sm3 *SM3) Reset() { + // Reset the digest to the SM3 initialization vector + sm3.digest[0] = 0x7380166f + sm3.digest[1] = 0x4914b2b9 + sm3.digest[2] = 0x172442d7 + sm3.digest[3] = 0xda8a0600 + sm3.digest[4] = 0xa96f30bc + sm3.digest[5] = 0x163138aa + sm3.digest[6] = 0xe38dee4d + sm3.digest[7] = 0xb0fb0e4e + + sm3.length = 0 // Reset numeric state + sm3.unhandleMsg = []byte{} +} + +// Write, required by the hash.Hash interface. +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (sm3 *SM3) Write(p []byte) (int, error) { + toWrite := len(p) + sm3.length += uint64(len(p) * 8) + msg := append(sm3.unhandleMsg, p...) + nblocks := len(msg) / sm3.BlockSize() + sm3.update(msg, nblocks) + // Keep the tail that does not yet fill a complete block + sm3.unhandleMsg = msg[nblocks*sm3.BlockSize():] + + return toWrite, nil +} + +// Sum, required by the hash.Hash interface. +// Sum appends the current hash to in and returns the resulting slice. +// Note that, unlike the standard library hashes, this implementation first +// absorbs in into the running state (via Write), so it mutates the receiver. +func (sm3 *SM3) Sum(in []byte) []byte { + sm3.Write(in) + msg := sm3.pad() + // Finalize + digest := sm3.update2(msg, len(msg)/sm3.BlockSize()) + + // Append the digest to in + needed := sm3.Size() + if cap(in)-len(in) < needed { + newIn := make([]byte, len(in), len(in)+needed) + copy(newIn, in) + in = newIn + } + out := in[len(in) : len(in)+needed] + for i := 0; i < 8; i++ { + binary.BigEndian.PutUint32(out[i*4:], digest[i]) + } + return out +} + +func Sm3Sum(data []byte) []byte { + var sm3 SM3 + + sm3.Reset() + sm3.Write(data) + return sm3.Sum(nil) +} diff --git a/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md new file mode 100644 index 0000000000..43de4c9d47 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/CONTRIBUTING.md @@ -0,0 +1,58 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**; it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + +Once your CLA is submitted (or if you already submitted one for +another Google project), make a commit adding yourself to the +[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part +of your first [pull request][]. + +[AUTHORS]: AUTHORS +[CONTRIBUTORS]: CONTRIBUTORS + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1.
Follow the normal process of [forking][] the project, and set up a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages can be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/transparency-dev/merkle/LICENSE b/vendor/github.com/transparency-dev/merkle/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/transparency-dev/merkle/README.md b/vendor/github.com/transparency-dev/merkle/README.md new file mode 100644 index 0000000000..3c8d212711 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/README.md @@ -0,0 +1,25 @@ +# Merkle + +[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle) +[![Go Report +Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle) +[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle) +[![Slack +Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/) + +## Overview + +This repository contains Go code to help create and manipulate Merkle trees, as +well as constructing and verifying various types of proof. 
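To make that overview concrete, here is a minimal sketch of how the packages vendored in this diff fit together: a compact.Range accumulates leaf hashes, proof.Inclusion names the tree nodes a proof needs, and proof.VerifyInclusion checks the result. It relies only on the APIs visible in this diff plus rfc6962.DefaultHasher from the renamed rfc6962 package (assumed to be the RFC 6962 SHA-256 hasher); the leaf values and the in-memory node map are illustrative, not part of the library.

```go
package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/compact"
	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	hasher := rfc6962.DefaultHasher
	rf := compact.RangeFactory{Hash: hasher.HashChildren}
	cr := rf.NewEmptyRange(0)

	// Remember every node reported while the range grows, so proofs can be
	// assembled from stored hashes later.
	nodes := map[compact.NodeID][]byte{}
	store := func(id compact.NodeID, hash []byte) { nodes[id] = hash }

	leaves := [][]byte{[]byte("a"), []byte("b"), []byte("c"), []byte("d"), []byte("e")}
	for _, l := range leaves {
		if err := cr.Append(hasher.HashLeaf(l), store); err != nil {
			panic(err)
		}
	}
	root, err := cr.GetRootHash(store) // also reports ephemeral right-border nodes
	if err != nil {
		panic(err)
	}

	// Fetch the nodes named by proof.Inclusion, collapse any ephemeral
	// sub-range with Rehash, and verify the proof for one leaf.
	const leafIndex = 2
	size := uint64(len(leaves))
	n, err := proof.Inclusion(leafIndex, size)
	if err != nil {
		panic(err)
	}
	hashes := make([][]byte, 0, len(n.IDs))
	for _, id := range n.IDs {
		hashes = append(hashes, nodes[id])
	}
	if hashes, err = n.Rehash(hashes, hasher.HashChildren); err != nil {
		panic(err)
	}
	if err := proof.VerifyInclusion(hasher, leafIndex, size, hasher.HashLeaf(leaves[leafIndex]), hashes, root); err != nil {
		panic(err)
	}
	fmt.Printf("verified inclusion of leaf %d under root %x\n", leafIndex, root)
}
```

In a real log the node hashes would come from storage rather than a map, but the flow (Inclusion names node IDs, Rehash collapses the ephemeral node, VerifyInclusion recomputes the root) is the same.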
+ +This is the data structure which is used by projects such as +[Trillian](https://github.com/google/trillian) to provide +[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log). + + +## Support +* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency +* Slack: https://gtrillian.slack.com/ (invitation) + + + diff --git a/vendor/github.com/transparency-dev/merkle/compact/nodes.go b/vendor/github.com/transparency-dev/merkle/compact/nodes.go new file mode 100644 index 0000000000..c53a96a4c3 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/nodes.go @@ -0,0 +1,89 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compact + +import "math/bits" + +// NodeID identifies a node of a Merkle tree. +// +// The ID consists of a level and index within this level. Levels are numbered +// from 0, which corresponds to the tree leaves. Within each level, nodes are +// numbered with consecutive indices starting from 0. +// +// L4: ┌───────0───────┐ ... +// L3: ┌───0───┐ ┌───1───┐ ┌─── ... +// L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ... +// L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ... +// L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ... +// +// When the tree is not perfect, the nodes that would complement it to perfect +// are called ephemeral. Algorithms that operate with ephemeral nodes still map +// them to the same address space. +type NodeID struct { + Level uint + Index uint64 +} + +// NewNodeID returns a NodeID with the passed in node coordinates. +func NewNodeID(level uint, index uint64) NodeID { + return NodeID{Level: level, Index: index} +} + +// Parent returns the ID of the parent node. +func (id NodeID) Parent() NodeID { + return NewNodeID(id.Level+1, id.Index>>1) +} + +// Sibling returns the ID of the sibling node. +func (id NodeID) Sibling() NodeID { + return NewNodeID(id.Level, id.Index^1) +} + +// Coverage returns the [begin, end) range of leaves covered by the node. +func (id NodeID) Coverage() (uint64, uint64) { + return id.Index << id.Level, (id.Index + 1) << id.Level +} + +// RangeNodes appends the IDs of the nodes that comprise the [begin, end) +// compact range to the given slice, and returns the new slice. The caller may +// pre-allocate space with the help of the RangeSize function. +func RangeNodes(begin, end uint64, ids []NodeID) []NodeID { + left, right := Decompose(begin, end) + + pos := begin + // Iterate over perfect subtrees along the left border of the range, ordered + // from lower to upper levels. + for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit { + level := uint(bits.TrailingZeros64(left)) + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + // Iterate over perfect subtrees along the right border of the range, ordered + // from upper to lower levels. 
+ for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit { + level := uint(bits.Len64(right)) - 1 + bit = uint64(1) << level + ids = append(ids, NewNodeID(level, pos>>level)) + } + + return ids +} + +// RangeSize returns the number of nodes in the [begin, end) compact range. +func RangeSize(begin, end uint64) int { + left, right := Decompose(begin, end) + return bits.OnesCount64(left) + bits.OnesCount64(right) +} diff --git a/vendor/github.com/transparency-dev/merkle/compact/range.go b/vendor/github.com/transparency-dev/merkle/compact/range.go new file mode 100644 index 0000000000..a34c0be973 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/compact/range.go @@ -0,0 +1,264 @@ +// Copyright 2019 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compact provides compact Merkle tree data structures. +package compact + +import ( + "bytes" + "errors" + "fmt" + "math/bits" +) + +// HashFn computes an internal node's hash using the hashes of its child nodes. +type HashFn func(left, right []byte) []byte + +// VisitFn visits the node with the specified ID and hash. +type VisitFn func(id NodeID, hash []byte) + +// RangeFactory allows creating compact ranges with the specified hash +// function, which must not be nil, and must not be changed. +type RangeFactory struct { + Hash HashFn +} + +// NewRange creates a Range for [begin, end) with the given set of hashes. The +// hashes correspond to the roots of the minimal set of perfect sub-trees +// covering the [begin, end) leaves range, ordered left to right. +func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) { + if end < begin { + return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin) + } + if got, want := len(hashes), RangeSize(begin, end); got != want { + return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want) + } + return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil +} + +// NewEmptyRange returns a new Range for an empty [begin, begin) range. The +// value of begin defines where the range will start growing from when entries +// are appended to it. +func (f *RangeFactory) NewEmptyRange(begin uint64) *Range { + return &Range{f: f, begin: begin, end: begin} +} + +// Range represents a compact Merkle tree range for leaf indices [begin, end). +// +// It contains the minimal set of perfect subtrees whose leaves comprise this +// range. The structure is efficiently mergeable with other compact ranges that +// share one of the endpoints with it. +// +// For more details, see +// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md. +type Range struct { + f *RangeFactory + begin uint64 + end uint64 + hashes [][]byte +} + +// Begin returns the first index covered by the range (inclusive). +func (r *Range) Begin() uint64 { + return r.begin +} + +// End returns the last index covered by the range (exclusive). 
+func (r *Range) End() uint64 { + return r.end +} + +// Hashes returns sub-tree hashes corresponding to the minimal set of perfect +// sub-trees covering the [begin, end) range, ordered left to right. +func (r *Range) Hashes() [][]byte { + return r.hashes +} + +// Append extends the compact range by appending the passed in hash to it. It +// reports all the added nodes through the visitor function (if non-nil). +func (r *Range) Append(hash []byte, visitor VisitFn) error { + if visitor != nil { + visitor(NewNodeID(0, r.end), hash) + } + return r.appendImpl(r.end+1, hash, nil, visitor) +} + +// AppendRange extends the compact range by merging in the other compact range +// from the right. It uses the tree hasher to calculate hashes of newly created +// nodes, and reports them through the visitor function (if non-nil). +func (r *Range) AppendRange(other *Range, visitor VisitFn) error { + if other.f != r.f { + return errors.New("incompatible ranges") + } + if got, want := other.begin, r.end; got != want { + return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want) + } + if len(other.hashes) == 0 { // The other range is empty, merging is trivial. + return nil + } + return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor) +} + +// GetRootHash returns the root hash of the Merkle tree represented by this +// compact range. Requires the range to start at index 0. If the range is +// empty, returns nil. +// +// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the +// ones rooting imperfect subtrees) along the right border of the tree. +func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) { + if r.begin != 0 { + return nil, fmt.Errorf("begin=%d, want 0", r.begin) + } + ln := len(r.hashes) + if ln == 0 { + return nil, nil + } + hash := r.hashes[ln-1] + // All non-perfect subtree hashes along the right border of the tree + // correspond to the parents of all perfect subtree nodes except the lowest + // one (therefore the loop skips it). + for i, size := ln-2, r.end; i >= 0; i-- { + hash = r.f.Hash(r.hashes[i], hash) + if visitor != nil { + size &= size - 1 // Delete the previous node. + level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level. + index := size >> level // And its horizontal index. + visitor(NewNodeID(level, index), hash) + } + } + return hash, nil +} + +// Equal compares two Ranges for equality. +func (r *Range) Equal(other *Range) bool { + if r.f != other.f || r.begin != other.begin || r.end != other.end { + return false + } + if len(r.hashes) != len(other.hashes) { + return false + } + for i := range r.hashes { + if !bytes.Equal(r.hashes[i], other.hashes[i]) { + return false + } + } + return true +} + +// appendImpl extends the compact range by merging the [r.end, end) compact +// range into it. The other compact range is decomposed into a seed hash and +// all the other hashes (possibly none). The method uses the tree hasher to +// calculate hashes of newly created nodes, and reports them through the +// visitor function (if non-nil). +func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error { + // Bits [low, high) of r.end encode the merge path, i.e. the sequence of node + // merges that transforms the two compact ranges into one. + low, high := getMergePath(r.begin, r.end, end) + if high < low { + high = low + } + index := r.end >> low + // Now bits [0, high-low) of index encode the merge path. 
+ + // The number of one bits in index is the number of nodes from the left range + // that will be merged, and zero bits correspond to the nodes in the right + // range. Below we make sure that both ranges have enough hashes, which can + // be false only in case the data is corrupted in some way. + ones := bits.OnesCount64(index & (1<<(high-low) - 1)) + if ln := len(r.hashes); ln < ones { + return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones) + } + if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros { + return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1) + } + + // Some of the trailing nodes of the left compact range, and some of the + // leading nodes of the right range, are sequentially merged with the seed, + // according to the mask. All new nodes are reported through the visitor. + idx1, idx2 := len(r.hashes), 0 + for h := low; h < high; h++ { + if index&1 == 0 { + seed = r.f.Hash(seed, hashes[idx2]) + idx2++ + } else { + idx1-- + seed = r.f.Hash(r.hashes[idx1], seed) + } + index >>= 1 + if visitor != nil { + visitor(NewNodeID(h+1, index), seed) + } + } + + // All nodes from both ranges that have not been merged are bundled together + // with the "merged" seed node. + r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...) + r.end = end + return nil +} + +// getMergePath returns the merging path between the compact range [begin, mid) +// and [mid, end). The path is represented as a range of bits within mid, with +// bit indices [low, high). A bit value of 1 on level i of mid means that the +// node on this level merges with the corresponding node in the left compact +// range, whereas 0 represents merging with the right compact range. If the +// path is empty then high <= low. +// +// The output is not specified if begin <= mid <= end doesn't hold, but the +// function never panics. +func getMergePath(begin, mid, end uint64) (uint, uint) { + low := bits.TrailingZeros64(mid) + high := 64 + if begin != 0 { + high = bits.Len64(mid ^ (begin - 1)) + } + if high2 := bits.Len64((mid - 1) ^ end); high2 < high { + high = high2 + } + return uint(low), uint(high - 1) +} + +// Decompose splits the [begin, end) range into a minimal number of sub-ranges, +// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for +// some integers m, k >= 0. +// +// The sequence of sizes is returned encoded as bitmasks left and right, where: +// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k +// - left mask bits in LSB-to-MSB order encode the left part of the sequence +// - right mask bits in MSB-to-LSB order encode the right part +// +// The corresponding values of m are not returned (they can be calculated from +// begin and the sub-range sizes). +// +// For example, (begin, end) values of (0b110, 0b11101) would indicate a +// sequence of tree sizes: 2,8; 8,4,1. +// +// The output is not specified if begin > end, but the function never panics. +func Decompose(begin, end uint64) (uint64, uint64) { + // Special case, as the code below works only if begin != 0, or end < 2^63. + if begin == 0 { + return 0, end + } + xbegin := begin - 1 + // Find where paths to leaves #begin-1 and #end diverge, and mask the upper + // bits away, as only the nodes strictly below this point are in the range. 
+ d := bits.Len64(xbegin^end) - 1 + mask := uint64(1)<<uint(d) - 1 + // The left part of the range is encoded by the bits of ^xbegin below the + // divergence point, and the right part by the bits of end below it. + return ^xbegin & mask, end & mask +} diff --git a/vendor/github.com/transparency-dev/merkle/proof/proof.go b/vendor/github.com/transparency-dev/merkle/proof/proof.go new file mode 100644 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/proof/proof.go +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package proof contains helpers for constructing log Merkle tree proofs. +package proof + +import ( + "fmt" + "math/bits" + + "github.com/transparency-dev/merkle/compact" +) + +// Nodes contains information on how to construct a log Merkle tree proof. It +// supports any proof that has at most one ephemeral node, such as the +// inclusion and consistency proofs defined in RFC 6962. +type Nodes struct { + // IDs contains the IDs of the non-ephemeral nodes sufficient to build the + // proof. + IDs []compact.NodeID + // begin and end are the indices into IDs such that IDs[begin:end] are the + // nodes comprising the ephemeral node (see the Ephem method). + begin int + end int + // ephem is the ID of the ephemeral node in the proof. + ephem compact.NodeID +} + +// Inclusion returns the information on how to fetch and construct an +// inclusion proof for the given leaf index in a log Merkle tree of the given +// size. It requires 0 <= index < size. +func Inclusion(index, size uint64) (Nodes, error) { + if index >= size { + return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size) + } + return nodes(index, 0, size).skipFirst(), nil +} + +// Consistency returns the information on how to fetch and construct a +// consistency proof between the two given tree sizes of a log Merkle tree. It +// requires 0 <= size1 <= size2. +func Consistency(size1, size2 uint64) (Nodes, error) { + if size1 > size2 { + return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2) + } + if size1 == size2 || size1 == 0 { + return Nodes{IDs: []compact.NodeID{}}, nil + } + + // Find the root of the biggest perfect subtree that ends at size1. + level := uint(bits.TrailingZeros64(size1)) + index := (size1 - 1) >> level + // The consistency proof consists of this node (except if size1 is a power of + // two, in which case adding this node would be redundant because the client + // is assumed to know it from a checkpoint), and nodes of the inclusion proof + // into this node in the tree of size2. + p := nodes(index, level, size2) + + // Handle the case when size1 is a power of 2. + if index == 0 { + return p.skipFirst(), nil + } + return p, nil +} + +// nodes returns the node IDs necessary to prove that the (level, index) node +// is included in the Merkle tree of the given size. +func nodes(index uint64, level uint, size uint64) Nodes { + // Compute the `fork` node, where the path from root to (level, index) node + // diverges from the path to (0, size). + // + // The sibling of this node is the ephemeral node which represents a subtree + // that is not complete in the tree of the given size. To compute the hash + // of the ephemeral node, we need all the non-ephemeral nodes that cover the + // same range of leaves. + // + // The `inner` variable is how many layers up from (level, index) the `fork` + // and the ephemeral nodes are. + inner := bits.Len64(index^(size>>level)) - 1 + fork := compact.NewNodeID(level+uint(inner), index>>inner) + + begin, end := fork.Coverage() + left := compact.RangeSize(0, begin) + right := compact.RangeSize(end, size) + + node := compact.NewNodeID(level, index) + // Pre-allocate the exact number of nodes for the proof, in order: + // - The seed node for which we are building the proof. + // - The `inner` nodes at each level up to the fork node. + // - The `right` nodes, comprising the ephemeral node. + // - The `left` nodes, completing the coverage of the whole [0, size) range. + nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node) + + // The first portion of the proof consists of the siblings for nodes of the + // path going up to the level at which the ephemeral node appears. + for ; node.Level < fork.Level; node = node.Parent() { + nodes = append(nodes, node.Sibling()) + } + // This portion of the proof covers the range [begin, end) under it. The + // ranges to the left and to the right from it remain to be covered. + + // Add all the nodes (potentially none) that cover the right range, and + // represent the ephemeral node. Reverse them so that the Rehash method can + // process hashes in the convenient order, from lower to upper levels. + len1 := len(nodes) + nodes = compact.RangeNodes(end, size, nodes) + reverse(nodes[len(nodes)-right:]) + len2 := len(nodes) + // Add the nodes that cover the left range, ordered increasingly by level. + nodes = compact.RangeNodes(0, begin, nodes) + reverse(nodes[len(nodes)-left:]) + + // nodes[len1:len2] contains the nodes representing the ephemeral node.
If + // it's empty, make it zero. Note that it can also contain a single node. + // Depending on the preference of the layer above, it may or may not be + // considered ephemeral. + if len1 >= len2 { + len1, len2 = 0, 0 + } + + return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()} +} + +// Ephem returns the ephemeral node, and indices begin and end, such that +// IDs[begin:end] slice contains the child nodes of the ephemeral node. +// +// The list is empty iff there are no ephemeral nodes in the proof. Some +// examples of when this can happen: a proof in a perfect tree; an inclusion +// proof for a leaf in a perfect subtree at the right edge of the tree. +func (n Nodes) Ephem() (compact.NodeID, int, int) { + return n.ephem, n.begin, n.end +} + +// Rehash computes the proof based on the slice of node hashes corresponding to +// their IDs in the n.IDs field. The slices must be of the same length. The hc +// parameter computes a node's hash based on hashes of its children. +// +// Warning: The passed-in slice of hashes can be modified in-place. +func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) { + if got, want := len(h), len(n.IDs); got != want { + return nil, fmt.Errorf("got %d hashes but expected %d", got, want) + } + cursor := 0 + // Scan the list of node hashes, and store the rehashed list in-place. + // Invariant: cursor <= i, and h[:cursor] contains all the hashes of the + // rehashed list after scanning h up to index i-1. + for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 { + hash := h[i] + if i >= n.begin && i < n.end { + // Scan the block of node hashes that need rehashing. + for i++; i < n.end; i++ { + hash = hc(h[i], hash) + } + i-- + } + h[cursor] = hash + } + return h[:cursor], nil +} + +func (n Nodes) skipFirst() Nodes { + n.IDs = n.IDs[1:] + // Fixup the indices into the IDs slice. + if n.begin < n.end { + n.begin-- + n.end-- + } + return n +} + +func reverse(ids []compact.NodeID) { + for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { + ids[i], ids[j] = ids[j], ids[i] + } +} diff --git a/vendor/github.com/transparency-dev/merkle/proof/verify.go b/vendor/github.com/transparency-dev/merkle/proof/verify.go new file mode 100644 index 0000000000..d42e1afe36 --- /dev/null +++ b/vendor/github.com/transparency-dev/merkle/proof/verify.go @@ -0,0 +1,176 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package proof + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + + "github.com/transparency-dev/merkle" +) + +// RootMismatchError occurs when an inclusion proof fails. 
+type RootMismatchError struct { + ExpectedRoot []byte + CalculatedRoot []byte +} + +func (e RootMismatchError) Error() string { + return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot) +} + +func verifyMatch(calculated, expected []byte) error { + if !bytes.Equal(calculated, expected) { + return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated} + } + return nil +} + +// VerifyInclusion verifies the correctness of the inclusion proof for the leaf +// with the specified hash and index, relative to the tree of the given size +// and root hash. Requires 0 <= index < size. +func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error { + calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof) + if err != nil { + return err + } + return verifyMatch(calcRoot, root) +} + +// RootFromInclusionProof calculates the expected root hash for a tree of the +// given size, provided a leaf index and hash with the corresponding inclusion +// proof. Requires 0 <= index < size. +func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) { + if index >= size { + return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size) + } + if got, want := len(leafHash), hasher.Size(); got != want { + return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want) + } + + inner, border := decompInclProof(index, size) + if got, want := len(proof), inner+border; got != want { + return nil, fmt.Errorf("wrong proof size %d, want %d", got, want) + } + + res := chainInner(hasher, leafHash, proof[:inner], index) + res = chainBorderRight(hasher, res, proof[inner:]) + return res, nil +} + +// VerifyConsistency checks that the passed-in consistency proof is valid +// between the passed in tree sizes, with respect to the corresponding root +// hashes. Requires 0 <= size1 <= size2. +func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error { + switch { + case size2 < size1: + return fmt.Errorf("size2 (%d) < size1 (%d)", size2, size1) + case size1 == size2: + if len(proof) > 0 { + return errors.New("size1=size2, but proof is not empty") + } + return verifyMatch(root1, root2) + case size1 == 0: + // Any size greater than 0 is consistent with size 0. + if len(proof) > 0 { + return fmt.Errorf("expected empty proof, but got %d components", len(proof)) + } + return nil // Proof OK. + case len(proof) == 0: + return errors.New("empty proof") + } + + inner, border := decompInclProof(size1-1, size2) + shift := bits.TrailingZeros64(size1) + inner -= shift // Note: shift < inner if size1 < size2. + + // The proof includes the root hash for the sub-tree of size 2^shift. + seed, start := proof[0], 1 + if size1 == 1<<uint(shift) { + // The size1 tree is perfect, so the client already knows its root hash. + seed, start = root1, 0 + } + if got, want := len(proof), start+inner+border; got != want { + return fmt.Errorf("wrong proof size %d, want %d", got, want) + } + proof = proof[start:] + + // Verify the first root. + mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|. + hash1 := chainInnerRight(hasher, seed, proof[:inner], mask) + hash1 = chainBorderRight(hasher, hash1, proof[inner:]) + if err := verifyMatch(hash1, root1); err != nil { + return err + } + + // Verify the second root. + hash2 := chainInner(hasher, seed, proof[:inner], mask) + hash2 = chainBorderRight(hasher, hash2, proof[inner:]) + return verifyMatch(hash2, root2) +} + +// decompInclProof breaks down the inclusion proof for a leaf at the specified +// |index| in a tree of the specified |size| into 2 components.
The splitting +// point between them is where paths to leaves |index| and |size-1| diverge. +// Returns lengths of the bottom and upper proof parts correspondingly. The sum +// of the two determines the correct length of the inclusion proof. +func decompInclProof(index, size uint64) (int, int) { + inner := innerProofSize(index, size) + border := bits.OnesCount64(index >> uint(inner)) + return inner, border +} + +func innerProofSize(index, size uint64) int { + return bits.Len64(index ^ (size - 1)) +} + +// chainInner computes a subtree hash for a node on or below the tree's right +// border. Assumes |proof| hashes are ordered from lower levels to upper, and +// |seed| is the initial subtree/leaf hash on the path located at the specified +// |index| on its level. +func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { + for i, h := range proof { + if (index>>uint(i))&1 == 0 { + seed = hasher.HashChildren(seed, h) + } else { + seed = hasher.HashChildren(h, seed) + } + } + return seed +} + +// chainInnerRight computes a subtree hash like chainInner, but only takes +// hashes to the left from the path into consideration, which effectively means +// the result is a hash of the corresponding earlier version of this subtree. +func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { + for i, h := range proof { + if (index>>uint(i))&1 == 1 { + seed = hasher.HashChildren(h, seed) + } + } + return seed +} + +// chainBorderRight chains proof hashes along tree borders. This differs from +// inner chaining because |proof| contains only left-side subtree hashes. +func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte { + for _, h := range proof { + seed = hasher.HashChildren(h, seed) + } + return seed +} diff --git a/vendor/github.com/google/trillian/merkle/rfc6962/rfc6962.go b/vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go similarity index 100% rename from vendor/github.com/google/trillian/merkle/rfc6962/rfc6962.go rename to vendor/github.com/transparency-dev/merkle/rfc6962/rfc6962.go diff --git a/vendor/github.com/urfave/cli/.gitignore b/vendor/github.com/urfave/cli/.gitignore index 9c2506032c..8ae196feed 100644 --- a/vendor/github.com/urfave/cli/.gitignore +++ b/vendor/github.com/urfave/cli/.gitignore @@ -1,4 +1,5 @@ *.coverprofile +coverage.txt node_modules/ vendor -.idea \ No newline at end of file +.idea diff --git a/vendor/github.com/urfave/cli/command.go b/vendor/github.com/urfave/cli/command.go index f02d3589ff..09fda1642f 100644 --- a/vendor/github.com/urfave/cli/command.go +++ b/vendor/github.com/urfave/cli/command.go @@ -226,18 +226,23 @@ func reorderArgs(commandFlags []Flag, args []string) []string { nextIndexMayContainValue := false for i, arg := range args { - // dont reorder any args after a -- - // read about -- here: - // https://unix.stackexchange.com/questions/11376/what-does-double-dash-mean-also-known-as-bare-double-dash - if arg == "--" { - remainingArgs = append(remainingArgs, args[i:]...) 
-		break
-
-		// checks if this arg is a value that should be re-ordered next to its associated flag
-	} else if nextIndexMayContainValue && !strings.HasPrefix(arg, "-") {
+		// if we're expecting an option-value, check if this arg is a value, in
+		// which case it should be re-ordered next to its associated flag
+		if nextIndexMayContainValue && !argIsFlag(commandFlags, arg) {
 			nextIndexMayContainValue = false
 			reorderedArgs = append(reorderedArgs, arg)
-
+		} else if arg == "--" {
+			// don't reorder any args after the -- delimiter. As described in the POSIX spec:
+			// https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap12.html#tag_12_02
+			// > Guideline 10:
+			// >   The first -- argument that is not an option-argument should be accepted
+			// >   as a delimiter indicating the end of options. Any following arguments
+			// >   should be treated as operands, even if they begin with the '-' character.
+
+			// make sure the "--" delimiter itself is at the start
+			remainingArgs = append([]string{"--"}, remainingArgs...)
+			remainingArgs = append(remainingArgs, args[i+1:]...)
+			break
 			// checks if this is an arg that should be re-ordered
 		} else if argIsFlag(commandFlags, arg) {
 			// we have determined that this is a flag that we should re-order
@@ -256,8 +261,9 @@ func reorderArgs(commandFlags []Flag, args []string) []string {
 // argIsFlag checks if an arg is one of our command flags
 func argIsFlag(commandFlags []Flag, arg string) bool {
-	// checks if this is just a `-`, and so definitely not a flag
-	if arg == "-" {
+	if arg == "-" || arg == "--" {
+		// `-` is never a flag
+		// `--` is an option-value when following a flag, and a delimiter indicating the end of options in other cases.
 		return false
 	}
 	// flags always start with a -
diff --git a/vendor/github.com/urfave/cli/flag.go b/vendor/github.com/urfave/cli/flag.go
index 1cfa1cdb21..5b7ae6c3f0 100644
--- a/vendor/github.com/urfave/cli/flag.go
+++ b/vendor/github.com/urfave/cli/flag.go
@@ -338,8 +338,10 @@ func flagFromFileEnv(filePath, envName string) (val string, ok bool) {
 		}
 	}
 	for _, fileVar := range strings.Split(filePath, ",") {
-		if data, err := ioutil.ReadFile(fileVar); err == nil {
-			return string(data), true
+		if fileVar != "" {
+			if data, err := ioutil.ReadFile(fileVar); err == nil {
+				return string(data), true
+			}
 		}
 	}
 	return "", false
diff --git a/vendor/github.com/xanzy/go-gitlab/.gitignore b/vendor/github.com/xanzy/go-gitlab/.gitignore
index 19b0dcfbd4..78af853b3e 100644
--- a/vendor/github.com/xanzy/go-gitlab/.gitignore
+++ b/vendor/github.com/xanzy/go-gitlab/.gitignore
@@ -26,3 +26,5 @@ _testmain.go
 # IDE specific files and folders
 .idea
 *.iml
+*.swp
+*.swo
diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md
index cd409e8e0f..a7fd7cfbb5 100644
--- a/vendor/github.com/xanzy/go-gitlab/README.md
+++ b/vendor/github.com/xanzy/go-gitlab/README.md
@@ -31,6 +31,7 @@ to add new and/or missing endpoints. Currently, the following services are suppo
 - [x] Environments
 - [x] Epic Issues
 - [x] Epics
+- [x] Error Tracking
 - [x] Events
 - [x] Feature Flags
 - [x] Geo Nodes
@@ -74,6 +75,7 @@ to add new and/or missing endpoints. Currently, the following services are suppo
 - [x] Project Members
 - [x] Project Milestones
 - [x] Project Snippets
+- [x] Project Vulnerabilities
 - [x] Project-Level Variables
 - [x] Projects (including setting Webhooks)
 - [x] Protected Branches
@@ -90,6 +92,7 @@ to add new and/or missing endpoints.
Currently, the following services are suppo - [x] System Hooks - [x] Tags - [x] Todos +- [x] Topics - [x] Users - [x] Validate CI Configuration - [x] Version @@ -129,7 +132,7 @@ to list all projects for user "svanharmelen": ```go git := gitlab.NewClient("yourtokengoeshere") -opt := &ListProjectsOptions{Search: gitlab.String("svanharmelen")} +opt := &gitlab.ListProjectsOptions{Search: gitlab.String("svanharmelen")} projects, _, err := git.Projects.ListProjects(opt) ``` diff --git a/vendor/github.com/xanzy/go-gitlab/boards.go b/vendor/github.com/xanzy/go-gitlab/boards.go index fa90b92648..a75c9bda42 100644 --- a/vendor/github.com/xanzy/go-gitlab/boards.go +++ b/vendor/github.com/xanzy/go-gitlab/boards.go @@ -33,11 +33,21 @@ type IssueBoardsService struct { // // GitLab API docs: https://docs.gitlab.com/ce/api/boards.html type IssueBoard struct { - ID int `json:"id"` - Name string `json:"name"` - Project *Project `json:"project"` - Milestone *Milestone `json:"milestone"` - Lists []*BoardList `json:"lists"` + ID int `json:"id"` + Name string `json:"name"` + Project *Project `json:"project"` + Milestone *Milestone `json:"milestone"` + Assignee *struct { + ID int `json:"id"` + Username string `json:"username"` + Name string `json:"name"` + State string `json:"state"` + AvatarURL string `json:"avatar_url"` + WebURL string `json:"web_url"` + } `json:"assignee"` + Lists []*BoardList `json:"lists"` + Weight int `json:"weight"` + Labels []*LabelDetails `json:"labels"` } func (b IssueBoard) String() string { @@ -48,9 +58,18 @@ func (b IssueBoard) String() string { // // GitLab API docs: https://docs.gitlab.com/ce/api/boards.html type BoardList struct { - ID int `json:"id"` - Label *Label `json:"label"` - Position int `json:"position"` + ID int `json:"id"` + Assignee *struct { + ID int `json:"id"` + Name string `json:"name"` + Username string `json:"username"` + } `json:"assignee"` + Iteration *ProjectIteration `json:"iteration"` + Label *Label `json:"label"` + MaxIssueCount int `json:"max_issue_count"` + MaxIssueWeight int `json:"max_issue_weight"` + Milestone *Milestone `json:"milestone"` + Position int `json:"position"` } func (b BoardList) String() string { @@ -257,7 +276,10 @@ func (s *IssueBoardsService) GetIssueBoardList(pid interface{}, board, list int, // // GitLab API docs: https://docs.gitlab.com/ce/api/boards.html#new-board-list type CreateIssueBoardListOptions struct { - LabelID *int `url:"label_id" json:"label_id"` + LabelID *int `url:"label_id,omitempty" json:"label_id,omitempty"` + AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // CreateIssueBoardList creates a new issue board list. diff --git a/vendor/github.com/xanzy/go-gitlab/cluster_agents.go b/vendor/github.com/xanzy/go-gitlab/cluster_agents.go new file mode 100644 index 0000000000..907f753e1f --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/cluster_agents.go @@ -0,0 +1,294 @@ +// +// Copyright 2022, Timo Furrer +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ClusterAgentsService handles communication with the cluster agents related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html
+type ClusterAgentsService struct {
+	client *Client
+}
+
+// Agent represents a GitLab agent for Kubernetes.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/cluster_agents.html
+type Agent struct {
+	ID              int           `json:"id"`
+	Name            string        `json:"name"`
+	CreatedAt       *time.Time    `json:"created_at"`
+	CreatedByUserID int           `json:"created_by_user_id"`
+	ConfigProject   ConfigProject `json:"config_project"`
+}
+
+type ConfigProject struct {
+	ID                int        `json:"id"`
+	Description       string     `json:"description"`
+	Name              string     `json:"name"`
+	NameWithNamespace string     `json:"name_with_namespace"`
+	Path              string     `json:"path"`
+	PathWithNamespace string     `json:"path_with_namespace"`
+	CreatedAt         *time.Time `json:"created_at"`
+}
+
+func (a Agent) String() string {
+	return Stringify(a)
+}
+
+// AgentToken represents a GitLab agent token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent
+type AgentToken struct {
+	ID              int        `json:"id"`
+	Name            string     `json:"name"`
+	Description     string     `json:"description"`
+	AgentID         int        `json:"agent_id"`
+	Status          string     `json:"status"`
+	CreatedAt       *time.Time `json:"created_at"`
+	CreatedByUserID int        `json:"created_by_user_id"`
+	LastUsedAt      *time.Time `json:"last_used_at"`
+	Token           string     `json:"token"`
+}
+
+func (a AgentToken) String() string {
+	return Stringify(a)
+}
+
+// ListAgentsOptions represents the available ListAgents() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
+type ListAgentsOptions ListOptions
+
+// ListAgents returns a list of agents registered for the project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/cluster_agents.html#list-the-agents-for-a-project
+func (s *ClusterAgentsService) ListAgents(pid interface{}, opt *ListAgentsOptions, options ...RequestOptionFunc) ([]*Agent, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project))
+
+	req, err := s.client.NewRequest(http.MethodGet, uri, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var as []*Agent
+	resp, err := s.client.Do(req, &as)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return as, resp, err
+}
+
+// GetAgent gets details about a single agent.
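+// A minimal usage sketch (the client value and IDs are hypothetical):
+//
+//	agent, _, err := git.ClusterAgents.GetAgent("group/project", 42)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(agent.Name, agent.ConfigProject.PathWithNamespace)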
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#get-details-about-an-agent +func (s *ClusterAgentsService) GetAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Agent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) + + req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) + if err != nil { + return nil, nil, err + } + + a := new(Agent) + resp, err := s.client.Do(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, err +} + +// RegisterAgentOptions represents the available RegisterAgent() +// options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project +type RegisterAgentOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` +} + +// RegisterAgent registers an agent to the project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#register-an-agent-with-a-project +func (s *ClusterAgentsService) RegisterAgent(pid interface{}, opt *RegisterAgentOptions, options ...RequestOptionFunc) (*Agent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) + if err != nil { + return nil, nil, err + } + + a := new(Agent) + resp, err := s.client.Do(req, a) + if err != nil { + return nil, resp, err + } + + return a, resp, err +} + +// DeleteAgent deletes an existing agent registration. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#delete-a-registered-agent +func (s *ClusterAgentsService) DeleteAgent(pid interface{}, id int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d", PathEscape(project), id) + + req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// ListAgentTokensOptions represents the available ListAgentTokens() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent +type ListAgentTokensOptions ListOptions + +// ListAgentTokens returns a list of tokens for an agent. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#list-tokens-for-an-agent +func (s *ClusterAgentsService) ListAgentTokens(pid interface{}, aid int, opt *ListAgentTokensOptions, options ...RequestOptionFunc) ([]*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) + + req, err := s.client.NewRequest(http.MethodGet, uri, opt, options) + if err != nil { + return nil, nil, err + } + + var ats []*AgentToken + resp, err := s.client.Do(req, &ats) + if err != nil { + return nil, resp, err + } + + return ats, resp, err +} + +// GetAgentToken gets a single agent token. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#get-a-single-agent-token +func (s *ClusterAgentsService) GetAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) + + req, err := s.client.NewRequest(http.MethodGet, uri, nil, options) + if err != nil { + return nil, nil, err + } + + at := new(AgentToken) + resp, err := s.client.Do(req, at) + if err != nil { + return nil, resp, err + } + + return at, resp, err +} + +// CreateAgentTokenOptions represents the available CreateAgentToken() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token +type CreateAgentTokenOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + Description *string `url:"description,omitempty" json:"description,omitempty"` +} + +// CreateAgentToken creates a new token for an agent. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#create-an-agent-token +func (s *ClusterAgentsService) CreateAgentToken(pid interface{}, aid int, opt *CreateAgentTokenOptions, options ...RequestOptionFunc) (*AgentToken, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens", PathEscape(project), aid) + + req, err := s.client.NewRequest(http.MethodPost, uri, opt, options) + if err != nil { + return nil, nil, err + } + + at := new(AgentToken) + resp, err := s.client.Do(req, at) + if err != nil { + return nil, resp, err + } + + return at, resp, err +} + +// RevokeAgentToken revokes an agent token. 
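+// A hedged usage sketch (the client value, agent ID, and token ID are
+// placeholders):
+//
+//	_, err := git.ClusterAgents.RevokeAgentToken("group/project", 42, 7)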
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/cluster_agents.html#revoke-an-agent-token +func (s *ClusterAgentsService) RevokeAgentToken(pid interface{}, aid int, id int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + uri := fmt.Sprintf("projects/%s/cluster_agents/%d/tokens/%d", PathEscape(project), aid, id) + + req, err := s.client.NewRequest(http.MethodDelete, uri, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go index 7ff5c380d4..67485d9693 100644 --- a/vendor/github.com/xanzy/go-gitlab/commits.go +++ b/vendor/github.com/xanzy/go-gitlab/commits.go @@ -35,23 +35,24 @@ type CommitsService struct { // // GitLab API docs: https://docs.gitlab.com/ce/api/commits.html type Commit struct { - ID string `json:"id"` - ShortID string `json:"short_id"` - Title string `json:"title"` - AuthorName string `json:"author_name"` - AuthorEmail string `json:"author_email"` - AuthoredDate *time.Time `json:"authored_date"` - CommitterName string `json:"committer_name"` - CommitterEmail string `json:"committer_email"` - CommittedDate *time.Time `json:"committed_date"` - CreatedAt *time.Time `json:"created_at"` - Message string `json:"message"` - ParentIDs []string `json:"parent_ids"` - Stats *CommitStats `json:"stats"` - Status *BuildStateValue `json:"status"` - LastPipeline *PipelineInfo `json:"last_pipeline"` - ProjectID int `json:"project_id"` - WebURL string `json:"web_url"` + ID string `json:"id"` + ShortID string `json:"short_id"` + Title string `json:"title"` + AuthorName string `json:"author_name"` + AuthorEmail string `json:"author_email"` + AuthoredDate *time.Time `json:"authored_date"` + CommitterName string `json:"committer_name"` + CommitterEmail string `json:"committer_email"` + CommittedDate *time.Time `json:"committed_date"` + CreatedAt *time.Time `json:"created_at"` + Message string `json:"message"` + ParentIDs []string `json:"parent_ids"` + Stats *CommitStats `json:"stats"` + Status *BuildStateValue `json:"status"` + LastPipeline *PipelineInfo `json:"last_pipeline"` + ProjectID int `json:"project_id"` + Trailers map[string]string `json:"trailers"` + WebURL string `json:"web_url"` } // CommitStats represents the number of added and deleted files in a commit. @@ -79,6 +80,7 @@ type ListCommitsOptions struct { All *bool `url:"all,omitempty" json:"all,omitempty"` WithStats *bool `url:"with_stats,omitempty" json:"with_stats,omitempty"` FirstParent *bool `url:"first_parent,omitempty" json:"first_parent,omitempty"` + Trailers *bool `url:"trailers,omitempty" json:"trailers,omitempty"` } // ListCommits gets a list of repository commits in a project. diff --git a/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go b/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go new file mode 100644 index 0000000000..4293478784 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/deployments_merge_requests.go @@ -0,0 +1,54 @@ +// +// Copyright 2022, Daniela Filipe Bento +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// DeploymentMergeRequestsService handles communication with the deployment's
+// merge requests related methods of the GitLab API.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment
+type DeploymentMergeRequestsService struct {
+	client *Client
+}
+
+// ListDeploymentMergeRequests gets the merge requests associated with a deployment.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/deployments.html#list-of-merge-requests-associated-with-a-deployment
+func (s *DeploymentMergeRequestsService) ListDeploymentMergeRequests(pid interface{}, deployment int, opts *ListMergeRequestsOptions, options ...RequestOptionFunc) ([]*MergeRequest, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/deployments/%d/merge_requests", PathEscape(project), deployment)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, opts, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var mrs []*MergeRequest
+	resp, err := s.client.Do(req, &mrs)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return mrs, resp, err
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/environments.go b/vendor/github.com/xanzy/go-gitlab/environments.go
index 7e0b2675a5..f49630d31e 100644
--- a/vendor/github.com/xanzy/go-gitlab/environments.go
+++ b/vendor/github.com/xanzy/go-gitlab/environments.go
@@ -38,6 +38,7 @@ type Environment struct {
 	Name        string     `json:"name"`
 	Slug        string     `json:"slug"`
 	State       string     `json:"state"`
+	Tier        string     `json:"tier"`
 	ExternalURL string     `json:"external_url"`
 	Project     *Project   `json:"project"`
 	CreatedAt   *time.Time `json:"created_at"`
@@ -118,6 +119,7 @@ func (s *EnvironmentsService) GetEnvironment(pid interface{}, environment int, o
 type CreateEnvironmentOptions struct {
 	Name        *string `url:"name,omitempty" json:"name,omitempty"`
 	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
 }
 
 // CreateEnvironment adds an environment to a project. This is an idempotent
@@ -155,6 +157,7 @@ func (s *EnvironmentsService) CreateEnvironment(pid interface{}, opt *CreateEnvi
 type EditEnvironmentOptions struct {
 	Name        *string `url:"name,omitempty" json:"name,omitempty"`
 	ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"`
+	Tier        *string `url:"tier,omitempty" json:"tier,omitempty"`
 }
 
 // EditEnvironment updates an existing project environment.
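The new `Tier` field threads through both `CreateEnvironmentOptions` and `EditEnvironmentOptions` above. A minimal consumer-side sketch (the token and project ID are placeholders, and it assumes the two-value `gitlab.NewClient` of current go-gitlab releases):

```go
package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git, err := gitlab.NewClient("yourtokengoeshere")
	if err != nil {
		log.Fatal(err)
	}

	// Create a new environment and tag it with the production tier.
	env, _, err := git.Environments.CreateEnvironment(1234, &gitlab.CreateEnvironmentOptions{
		Name: gitlab.String("production"),
		Tier: gitlab.String("production"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("environment %s has tier %s", env.Name, env.Tier)
}
```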
diff --git a/vendor/github.com/xanzy/go-gitlab/epics.go b/vendor/github.com/xanzy/go-gitlab/epics.go index cbd6b113fb..27744b0c1f 100644 --- a/vendor/github.com/xanzy/go-gitlab/epics.go +++ b/vendor/github.com/xanzy/go-gitlab/epics.go @@ -211,6 +211,7 @@ func (s *EpicsService) CreateEpic(gid interface{}, opt *CreateEpicOptions, optio // GitLab API docs: https://docs.gitlab.com/ee/api/epics.html#update-epic type UpdateEpicOptions struct { Title *string `url:"title,omitempty" json:"title,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` Description *string `url:"description,omitempty" json:"description,omitempty"` Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` StartDateIsFixed *bool `url:"start_date_is_fixed,omitempty" json:"start_date_is_fixed,omitempty"` diff --git a/vendor/github.com/xanzy/go-gitlab/error_tracking.go b/vendor/github.com/xanzy/go-gitlab/error_tracking.go new file mode 100644 index 0000000000..7a0f067fa4 --- /dev/null +++ b/vendor/github.com/xanzy/go-gitlab/error_tracking.go @@ -0,0 +1,196 @@ +// +// Copyright 2022, Ryan Glab +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package gitlab + +import ( + "fmt" + "net/http" +) + +// ErrorTrackingService handles communication with the error tracking +// methods of the GitLab API. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html +type ErrorTrackingService struct { + client *Client +} + +// ErrorTrackingClientKey represents an error tracking client key. +// +// GitLab docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#error-tracking-client-keys +type ErrorTrackingClientKey struct { + ID int `json:"id"` + Active bool `json:"active"` + PublicKey string `json:"public_key"` + SentryDsn string `json:"sentry_dsn"` +} + +func (p ErrorTrackingClientKey) String() string { + return Stringify(p) +} + +// ErrorTrackingSettings represents error tracking settings for a GitLab project. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/error_tracking.html +type ErrorTrackingSettings struct { + Active bool `json:"active"` + ProjectName string `json:"project_name"` + SentryExternalURL string `json:"sentry_external_url"` + APIURL string `json:"api_url"` + Integrated bool `json:"integrated"` +} + +func (p ErrorTrackingSettings) String() string { + return Stringify(p) +} + +// GetErrorTrackingSettings gets error tracking settings. 
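+// A minimal sketch of a call (the client value and project ID are
+// placeholders):
+//
+//	settings, _, err := git.ErrorTracking.GetErrorTrackingSettings(1234)
+//	if err == nil && settings.Active {
+//		fmt.Println("error tracking backed by:", settings.SentryExternalURL)
+//	}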
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#get-error-tracking-settings +func (s *ErrorTrackingService) GetErrorTrackingSettings(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + ets := new(ErrorTrackingSettings) + resp, err := s.client.Do(req, ets) + if err != nil { + return nil, resp, err + } + + return ets, resp, err +} + +// EnableDisableErrorTrackingOptions represents the available +// EnableDisableErrorTracking() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings +type EnableDisableErrorTrackingOptions struct { + Active *bool `url:"active,omitempty" json:"active,omitempty"` + Integrated *bool `url:"integrated,omitempty" json:"integrated,omitempty"` +} + +// EnableDisableErrorTracking allows you to enable or disable the error tracking +// settings for a project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#enable-or-disable-the-error-tracking-project-settings +func (s *ErrorTrackingService) EnableDisableErrorTracking(pid interface{}, opt *EnableDisableErrorTrackingOptions, options ...RequestOptionFunc) (*ErrorTrackingSettings, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/settings", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPatch, u, opt, options) + if err != nil { + return nil, nil, err + } + + ets := new(ErrorTrackingSettings) + resp, err := s.client.Do(req, &ets) + if err != nil { + return nil, resp, err + } + + return ets, resp, err +} + +// ListClientKeysOptions represents the available ListClientKeys() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys +type ListClientKeysOptions ListOptions + +// ListClientKeys lists error tracking project client keys. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#list-project-client-keys +func (s *ErrorTrackingService) ListClientKeys(pid interface{}, opt *ListClientKeysOptions, options ...RequestOptionFunc) ([]*ErrorTrackingClientKey, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var cks []*ErrorTrackingClientKey + resp, err := s.client.Do(req, &cks) + if err != nil { + return nil, resp, err + } + + return cks, resp, err +} + +// CreateClientKey creates a new client key for a project. 
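+// A hedged usage sketch (the client value and project path are placeholders);
+// the returned key's SentryDsn is what an error-tracking SDK would be
+// configured with:
+//
+//	key, _, err := git.ErrorTracking.CreateClientKey("group/project")
+//	if err == nil {
+//		fmt.Println(key.SentryDsn)
+//	}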
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#create-a-client-key +func (s *ErrorTrackingService) CreateClientKey(pid interface{}, options ...RequestOptionFunc) (*ErrorTrackingClientKey, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, nil, options) + if err != nil { + return nil, nil, err + } + + ck := new(ErrorTrackingClientKey) + resp, err := s.client.Do(req, ck) + if err != nil { + return nil, resp, err + } + + return ck, resp, err +} + +// DeleteClientKey removes a client key from the project. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/error_tracking.html#delete-a-client-key +func (s *ErrorTrackingService) DeleteClientKey(pid interface{}, keyID int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/error_tracking/client_keys/%d", PathEscape(project), keyID) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go index aff4c4d1bf..a0162e0c30 100644 --- a/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go +++ b/vendor/github.com/xanzy/go-gitlab/event_webhook_types.go @@ -23,7 +23,7 @@ import ( "time" ) -//BuildEvent represents a build event +// BuildEvent represents a build event. // // GitLab API docs: // https://docs.gitlab.com/ce/user/project/integrations/webhooks.html#build-events @@ -37,6 +37,7 @@ type BuildEvent struct { BuildName string `json:"build_name"` BuildStage string `json:"build_stage"` BuildStatus string `json:"build_status"` + BuildCreatedAt string `json:"build_created_at"` BuildStartedAt string `json:"build_started_at"` BuildFinishedAt string `json:"build_finished_at"` BuildDuration float64 `json:"build_duration"` @@ -193,6 +194,7 @@ type IssueCommentEvent struct { Attachment string `json:"attachment"` LineCode string `json:"line_code"` CommitID string `json:"commit_id"` + DiscussionID string `json:"discussion_id"` NoteableID int `json:"noteable_id"` System bool `json:"system"` StDiff []*Diff `json:"st_diff"` @@ -273,6 +275,10 @@ type IssueEvent struct { Assignees *[]EventUser `json:"assignees"` Labels []Label `json:"labels"` Changes struct { + Assignees struct { + Previous []*EventUser `json:"previous"` + Current []*EventUser `json:"current"` + } `json:"assignees"` Description struct { Previous string `json:"previous"` Current string `json:"current"` @@ -515,11 +521,12 @@ type MergeEvent struct { Email string `json:"email"` } `json:"author"` } `json:"last_commit"` - WorkInProgress bool `json:"work_in_progress"` - URL string `json:"url"` - Action string `json:"action"` - OldRev string `json:"oldrev"` - Assignee *EventUser `json:"assignee"` + BlockingDiscussionsResolved bool `json:"blocking_discussions_resolved"` + WorkInProgress bool `json:"work_in_progress"` + URL string `json:"url"` + Action string `json:"action"` + OldRev string `json:"oldrev"` + Assignee *EventUser `json:"assignee"` } `json:"object_attributes"` Repository *Repository `json:"repository"` Assignee *EventUser `json:"assignee"` diff --git a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go 
b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go index b6d3092db9..cc2d6679ac 100644 --- a/vendor/github.com/xanzy/go-gitlab/external_status_checks.go +++ b/vendor/github.com/xanzy/go-gitlab/external_status_checks.go @@ -64,6 +64,36 @@ func (s *ExternalStatusChecksService) ListMergeStatusChecks(pid interface{}, mr return mscs, resp, err } +// SetExternalStatusCheckStatusOptions represents the available +// SetExternalStatusCheckStatus() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check +type SetExternalStatusCheckStatusOptions struct { + SHA *string `url:"sha,omitempty" json:"sha,omitempty"` + ExternalStatusCheckID *int `url:"external_status_check_id,omitempty" json:"external_status_check_id,omitempty"` + Status *string `url:"status,omitempty" json:"status,omitempty"` +} + +// SetExternalStatusCheckStatus sets the status of an external status check. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#set-status-of-an-external-status-check +func (s *ExternalStatusChecksService) SetExternalStatusCheckStatus(pid interface{}, mergeRequest int, opt *SetExternalStatusCheckStatusOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/status_check_responses", PathEscape(project), mergeRequest) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + // ListProjectStatusChecks lists the project external status checks. // // GitLab API docs: @@ -88,3 +118,82 @@ func (s *ExternalStatusChecksService) ListProjectStatusChecks(pid interface{}, o return pscs, resp, err } + +// CreateExternalStatusCheckOptions represents the available +// CreateExternalStatusCheck() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check +type CreateExternalStatusCheckOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` +} + +// CreateExternalStatusCheck creates an external status check. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#create-external-status-check +func (s *ExternalStatusChecksService) CreateExternalStatusCheck(pid interface{}, opt *CreateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/external_status_checks", PathEscape(project)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// DeleteExternalStatusCheck deletes an external status check. 
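+// A minimal sketch (the client value, project path, and check ID are
+// placeholders):
+//
+//	_, err := git.ExternalStatusChecks.DeleteExternalStatusCheck("group/project", 3)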
+// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#delete-external-status-check +func (s *ExternalStatusChecksService) DeleteExternalStatusCheck(pid interface{}, check int, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) + + req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// UpdateExternalStatusCheckOptions represents the available +// UpdateExternalStatusCheck() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check +type UpdateExternalStatusCheckOptions struct { + Name *string `url:"name,omitempty" json:"name,omitempty"` + ExternalURL *string `url:"external_url,omitempty" json:"external_url,omitempty"` + ProtectedBranchIDs *[]int `url:"protected_branch_ids,omitempty" json:"protected_branch_ids,omitempty"` +} + +// UpdateExternalStatusCheck updates an external status check. +// +// Gitlab API docs: +// https://docs.gitlab.com/ee/api/status_checks.html#update-external-status-check +func (s *ExternalStatusChecksService) UpdateExternalStatusCheck(pid interface{}, check int, opt *UpdateExternalStatusCheckOptions, options ...RequestOptionFunc) (*Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, err + } + u := fmt.Sprintf("projects/%s/external_status_checks/%d", PathEscape(project), check) + + req, err := s.client.NewRequest(http.MethodPut, u, opt, options) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go index d1d59bb9d8..8fb8869412 100644 --- a/vendor/github.com/xanzy/go-gitlab/gitlab.go +++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go @@ -23,7 +23,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "math/rand" "mime/multipart" "net/http" @@ -88,7 +87,7 @@ type Client struct { // Token type used to make authenticated API calls. authType AuthType - // Username and password used for basix authentication. + // Username and password used for basic authentication. username, password string // Token used to make authenticated API calls. @@ -101,106 +100,110 @@ type Client struct { UserAgent string // Services used for talking to different parts of the GitLab API. 
- AccessRequests *AccessRequestsService - Applications *ApplicationsService - AuditEvents *AuditEventsService - Avatar *AvatarRequestsService - AwardEmoji *AwardEmojiService - Boards *IssueBoardsService - Branches *BranchesService - BroadcastMessage *BroadcastMessagesService - CIYMLTemplate *CIYMLTemplatesService - Commits *CommitsService - ContainerRegistry *ContainerRegistryService - CustomAttribute *CustomAttributesService - DeployKeys *DeployKeysService - DeployTokens *DeployTokensService - Deployments *DeploymentsService - Discussions *DiscussionsService - Environments *EnvironmentsService - EpicIssues *EpicIssuesService - Epics *EpicsService - Events *EventsService - ExternalStatusChecks *ExternalStatusChecksService - Features *FeaturesService - FreezePeriods *FreezePeriodsService - GenericPackages *GenericPackagesService - GeoNodes *GeoNodesService - GitIgnoreTemplates *GitIgnoreTemplatesService - GroupAccessTokens *GroupAccessTokensService - GroupBadges *GroupBadgesService - GroupCluster *GroupClustersService - GroupImportExport *GroupImportExportService - GroupIssueBoards *GroupIssueBoardsService - GroupIterations *GroupIterationsService - GroupLabels *GroupLabelsService - GroupMembers *GroupMembersService - GroupMilestones *GroupMilestonesService - GroupVariables *GroupVariablesService - GroupWikis *GroupWikisService - Groups *GroupsService - InstanceCluster *InstanceClustersService - InstanceVariables *InstanceVariablesService - Invites *InvitesService - IssueLinks *IssueLinksService - Issues *IssuesService - IssuesStatistics *IssuesStatisticsService - Jobs *JobsService - Keys *KeysService - Labels *LabelsService - License *LicenseService - LicenseTemplates *LicenseTemplatesService - ManagedLicenses *ManagedLicensesService - Markdown *MarkdownService - MergeRequestApprovals *MergeRequestApprovalsService - MergeRequests *MergeRequestsService - Milestones *MilestonesService - Namespaces *NamespacesService - Notes *NotesService - NotificationSettings *NotificationSettingsService - Packages *PackagesService - Pages *PagesService - PagesDomains *PagesDomainsService - PersonalAccessTokens *PersonalAccessTokensService - PipelineSchedules *PipelineSchedulesService - PipelineTriggers *PipelineTriggersService - Pipelines *PipelinesService - PlanLimits *PlanLimitsService - ProjectBadges *ProjectBadgesService - ProjectAccessTokens *ProjectAccessTokensService - ProjectCluster *ProjectClustersService - ProjectImportExport *ProjectImportExportService - ProjectIterations *ProjectIterationsService - ProjectMembers *ProjectMembersService - ProjectMirrors *ProjectMirrorService - ProjectSnippets *ProjectSnippetsService - ProjectVariables *ProjectVariablesService - ProjectVulnerabilities *ProjectVulnerabilitiesService - Projects *ProjectsService - ProtectedBranches *ProtectedBranchesService - ProtectedEnvironments *ProtectedEnvironmentsService - ProtectedTags *ProtectedTagsService - ReleaseLinks *ReleaseLinksService - Releases *ReleasesService - Repositories *RepositoriesService - RepositoryFiles *RepositoryFilesService - RepositorySubmodules *RepositorySubmodulesService - ResourceLabelEvents *ResourceLabelEventsService - ResourceStateEvents *ResourceStateEventsService - Runners *RunnersService - Search *SearchService - Services *ServicesService - Settings *SettingsService - Sidekiq *SidekiqService - Snippets *SnippetsService - SystemHooks *SystemHooksService - Tags *TagsService - Todos *TodosService - Topics *TopicsService - Users *UsersService - Validate *ValidateService - Version *VersionService 
- Wikis *WikisService + AccessRequests *AccessRequestsService + Applications *ApplicationsService + AuditEvents *AuditEventsService + Avatar *AvatarRequestsService + AwardEmoji *AwardEmojiService + Boards *IssueBoardsService + Branches *BranchesService + BroadcastMessage *BroadcastMessagesService + CIYMLTemplate *CIYMLTemplatesService + ClusterAgents *ClusterAgentsService + Commits *CommitsService + ContainerRegistry *ContainerRegistryService + CustomAttribute *CustomAttributesService + DeployKeys *DeployKeysService + DeployTokens *DeployTokensService + DeploymentMergeRequests *DeploymentMergeRequestsService + Deployments *DeploymentsService + Discussions *DiscussionsService + Environments *EnvironmentsService + EpicIssues *EpicIssuesService + Epics *EpicsService + ErrorTracking *ErrorTrackingService + Events *EventsService + ExternalStatusChecks *ExternalStatusChecksService + Features *FeaturesService + FreezePeriods *FreezePeriodsService + GenericPackages *GenericPackagesService + GeoNodes *GeoNodesService + GitIgnoreTemplates *GitIgnoreTemplatesService + GroupAccessTokens *GroupAccessTokensService + GroupBadges *GroupBadgesService + GroupCluster *GroupClustersService + GroupImportExport *GroupImportExportService + GroupIssueBoards *GroupIssueBoardsService + GroupIterations *GroupIterationsService + GroupLabels *GroupLabelsService + GroupMembers *GroupMembersService + GroupMilestones *GroupMilestonesService + GroupVariables *GroupVariablesService + GroupWikis *GroupWikisService + Groups *GroupsService + InstanceCluster *InstanceClustersService + InstanceVariables *InstanceVariablesService + Invites *InvitesService + IssueLinks *IssueLinksService + Issues *IssuesService + IssuesStatistics *IssuesStatisticsService + Jobs *JobsService + Keys *KeysService + Labels *LabelsService + License *LicenseService + LicenseTemplates *LicenseTemplatesService + ManagedLicenses *ManagedLicensesService + Markdown *MarkdownService + MergeRequestApprovals *MergeRequestApprovalsService + MergeRequests *MergeRequestsService + Milestones *MilestonesService + Namespaces *NamespacesService + Notes *NotesService + NotificationSettings *NotificationSettingsService + Packages *PackagesService + Pages *PagesService + PagesDomains *PagesDomainsService + PersonalAccessTokens *PersonalAccessTokensService + PipelineSchedules *PipelineSchedulesService + PipelineTriggers *PipelineTriggersService + Pipelines *PipelinesService + PlanLimits *PlanLimitsService + ProjectAccessTokens *ProjectAccessTokensService + ProjectBadges *ProjectBadgesService + ProjectCluster *ProjectClustersService + ProjectImportExport *ProjectImportExportService + ProjectIterations *ProjectIterationsService + ProjectMembers *ProjectMembersService + ProjectMirrors *ProjectMirrorService + ProjectSnippets *ProjectSnippetsService + ProjectVariables *ProjectVariablesService + ProjectVulnerabilities *ProjectVulnerabilitiesService + Projects *ProjectsService + ProtectedBranches *ProtectedBranchesService + ProtectedEnvironments *ProtectedEnvironmentsService + ProtectedTags *ProtectedTagsService + ReleaseLinks *ReleaseLinksService + Releases *ReleasesService + Repositories *RepositoriesService + RepositoryFiles *RepositoryFilesService + RepositorySubmodules *RepositorySubmodulesService + ResourceLabelEvents *ResourceLabelEventsService + ResourceMilestoneEvents *ResourceMilestoneEventsService + ResourceStateEvents *ResourceStateEventsService + Runners *RunnersService + Search *SearchService + Services *ServicesService + Settings *SettingsService + Sidekiq 
*SidekiqService + Snippets *SnippetsService + SystemHooks *SystemHooksService + Tags *TagsService + Todos *TodosService + Topics *TopicsService + Users *UsersService + Validate *ValidateService + Version *VersionService + Wikis *WikisService } // ListOptions specifies the optional parameters to various List methods that @@ -309,16 +312,19 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.Branches = &BranchesService{client: c} c.BroadcastMessage = &BroadcastMessagesService{client: c} c.CIYMLTemplate = &CIYMLTemplatesService{client: c} + c.ClusterAgents = &ClusterAgentsService{client: c} c.Commits = &CommitsService{client: c} c.ContainerRegistry = &ContainerRegistryService{client: c} c.CustomAttribute = &CustomAttributesService{client: c} c.DeployKeys = &DeployKeysService{client: c} c.DeployTokens = &DeployTokensService{client: c} + c.DeploymentMergeRequests = &DeploymentMergeRequestsService{client: c} c.Deployments = &DeploymentsService{client: c} c.Discussions = &DiscussionsService{client: c} c.Environments = &EnvironmentsService{client: c} c.EpicIssues = &EpicIssuesService{client: c} c.Epics = &EpicsService{client: c} + c.ErrorTracking = &ErrorTrackingService{client: c} c.Events = &EventsService{client: c} c.ExternalStatusChecks = &ExternalStatusChecksService{client: c} c.Features = &FeaturesService{client: c} @@ -365,8 +371,8 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.PipelineTriggers = &PipelineTriggersService{client: c} c.Pipelines = &PipelinesService{client: c} c.PlanLimits = &PlanLimitsService{client: c} - c.ProjectBadges = &ProjectBadgesService{client: c} c.ProjectAccessTokens = &ProjectAccessTokensService{client: c} + c.ProjectBadges = &ProjectBadgesService{client: c} c.ProjectCluster = &ProjectClustersService{client: c} c.ProjectImportExport = &ProjectImportExportService{client: c} c.ProjectIterations = &ProjectIterationsService{client: c} @@ -385,6 +391,7 @@ func newClient(options ...ClientOptionFunc) (*Client, error) { c.RepositoryFiles = &RepositoryFilesService{client: c} c.RepositorySubmodules = &RepositorySubmodulesService{client: c} c.ResourceLabelEvents = &ResourceLabelEventsService{client: c} + c.ResourceMilestoneEvents = &ResourceMilestoneEventsService{client: c} c.ResourceStateEvents = &ResourceStateEventsService{client: c} c.Runners = &RunnersService{client: c} c.Search = &SearchService{client: c} @@ -879,7 +886,7 @@ func CheckResponse(r *http.Response) error { } errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err == nil && data != nil { errorResponse.Body = data diff --git a/vendor/github.com/xanzy/go-gitlab/group_hooks.go b/vendor/github.com/xanzy/go-gitlab/group_hooks.go index acb51d9a4a..6e75d6a66e 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_hooks.go +++ b/vendor/github.com/xanzy/go-gitlab/group_hooks.go @@ -47,17 +47,22 @@ type GroupHook struct { CreatedAt *time.Time `json:"created_at"` } +// ListGroupHooksOptions represents the available ListGroupHooks() options. +// +// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#list-group-hooks +type ListGroupHooksOptions ListOptions + // ListGroupHooks gets a list of group hooks. 
// // GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#list-group-hooks -func (s *GroupsService) ListGroupHooks(gid interface{}, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { +func (s *GroupsService) ListGroupHooks(gid interface{}, opt *ListGroupHooksOptions, options ...RequestOptionFunc) ([]*GroupHook, *Response, error) { group, err := parseID(gid) if err != nil { return nil, nil, err } u := fmt.Sprintf("groups/%s/hooks", PathEscape(group)) - req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/group_members.go b/vendor/github.com/xanzy/go-gitlab/group_members.go index 4eeee59fbb..16bcdeb5a8 100644 --- a/vendor/github.com/xanzy/go-gitlab/group_members.go +++ b/vendor/github.com/xanzy/go-gitlab/group_members.go @@ -64,7 +64,8 @@ type GroupMember struct { // https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project type ListGroupMembersOptions struct { ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` + Query *string `url:"query,omitempty" json:"query,omitempty"` + UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` } // ListGroupMembers get a list of group members viewable by the authenticated @@ -327,18 +328,26 @@ func (s *GroupMembersService) EditGroupMember(gid interface{}, user int, opt *Ed return gm, resp, err } +// RemoveGroupMemberOptions represents the available options to remove a group member. +// +// GitLab API docs: https://docs.gitlab.com/ee/api/members.html#remove-a-member-from-a-group-or-project +type RemoveGroupMemberOptions struct { + SkipSubresources *bool `url:"skip_subresources,omitempty" json:"skip_subresources,omitempty"` + UnassignIssuables *bool `url:"unassign_issuables,omitempty" json:"unassign_issuables,omitempty"` +} + // RemoveGroupMember removes user from user team. 
// // GitLab API docs: // https://docs.gitlab.com/ce/api/members.html#remove-a-member-from-a-group-or-project -func (s *GroupMembersService) RemoveGroupMember(gid interface{}, user int, options ...RequestOptionFunc) (*Response, error) { +func (s *GroupMembersService) RemoveGroupMember(gid interface{}, user int, opt *RemoveGroupMemberOptions, options ...RequestOptionFunc) (*Response, error) { group, err := parseID(gid) if err != nil { return nil, err } u := fmt.Sprintf("groups/%s/members/%d", PathEscape(group), user) - req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) + req, err := s.client.NewRequest(http.MethodDelete, u, opt, options) if err != nil { return nil, err } diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go index 11391c3ee6..96a1e3a37a 100644 --- a/vendor/github.com/xanzy/go-gitlab/groups.go +++ b/vendor/github.com/xanzy/go-gitlab/groups.go @@ -17,7 +17,9 @@ package gitlab import ( + "bytes" "fmt" + "io" "net/http" "time" ) @@ -50,7 +52,7 @@ type Group struct { FileTemplateProjectID int `json:"file_template_project_id"` ParentID int `json:"parent_id"` Projects []*Project `json:"projects"` - Statistics *StorageStatistics `json:"statistics"` + Statistics *Statistics `json:"statistics"` CustomAttributes []*CustomAttribute `json:"custom_attributes"` ShareWithGroupLock bool `json:"share_with_group_lock"` RequireTwoFactorAuth bool `json:"require_two_factor_authentication"` @@ -73,6 +75,7 @@ type Group struct { LDAPCN string `json:"ldap_cn"` LDAPAccess AccessLevelValue `json:"ldap_access"` LDAPGroupLinks []*LDAPGroupLink `json:"ldap_group_links"` + SAMLGroupLinks []*SAMLGroupLink `json:"saml_group_links"` SharedRunnersMinutesLimit int `json:"shared_runners_minutes_limit"` ExtraSharedRunnersMinutesLimit int `json:"extra_shared_runners_minutes_limit"` PreventForkingOutsideGroup bool `json:"prevent_forking_outside_group"` @@ -80,6 +83,14 @@ type Group struct { CreatedAt *time.Time `json:"created_at"` } +// GroupAvatar represents a GitLab group avatar. +// +// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html +type GroupAvatar struct { + Filename string + Image io.Reader +} + // LDAPGroupLink represents a GitLab LDAP group link. // // GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#ldap-group-links @@ -90,6 +101,14 @@ type LDAPGroupLink struct { Provider string `json:"provider"` } +// SAMLGroupLink represents a GitLab SAML group link. +// +// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#saml-group-links +type SAMLGroupLink struct { + Name string `json:"name"` + AccessLevel AccessLevelValue `json:"access_level"` +} + // ListGroupsOptions represents the available ListGroups() options. // // GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#list-project-groups @@ -271,6 +290,31 @@ func (s *GroupsService) GetGroup(gid interface{}, opt *GetGroupOptions, options return g, resp, err } +// DownloadAvatar downloads a group avatar. 
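+// A hedged sketch of saving the avatar to disk (the client value, group ID,
+// and file name are placeholders):
+//
+//	avatar, _, err := git.Groups.DownloadAvatar(42)
+//	if err == nil {
+//		f, _ := os.Create("avatar.png")
+//		io.Copy(f, avatar)
+//		f.Close()
+//	}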
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#download-a-group-avatar +func (s *GroupsService) DownloadAvatar(gid interface{}, options ...RequestOptionFunc) (*bytes.Reader, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/avatar", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + avatar := new(bytes.Buffer) + resp, err := s.client.Do(req, avatar) + if err != nil { + return nil, resp, err + } + + return bytes.NewReader(avatar.Bytes()), resp, err +} + // CreateGroupOptions represents the available CreateGroup() options. // // GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#new-group @@ -345,6 +389,40 @@ func (s *GroupsService) TransferGroup(gid interface{}, pid interface{}, options return g, resp, err } +// TransferSubGroupOptions represents the available TransferSubGroup() options. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group +type TransferSubGroupOptions struct { + GroupID *int `url:"group_id,omitempty" json:"group_id,omitempty"` +} + +// TransferSubGroup transfers a group to a new parent group or turn a subgroup +// to a top-level group. Available to administrators and users. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#transfer-a-group-to-a-new-parent-group--turn-a-subgroup-to-a-top-level-group +func (s *GroupsService) TransferSubGroup(gid interface{}, opt *TransferSubGroupOptions, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s/transfer", PathEscape(group)) + + req, err := s.client.NewRequest(http.MethodPost, u, opt, options) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, err +} + // UpdateGroupOptions represents the available UpdateGroup() options. // // GitLab API docs: https://docs.gitlab.com/ee/api/groups.html#update-group @@ -398,6 +476,39 @@ func (s *GroupsService) UpdateGroup(gid interface{}, opt *UpdateGroupOptions, op return g, resp, err } +// UploadAvatar uploads a group avatar. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/groups.html#upload-a-group-avatar +func (s *GroupsService) UploadAvatar(gid interface{}, avatar io.Reader, filename string, options ...RequestOptionFunc) (*Group, *Response, error) { + group, err := parseID(gid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("groups/%s", PathEscape(group)) + + req, err := s.client.UploadRequest( + http.MethodPut, + u, + avatar, + filename, + UploadAvatar, + nil, + options, + ) + if err != nil { + return nil, nil, err + } + + g := new(Group) + resp, err := s.client.Do(req, g) + if err != nil { + return nil, resp, err + } + + return g, resp, err +} + // DeleteGroup removes group with all projects inside. // // GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#remove-group @@ -642,6 +753,113 @@ func (s *GroupsService) DeleteGroupLDAPLinkForProvider(gid interface{}, provider return s.client.Do(req, nil) } +// ListGroupSAMLLinks lists the group's SAML links. Available only for users who +// can edit groups. 
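+// A minimal sketch (the client value and group ID are placeholders):
+//
+//	links, _, err := git.Groups.ListGroupSAMLLinks(42)
+//	if err == nil {
+//		for _, l := range links {
+//			fmt.Println(l.Name, l.AccessLevel)
+//		}
+//	}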
+// ListGroupSAMLLinks lists the group's SAML links. Available only for users who
+// can edit groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#list-saml-group-links
+func (s *GroupsService) ListGroupSAMLLinks(gid interface{}, options ...RequestOptionFunc) ([]*SAMLGroupLink, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var gl []*SAMLGroupLink
+	resp, err := s.client.Do(req, &gl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gl, resp, nil
+}
+
+// GetGroupSAMLLink gets a specific group SAML link. Available only for users who
+// can edit groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#get-saml-group-link
+func (s *GroupsService) GetGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName))
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gl := new(SAMLGroupLink)
+	resp, err := s.client.Do(req, &gl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gl, resp, nil
+}
+
+// AddGroupSAMLLinkOptions represents the available AddGroupSAMLLink() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link
+type AddGroupSAMLLinkOptions struct {
+	SAMLGroupName *string           `url:"saml_group_name,omitempty" json:"saml_group_name,omitempty"`
+	AccessLevel   *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+}
+
+// AddGroupSAMLLink creates a new group SAML link. Available only for users who
+// can edit groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#add-saml-group-link
+func (s *GroupsService) AddGroupSAMLLink(gid interface{}, opt *AddGroupSAMLLinkOptions, options ...RequestOptionFunc) (*SAMLGroupLink, *Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("groups/%s/saml_group_links", PathEscape(group))
+
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	gl := new(SAMLGroupLink)
+	resp, err := s.client.Do(req, &gl)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return gl, resp, err
+}
+
+// DeleteGroupSAMLLink deletes a group SAML link. Available only for users who
+// can edit groups.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/groups.html#delete-saml-group-link
+func (s *GroupsService) DeleteGroupSAMLLink(gid interface{}, samlGroupName string, options ...RequestOptionFunc) (*Response, error) {
+	group, err := parseID(gid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("groups/%s/saml_group_links/%s", PathEscape(group), PathEscape(samlGroupName))
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
+
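Editor's note (illustrative only, not part of the diff): a short sketch of the new SAML group link methods above. The group ID 42 and the SAML group name "engineers" are placeholders.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Grant the SAML group "engineers" developer access to group 42.
	link, _, err := client.Groups.AddGroupSAMLLink(42, &gitlab.AddGroupSAMLLinkOptions{
		SAMLGroupName: gitlab.String("engineers"),
		AccessLevel:   gitlab.AccessLevel(gitlab.DeveloperPermissions),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("linked %q at access level %d", link.Name, link.AccessLevel)

	// List all SAML links on the group, then remove the one just added.
	links, _, err := client.Groups.ListGroupSAMLLinks(42)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("group has %d SAML link(s)", len(links))

	if _, err := client.Groups.DeleteGroupSAMLLink(42, "engineers"); err != nil {
		log.Fatal(err)
	}
}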
 // ShareGroupWithGroupOptions represents the available ShareGroupWithGroup() options.
 //
 // GitLab API docs:
diff --git a/vendor/github.com/xanzy/go-gitlab/invites.go b/vendor/github.com/xanzy/go-gitlab/invites.go
index b5ae81c82d..46c9c8958d 100644
--- a/vendor/github.com/xanzy/go-gitlab/invites.go
+++ b/vendor/github.com/xanzy/go-gitlab/invites.go
@@ -111,6 +111,7 @@ func (s *InvitesService) ListPendingProjectInvitations(pid interface{}, opt *Lis
 type InvitesOptions struct {
 	ID          interface{}       `url:"id,omitempty" json:"id,omitempty"`
 	Email       *string           `url:"email,omitempty" json:"email,omitempty"`
+	UserID      interface{}       `url:"user_id,omitempty" json:"user_id,omitempty"`
 	AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
 	ExpiresAt   *ISOTime          `url:"expires_at,omitempty" json:"expires_at,omitempty"`
 }
diff --git a/vendor/github.com/xanzy/go-gitlab/issue_links.go b/vendor/github.com/xanzy/go-gitlab/issue_links.go
index f40b65c468..4fe81ffc50 100644
--- a/vendor/github.com/xanzy/go-gitlab/issue_links.go
+++ b/vendor/github.com/xanzy/go-gitlab/issue_links.go
@@ -45,12 +45,12 @@ type IssueLink struct {
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/issue_links.html#list-issue-relations
-func (s *IssueLinksService) ListIssueRelations(pid interface{}, issueIID int, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
+func (s *IssueLinksService) ListIssueRelations(pid interface{}, issue int, options ...RequestOptionFunc) ([]*Issue, *Response, error) {
 	project, err := parseID(pid)
 	if err != nil {
 		return nil, nil, err
 	}
-	u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issueIID)
+	u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue)
 
 	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
 	if err != nil {
@@ -66,6 +66,31 @@ func (s *IssueLinksService) ListIssueRelations(pid interface{}, issueIID int, op
 	return is, resp, err
 }
 
+// GetIssueLink gets a specific issue link.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/issue_links.html#get-an-issue-link
+func (s *IssueLinksService) GetIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, nil, err
+	}
+	u := fmt.Sprintf("projects/%s/issues/%d/links/%d", PathEscape(project), issue, issueLink)
+
+	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	il := new(IssueLink)
+	resp, err := s.client.Do(req, il)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return il, resp, err
+}
+
 // CreateIssueLinkOptions represents the available CreateIssueLink() options.
// // GitLab API docs: https://docs.gitlab.com/ee/api/issue_links.html @@ -80,12 +105,12 @@ type CreateIssueLinkOptions struct { // // GitLab API docs: // https://docs.gitlab.com/ee/api/issue_links.html#create-an-issue-link -func (s *IssueLinksService) CreateIssueLink(pid interface{}, issueIID int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) { +func (s *IssueLinksService) CreateIssueLink(pid interface{}, issue int, opt *CreateIssueLinkOptions, options ...RequestOptionFunc) (*IssueLink, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } - u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issueIID) + u := fmt.Sprintf("projects/%s/issues/%d/links", PathEscape(project), issue) req, err := s.client.NewRequest(http.MethodPost, u, opt, options) if err != nil { @@ -105,26 +130,26 @@ func (s *IssueLinksService) CreateIssueLink(pid interface{}, issueIID int, opt * // // GitLab API docs: // https://docs.gitlab.com/ee/api/issue_links.html#delete-an-issue-link -func (s *IssueLinksService) DeleteIssueLink(pid interface{}, issueIID, issueLinkID int, options ...RequestOptionFunc) (*IssueLink, *Response, error) { +func (s *IssueLinksService) DeleteIssueLink(pid interface{}, issue, issueLink int, options ...RequestOptionFunc) (*IssueLink, *Response, error) { project, err := parseID(pid) if err != nil { return nil, nil, err } u := fmt.Sprintf("projects/%s/issues/%d/links/%d", PathEscape(project), - issueIID, - issueLinkID) + issue, + issueLink) req, err := s.client.NewRequest(http.MethodDelete, u, nil, options) if err != nil { return nil, nil, err } - i := new(IssueLink) - resp, err := s.client.Do(req, &i) + il := new(IssueLink) + resp, err := s.client.Do(req, &il) if err != nil { return nil, resp, err } - return i, resp, err + return il, resp, err } diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go index 208c3d7868..7280efbb58 100644 --- a/vendor/github.com/xanzy/go-gitlab/issues.go +++ b/vendor/github.com/xanzy/go-gitlab/issues.go @@ -120,6 +120,7 @@ type Issue struct { MergeRequestCount int `json:"merge_requests_count"` EpicIssueID int `json:"epic_issue_id"` Epic *Epic `json:"epic"` + Iteration *GroupIteration `json:"iteration"` TaskCompletionStatus *TasksCompletionStatus `json:"task_completion_status"` } @@ -206,32 +207,33 @@ type LabelDetails struct { // GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues type ListIssuesOptions struct { ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string 
`url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` + NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListIssues gets all issues created by authenticated user. 
This function @@ -258,32 +260,33 @@ func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...RequestOpt // GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-group-issues type ListGroupIssuesOptions struct { ListOptions - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string 
`url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListGroupIssues gets a list of group issues. This function accepts @@ -316,33 +319,34 @@ func (s *IssuesService) ListGroupIssues(pid interface{}, opt *ListGroupIssuesOpt // GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-project-issues type ListProjectIssuesOptions struct { ListOptions - IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` - State *string `url:"state,omitempty" json:"state,omitempty"` - Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` - NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` - WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` - Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` - NotMilestone *[]string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` - Scope *string `url:"scope,omitempty" json:"scope,omitempty"` - AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` - AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` - NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` + IIDs *[]int `url:"iids[],omitempty" json:"iids,omitempty"` + State *string `url:"state,omitempty" json:"state,omitempty"` + Labels *Labels `url:"labels,comma,omitempty" json:"labels,omitempty"` + NotLabels *Labels `url:"not[labels],comma,omitempty" json:"not[labels],omitempty"` + WithLabelDetails *bool `url:"with_labels_details,omitempty" json:"with_labels_details,omitempty"` + Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"` + NotMilestone *string `url:"not[milestone],omitempty" json:"not[milestone],omitempty"` + Scope *string `url:"scope,omitempty" json:"scope,omitempty"` + AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"` + AuthorUsername *string `url:"author_username,omitempty" json:"author_username,omitempty"` + NotAuthorID *[]int `url:"not[author_id],omitempty" json:"not[author_id],omitempty"` AssigneeID *AssigneeIDValue `url:"assignee_id,omitempty" json:"assignee_id,omitempty"` - NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` - AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` - MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` - NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` - OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` - Sort *string `url:"sort,omitempty" json:"sort,omitempty"` - Search *string `url:"search,omitempty" json:"search,omitempty"` - In *string `url:"in,omitempty" json:"in,omitempty"` - CreatedAfter *time.Time 
`url:"created_after,omitempty" json:"created_after,omitempty"` - CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` - DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` - UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` - UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` - Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` - IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + NotAssigneeID *[]int `url:"not[assignee_id],omitempty" json:"not[assignee_id],omitempty"` + AssigneeUsername *string `url:"assignee_username,omitempty" json:"assignee_username,omitempty"` + MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"` + NotMyReactionEmoji *[]string `url:"not[my_reaction_emoji],omitempty" json:"not[my_reaction_emoji],omitempty"` + OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"` + Sort *string `url:"sort,omitempty" json:"sort,omitempty"` + Search *string `url:"search,omitempty" json:"search,omitempty"` + In *string `url:"in,omitempty" json:"in,omitempty"` + CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"` + CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"` + DueDate *string `url:"due_date,omitempty" json:"due_date,omitempty"` + UpdatedAfter *time.Time `url:"updated_after,omitempty" json:"updated_after,omitempty"` + UpdatedBefore *time.Time `url:"updated_before,omitempty" json:"updated_before,omitempty"` + Confidential *bool `url:"confidential,omitempty" json:"confidential,omitempty"` + IssueType *string `url:"issue_type,omitempty" json:"issue_type,omitempty"` + IterationID *int `url:"iteration_id,omitempty" json:"iteration_id,omitempty"` } // ListProjectIssues gets a list of project issues. 
This function accepts @@ -630,7 +634,7 @@ func (s *IssuesService) ListMergeRequestsClosingIssue(pid interface{}, issue int if err != nil { return nil, nil, err } - u := fmt.Sprintf("/projects/%s/issues/%d/closed_by", PathEscape(project), issue) + u := fmt.Sprintf("projects/%s/issues/%d/closed_by", PathEscape(project), issue) req, err := s.client.NewRequest(http.MethodGet, u, opt, options) if err != nil { @@ -663,7 +667,7 @@ func (s *IssuesService) ListMergeRequestsRelatedToIssue(pid interface{}, issue i if err != nil { return nil, nil, err } - u := fmt.Sprintf("/projects/%s/issues/%d/related_merge_requests", + u := fmt.Sprintf("projects/%s/issues/%d/related_merge_requests", PathEscape(project), issue, ) diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/github.com/xanzy/go-gitlab/jobs.go index d50a43c974..c70e12094e 100644 --- a/vendor/github.com/xanzy/go-gitlab/jobs.go +++ b/vendor/github.com/xanzy/go-gitlab/jobs.go @@ -48,10 +48,11 @@ type Job struct { ID int `json:"id"` Name string `json:"name"` Pipeline struct { - ID int `json:"id"` - Ref string `json:"ref"` - Sha string `json:"sha"` - Status string `json:"status"` + ID int `json:"id"` + ProjectID int `json:"project_id"` + Ref string `json:"ref"` + Sha string `json:"sha"` + Status string `json:"status"` } `json:"pipeline"` Ref string `json:"ref"` Artifacts []struct { @@ -71,12 +72,13 @@ type Job struct { IsShared bool `json:"is_shared"` Name string `json:"name"` } `json:"runner"` - Stage string `json:"stage"` - Status string `json:"status"` - Tag bool `json:"tag"` - WebURL string `json:"web_url"` - Project *Project `json:"project"` - User *User `json:"user"` + Stage string `json:"stage"` + Status string `json:"status"` + FailureReason string `json:"failure_reason"` + Tag bool `json:"tag"` + WebURL string `json:"web_url"` + Project *Project `json:"project"` + User *User `json:"user"` } // Bridge represents a pipeline bridge. @@ -102,7 +104,10 @@ type Bridge struct { DownstreamPipeline *PipelineInfo `json:"downstream_pipeline"` } -// ListJobsOptions are options for two list apis +// ListJobsOptions represents the available ListProjectJobs() options. 
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#list-project-jobs
 type ListJobsOptions struct {
 	ListOptions
 	Scope *[]BuildStateValue `url:"scope[],omitempty" json:"scope,omitempty"`
diff --git a/vendor/github.com/xanzy/go-gitlab/packages.go b/vendor/github.com/xanzy/go-gitlab/packages.go
index 06a19376cf..d3e2be315f 100644
--- a/vendor/github.com/xanzy/go-gitlab/packages.go
+++ b/vendor/github.com/xanzy/go-gitlab/packages.go
@@ -224,3 +224,22 @@ func (s *PackagesService) DeleteProjectPackage(pid interface{}, pkg int, options
 	return s.client.Do(req, nil)
 }
+
+// DeletePackageFile deletes a file in a project package.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/packages.html#delete-a-package-file
+func (s *PackagesService) DeletePackageFile(pid interface{}, pkg, file int, options ...RequestOptionFunc) (*Response, error) {
+	project, err := parseID(pid)
+	if err != nil {
+		return nil, err
+	}
+	u := fmt.Sprintf("projects/%s/packages/%d/package_files/%d", PathEscape(project), pkg, file)
+
+	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.client.Do(req, nil)
+}
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
index 927320209c..a26697c1fd 100644
--- a/vendor/github.com/xanzy/go-gitlab/pipelines.go
+++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go
@@ -44,6 +44,7 @@ type PipelineVariable struct {
 // GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
 type Pipeline struct {
 	ID        int    `json:"id"`
+	IID       int    `json:"iid"`
 	ProjectID int    `json:"project_id"`
 	Status    string `json:"status"`
 	Source    string `json:"source"`
@@ -267,8 +268,17 @@ func (s *PipelinesService) GetPipelineTestReport(pid interface{}, pipeline int,
 //
 // GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#create-a-new-pipeline
 type CreatePipelineOptions struct {
-	Ref       *string              `url:"ref" json:"ref"`
-	Variables *[]*PipelineVariable `url:"variables,omitempty" json:"variables,omitempty"`
+	Ref       *string                     `url:"ref" json:"ref"`
+	Variables *[]*PipelineVariableOptions `url:"variables,omitempty" json:"variables,omitempty"`
+}
+
+// PipelineVariableOptions represents a pipeline variable.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
+type PipelineVariableOptions struct {
+	Key          *string `url:"key,omitempty" json:"key,omitempty"`
+	Value        *string `url:"value,omitempty" json:"value,omitempty"`
+	VariableType *string `url:"variable_type,omitempty" json:"variable_type,omitempty"`
 }
 
 // CreatePipeline creates a new project pipeline.
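Editor's note (illustrative only, not part of the diff): the CreatePipelineOptions change above swaps *[]*PipelineVariable for the new *[]*PipelineVariableOptions, whose fields are all pointers so unset values can be omitted. A hypothetical invocation, with the project ID 42, the ref, and the variable values as placeholders:

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Trigger a pipeline on main with a single env-style variable.
	pipeline, _, err := client.Pipelines.CreatePipeline(42, &gitlab.CreatePipelineOptions{
		Ref: gitlab.String("main"),
		Variables: &[]*gitlab.PipelineVariableOptions{
			{
				Key:          gitlab.String("DEPLOY_ENV"),
				Value:        gitlab.String("staging"),
				VariableType: gitlab.String("env_var"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created pipeline %d (iid %d)", pipeline.ID, pipeline.IID)
}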
diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/github.com/xanzy/go-gitlab/project_members.go index ad588bdf7c..0525cbffbc 100644 --- a/vendor/github.com/xanzy/go-gitlab/project_members.go +++ b/vendor/github.com/xanzy/go-gitlab/project_members.go @@ -36,7 +36,8 @@ type ProjectMembersService struct { // https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project type ListProjectMembersOptions struct { ListOptions - Query *string `url:"query,omitempty" json:"query,omitempty"` + Query *string `url:"query,omitempty" json:"query,omitempty"` + UserIDs *[]int `url:"user_ids[],omitempty" json:"user_ids,omitempty"` } // ListProjectMembers gets a list of a project's team members viewable by the diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go index 301a428991..ffd42dcfa8 100644 --- a/vendor/github.com/xanzy/go-gitlab/projects.go +++ b/vendor/github.com/xanzy/go-gitlab/projects.go @@ -121,7 +121,7 @@ type Project struct { GroupName string `json:"group_name"` GroupAccessLevel int `json:"group_access_level"` } `json:"shared_with_groups"` - Statistics *ProjectStatistics `json:"statistics"` + Statistics *Statistics `json:"statistics"` Links *Links `json:"_links,omitempty"` CIConfigPath string `json:"ci_config_path"` CIDefaultGitDepth int `json:"ci_default_git_depth"` @@ -225,16 +225,11 @@ type ProjectNamespace struct { Path string `json:"path"` Kind string `json:"kind"` FullPath string `json:"full_path"` + ParentID int `json:"parent_id"` AvatarURL string `json:"avatar_url"` WebURL string `json:"web_url"` } -// ProjectStatistics represents a statistics record for a project. -type ProjectStatistics struct { - StorageStatistics - CommitCount int `json:"commit_count"` -} - // Repository represents a repository. type Repository struct { Name string `json:"name"` @@ -253,12 +248,18 @@ type Repository struct { HTTPURL string `json:"http_url"` } -// StorageStatistics represents a statistics record for a group or project. -type StorageStatistics struct { - StorageSize int64 `json:"storage_size"` - RepositorySize int64 `json:"repository_size"` - LfsObjectsSize int64 `json:"lfs_objects_size"` - JobArtifactsSize int64 `json:"job_artifacts_size"` +// Statistics represents a statistics record for a group or project. +type Statistics struct { + CommitCount int64 `json:"commit_count"` + StorageSize int64 `json:"storage_size"` + RepositorySize int64 `json:"repository_size"` + WikiSize int64 `json:"wiki_size"` + LFSObjectsSize int64 `json:"lfs_objects_size"` + JobArtifactsSize int64 `json:"job_artifacts_size"` + PipelineArtifactsSize int64 `json:"pipeline_artifacts_size"` + PackagesSize int64 `json:"packages_size"` + SnippetsSize int64 `json:"snippets_size"` + UploadsSize int64 `json:"uploads_size"` } func (s Project) String() string { @@ -695,6 +696,9 @@ type ContainerExpirationPolicyAttributes struct { NameRegex *string `url:"name_regex,omitempty" json:"name_regex,omitempty"` } +// ProjectAvatar represents a GitLab project avatar. 
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/projects.html#create-project
 type ProjectAvatar struct {
 	Filename string
 	Image    io.Reader
diff --git a/vendor/github.com/xanzy/go-gitlab/protected_environments.go b/vendor/github.com/xanzy/go-gitlab/protected_environments.go
index 41fdfb1c93..6f270c8c1b 100644
--- a/vendor/github.com/xanzy/go-gitlab/protected_environments.go
+++ b/vendor/github.com/xanzy/go-gitlab/protected_environments.go
@@ -35,8 +35,9 @@ type ProtectedEnvironmentsService struct {
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_environments.html
 type ProtectedEnvironment struct {
-	Name               string                          `json:"name"`
-	DeployAccessLevels []*EnvironmentAccessDescription `json:"deploy_access_levels"`
+	Name                  string                          `json:"name"`
+	DeployAccessLevels    []*EnvironmentAccessDescription `json:"deploy_access_levels"`
+	RequiredApprovalCount int                             `json:"required_approval_count"`
 }
 
 // EnvironmentAccessDescription represents the access description for a protected
@@ -58,7 +59,8 @@ type EnvironmentAccessDescription struct {
 // https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
 type ListProtectedEnvironmentsOptions ListOptions
 
-// ListProtectedEnvironments returns a list of protected environments from a project.
+// ListProtectedEnvironments returns a list of protected environments from a
+// project.
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_environments.html#list-protected-environments
@@ -83,7 +85,8 @@ func (s *ProtectedEnvironmentsService) ListProtectedEnvironments(pid interface{}
 	return pes, resp, err
 }
 
-// GetProtectedEnvironment returns a single protected environment or wildcard protected environment.
+// GetProtectedEnvironment returns a single protected environment or wildcard
+// protected environment.
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_environments.html#get-a-single-protected-environment-or-wildcard-protected-environment
@@ -114,8 +117,9 @@ func (s *ProtectedEnvironmentsService) GetProtectedEnvironment(pid interface{},
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_environments.html#protect-repository-environments
 type ProtectRepositoryEnvironmentsOptions struct {
-	Name               *string                      `url:"name,omitempty" json:"name,omitempty"`
-	DeployAccessLevels *[]*EnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	Name                  *string                      `url:"name,omitempty" json:"name,omitempty"`
+	DeployAccessLevels    *[]*EnvironmentAccessOptions `url:"deploy_access_levels,omitempty" json:"deploy_access_levels,omitempty"`
+	RequiredApprovalCount *int                         `url:"required_approval_count,omitempty" json:"required_approval_count,omitempty"`
 }
 
 // EnvironmentAccessOptions represents the options for an access description for
@@ -129,8 +133,8 @@ type EnvironmentAccessOptions struct {
 	GroupID     *int              `url:"group_id,omitempty" json:"group_id,omitempty"`
 }
 
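Editor's note (illustrative only, not part of the diff): a hypothetical sketch of the new RequiredApprovalCount option above, passed to ProtectRepositoryEnvironments (shown just below). The project ID 42, the environment name, and the approval count are placeholders.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client, err := gitlab.NewClient("glpat-placeholder") // placeholder token
	if err != nil {
		log.Fatal(err)
	}

	// Protect "production" on project 42: maintainers may deploy, and each
	// deployment needs two approvals.
	pe, _, err := client.ProtectedEnvironments.ProtectRepositoryEnvironments(42, &gitlab.ProtectRepositoryEnvironmentsOptions{
		Name: gitlab.String("production"),
		DeployAccessLevels: &[]*gitlab.EnvironmentAccessOptions{
			{AccessLevel: gitlab.AccessLevel(gitlab.MaintainerPermissions)},
		},
		RequiredApprovalCount: gitlab.Int(2),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s protected, %d approval(s) required", pe.Name, pe.RequiredApprovalCount)
}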
-// ProtectRepositoryEnvironments protects a single repository environment or several project
-// repository environments using a wildcard protected environment.
+// ProtectRepositoryEnvironments protects a single repository environment or
+// several project repository environments using a wildcard protected environment.
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ee/api/protected_environments.html#protect-repository-environments
diff --git a/vendor/github.com/xanzy/go-gitlab/releases.go b/vendor/github.com/xanzy/go-gitlab/releases.go
index 4c263bad52..f7876d2876 100644
--- a/vendor/github.com/xanzy/go-gitlab/releases.go
+++ b/vendor/github.com/xanzy/go-gitlab/releases.go
@@ -49,8 +49,11 @@ type Release struct {
 		AvatarURL string `json:"avatar_url"`
 		WebURL    string `json:"web_url"`
 	} `json:"author"`
-	Commit Commit `json:"commit"`
-	Assets struct {
+	Commit          Commit `json:"commit"`
+	UpcomingRelease bool   `json:"upcoming_release"`
+	CommitPath      string `json:"commit_path"`
+	TagPath         string `json:"tag_path"`
+	Assets          struct {
 		Count   int `json:"count"`
 		Sources []struct {
 			Format string `json:"format"`
@@ -121,9 +124,10 @@ func (s *ReleasesService) GetRelease(pid interface{}, tagName string, options ..
 // GitLab API docs:
 // https://docs.gitlab.com/ce/api/releases/index.html#create-a-release
 type CreateReleaseOptions struct {
-	Name        *string `url:"name" json:"name"`
-	TagName     *string `url:"tag_name" json:"tag_name"`
-	Description *string `url:"description" json:"description"`
+	Name        *string `url:"name,omitempty" json:"name,omitempty"`
+	TagName     *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
+	TagMessage  *string `url:"tag_message,omitempty" json:"tag_message,omitempty"`
+	Description *string `url:"description,omitempty" json:"description,omitempty"`
 	Ref         *string               `url:"ref,omitempty" json:"ref,omitempty"`
 	Milestones  *[]string             `url:"milestones,omitempty" json:"milestones,omitempty"`
 	Assets      *ReleaseAssetsOptions `url:"assets,omitempty" json:"assets,omitempty"`
diff --git a/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go b/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go
new file mode 100644
index 0000000000..9c08830410
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/resource_milestone_events.go
@@ -0,0 +1,155 @@
+//
+// Copyright 2022, Mai Lapyst
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// ResourceMilestoneEventsService handles communication with the event related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html
+type ResourceMilestoneEventsService struct {
+	client *Client
+}
+
+// MilestoneEvent represents a resource milestone event.
+//
+// GitLab API docs: https://docs.gitlab.com/ee/api/resource_milestone_events.html
+type MilestoneEvent struct {
+	ID           int        `json:"id"`
+	User         *BasicUser `json:"user"`
+	CreatedAt    *time.Time `json:"created_at"`
+	ResourceType string     `json:"resource_type"`
+	ResourceID   int        `json:"resource_id"`
+	Milestone    *Milestone `json:"milestone"`
+	Action       string     `json:"action"`
+}
+
+// ListMilestoneEventsOptions represents the options for all resource milestone events
+// list methods.
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events +type ListMilestoneEventsOptions struct { + ListOptions +} + +// ListIssueMilestoneEvents retrieves resource milestone events for the specified +// project and issue. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-issue-milestone-events +func (s *ResourceMilestoneEventsService) ListIssueMilestoneEvents(pid interface{}, issue int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events", PathEscape(project), issue) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var mes []*MilestoneEvent + resp, err := s.client.Do(req, &mes) + if err != nil { + return nil, resp, err + } + + return mes, resp, err +} + +// GetIssueMilestoneEvent gets a single issue milestone event. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-issue-milestone-event +func (s *ResourceMilestoneEventsService) GetIssueMilestoneEvent(pid interface{}, issue int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/issues/%d/resource_milestone_events/%d", PathEscape(project), issue, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + me := new(MilestoneEvent) + resp, err := s.client.Do(req, me) + if err != nil { + return nil, resp, err + } + + return me, resp, err +} + +// ListMergeMilestoneEvents retrieves resource milestone events for the specified +// project and merge request. +// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#list-project-merge-request-milestone-events +func (s *ResourceMilestoneEventsService) ListMergeMilestoneEvents(pid interface{}, request int, opt *ListMilestoneEventsOptions, options ...RequestOptionFunc) ([]*MilestoneEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events", PathEscape(project), request) + + req, err := s.client.NewRequest(http.MethodGet, u, opt, options) + if err != nil { + return nil, nil, err + } + + var mes []*MilestoneEvent + resp, err := s.client.Do(req, &mes) + if err != nil { + return nil, resp, err + } + + return mes, resp, err +} + +// GetMergeRequestMilestoneEvent gets a single merge request milestone event. 
+// +// GitLab API docs: +// https://docs.gitlab.com/ee/api/resource_milestone_events.html#get-single-merge-request-milestone-event +func (s *ResourceMilestoneEventsService) GetMergeRequestMilestoneEvent(pid interface{}, request int, event int, options ...RequestOptionFunc) (*MilestoneEvent, *Response, error) { + project, err := parseID(pid) + if err != nil { + return nil, nil, err + } + u := fmt.Sprintf("projects/%s/merge_requests/%d/resource_milestone_events/%d", PathEscape(project), request, event) + + req, err := s.client.NewRequest(http.MethodGet, u, nil, options) + if err != nil { + return nil, nil, err + } + + me := new(MilestoneEvent) + resp, err := s.client.Do(req, me) + if err != nil { + return nil, resp, err + } + + return me, resp, err +} diff --git a/vendor/github.com/xanzy/go-gitlab/runners.go b/vendor/github.com/xanzy/go-gitlab/runners.go index 35957d4d0f..a56db05b0d 100644 --- a/vendor/github.com/xanzy/go-gitlab/runners.go +++ b/vendor/github.com/xanzy/go-gitlab/runners.go @@ -34,17 +34,18 @@ type RunnersService struct { // // GitLab API docs: https://docs.gitlab.com/ce/api/runners.html type Runner struct { - ID int `json:"id"` - Description string `json:"description"` - Active bool `json:"active"` - Paused bool `json:"paused"` - IsShared bool `json:"is_shared"` - IPAddress string `json:"ip_address"` - RunnerType string `json:"runner_type"` - Name string `json:"name"` - Online bool `json:"online"` - Status string `json:"status"` - Token string `json:"token"` + ID int `json:"id"` + Description string `json:"description"` + Active bool `json:"active"` + Paused bool `json:"paused"` + IsShared bool `json:"is_shared"` + IPAddress string `json:"ip_address"` + RunnerType string `json:"runner_type"` + Name string `json:"name"` + Online bool `json:"online"` + Status string `json:"status"` + Token string `json:"token"` + TokenExpiresAt *time.Time `json:"token_expires_at"` } // RunnerDetails represents the GitLab CI runner details. @@ -497,7 +498,8 @@ func (s *RunnersService) VerifyRegisteredRunner(opt *VerifyRegisteredRunnerOptio } type RunnerRegistrationToken struct { - Token *string `url:"token" json:"token"` + Token *string `url:"token" json:"token"` + TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"` } // ResetInstanceRunnerRegistrationToken resets the instance runner registration @@ -570,7 +572,8 @@ func (s *RunnersService) ResetProjectRunnerRegistrationToken(pid interface{}, op } type RunnerAuthenticationToken struct { - Token *string `url:"token" json:"token"` + Token *string `url:"token" json:"token"` + TokenExpiresAt *time.Time `url:"token_expires_at" json:"token_expires_at"` } // ResetRunnerAuthenticationToken resets a runner's authentication token. diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go index 9cc8e4b65a..6ac05ae792 100644 --- a/vendor/github.com/xanzy/go-gitlab/settings.go +++ b/vendor/github.com/xanzy/go-gitlab/settings.go @@ -32,179 +32,354 @@ type SettingsService struct { // Settings represents the GitLab application settings. // // GitLab API docs: https://docs.gitlab.com/ce/api/settings.html +// +// The available parameters have been modeled directly after the code, as the +// documentation seems to be inaccurate. 
+// +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/settings.rb +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/lib/api/entities/application_setting.rb#L5 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/app/helpers/application_settings_helper.rb#L192 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/lib/ee/api/helpers/settings_helpers.rb#L10 +// https://gitlab.com/gitlab-org/gitlab/-/blob/v14.9.3-ee/ee/app/helpers/ee/application_settings_helper.rb#L20 type Settings struct { - ID int `json:"id"` - CreatedAt *time.Time `json:"created_at"` - UpdatedAt *time.Time `json:"updated_at"` - AdminMode bool `json:"admin_mode"` - AdminNotificationEmail string `json:"admin_notification_email"` - AfterSignOutPath string `json:"after_sign_out_path"` - AfterSignUpText string `json:"after_sign_up_text"` - AkismetAPIKey string `json:"akismet_api_key"` - AkismetEnabled bool `json:"akismet_enabled"` - AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` - AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"` - AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` - AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` - ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` - AssetProxyEnabled bool `json:"asset_proxy_enabled"` - AssetProxySecretKey string `json:"asset_proxy_secret_key"` - AssetProxyURL string `json:"asset_proxy_url"` - AssetProxyWhitelist []string `json:"asset_proxy_whitelist"` - AuthorizedKeysEnabled bool `json:"authorized_keys_enabled_enabled"` - AutoDevOpsDomain string `json:"auto_devops_domain"` - AutoDevOpsEnabled bool `json:"auto_devops_enabled"` - CheckNamespacePlan bool `json:"check_namespace_plan"` - CommitEmailHostname string `json:"commit_email_hostname"` - ContainerRegistryTokenExpireDelay int `json:"container_registry_token_expire_delay"` - DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` - DefaultBranchProtection int `json:"default_branch_protection"` - DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` - DefaultProjectCreation int `json:"default_project_creation"` - DefaultProjectsLimit int `json:"default_projects_limit"` - DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` - DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` - DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` - DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` - DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` - DomainBlacklist []string `json:"domain_blacklist"` - DomainBlacklistEnabled bool `json:"domain_blacklist_enabled"` - DomainWhitelist []string `json:"domain_whitelist"` - DSAKeyRestriction int `json:"dsa_key_restriction"` - ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` - Ed25519KeyRestriction int `json:"ed25519_key_restriction"` - ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` - ElasticsearchAWS bool `json:"elasticsearch_aws"` - ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` - ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` - ElasticsearchIndexing bool `json:"elasticsearch_indexing"` - ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` - ElasticsearchNamespaceIDs []int `json:"elasticsearch_namespace_ids"` - ElasticsearchProjectIDs 
[]int `json:"elasticsearch_project_ids"` - ElasticsearchSearch bool `json:"elasticsearch_search"` - ElasticsearchURL []string `json:"elasticsearch_url"` - EmailAdditionalText string `json:"email_additional_text"` - EmailAuthorInBody bool `json:"email_author_in_body"` - EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` - EnforceTerms bool `json:"enforce_terms"` - ExternalAuthClientCert string `json:"external_auth_client_cert"` - ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` - ExternalAuthClientKey string `json:"external_auth_client_key"` - ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` - ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` - ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` - ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` - FileTemplateProjectID int `json:"file_template_project_id"` - FirstDayOfWeek int `json:"first_day_of_week"` - GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` - GeoStatusTimeout int `json:"geo_status_timeout"` - GitalyTimeoutDefault int `json:"gitaly_timeout_default"` - GitalyTimeoutFast int `json:"gitaly_timeout_fast"` - GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` - GrafanaEnabled bool `json:"grafana_enabled"` - GrafanaURL string `json:"grafana_url"` - GravatarEnabled bool `json:"gravatar_enabled"` - GroupDownloadExportLimit int `json:"group_download_export_limit"` - GroupExportLimit int `json:"group_export_limit"` - GroupImportLimit int `json:"group_import_limit"` - HashedStorageEnabled bool `json:"hashed_storage_enabled"` - HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` - HelpPageSupportURL string `json:"help_page_support_url"` - HelpPageText string `json:"help_page_text"` - HelpText string `json:"help_text"` - HideThirdPartyOffers bool `json:"hide_third_party_offers"` - HomePageURL string `json:"home_page_url"` - HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` - HousekeepingEnabled bool `json:"housekeeping_enabled"` - HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` - HousekeepingGcPeriod int `json:"housekeeping_gc_period"` - HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` - HTMLEmailsEnabled bool `json:"html_emails_enabled"` - ImportSources []string `json:"import_sources"` - InstanceStatisticsVisibilityPrivate bool `json:"instance_statistics_visibility_private"` - LocalMarkdownVersion int `json:"local_markdown_version"` - MaxArtifactsSize int `json:"max_artifacts_size"` - MaxAttachmentSize int `json:"max_attachment_size"` - MaxPagesSize int `json:"max_pages_size"` - MetricsEnabled bool `json:"metrics_enabled"` - MetricsHost string `json:"metrics_host"` - MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` - MetricsPacketSize int `json:"metrics_packet_size"` - MetricsPoolSize int `json:"metrics_pool_size"` - MetricsPort int `json:"metrics_port"` - MetricsSampleInterval int `json:"metrics_sample_interval"` - MetricsTimeout int `json:"metrics_timeout"` - MirrorAvailable bool `json:"mirror_available"` - MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` - MirrorMaxCapacity int `json:"mirror_max_capacity"` - MirrorMaxDelay int `json:"mirror_max_delay"` - OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` - PagesDomainVerificationEnabled bool 
`json:"pages_domain_verification_enabled"` - PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` - PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` - PerformanceBarAllowedGroupID string `json:"performance_bar_allowed_group_id"` - PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` - PerformanceBarEnabled bool `json:"performance_bar_enabled"` - PlantumlEnabled bool `json:"plantuml_enabled"` - PlantumlURL string `json:"plantuml_url"` - PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` - ProjectExportEnabled bool `json:"project_export_enabled"` - ProjectDownloadExportLimit int `json:"project_download_export_limit"` - ProjectExportLimit int `json:"project_export_limit"` - ProjectImportLimit int `json:"project_import_limit"` - PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` - ProtectedCIVariables bool `json:"protected_ci_variables"` - PseudonymizerEnabled bool `json:"psedonymizer_enabled"` - PushEventHooksLimit int `json:"push_event_hooks_limit"` - PushEventActivitiesLimit int `json:"push_event_activities_limit"` - RecaptchaEnabled bool `json:"recaptcha_enabled"` - RecaptchaPrivateKey string `json:"recaptcha_private_key"` - RecaptchaSiteKey string `json:"recaptcha_site_key"` - ReceiveMaxInputSize int `json:"receive_max_input_size"` - RepositoryChecksEnabled bool `json:"repository_checks_enabled"` - RepositorySizeLimit int `json:"repository_size_limit"` - RepositoryStorages []string `json:"repository_storages"` - RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` - RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` - RsaKeyRestriction int `json:"rsa_key_restriction"` - SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` - SessionExpireDelay int `json:"session_expire_delay"` - SharedRunnersEnabled bool `json:"shared_runners_enabled"` - SharedRunnersMinutes int `json:"shared_runners_minutes"` - SharedRunnersText string `json:"shared_runners_text"` - SignInText string `json:"sign_in_text"` - SignupEnabled bool `json:"signup_enabled"` - SlackAppEnabled bool `json:"slack_app_enabled"` - SlackAppID string `json:"slack_app_id"` - SlackAppSecret string `json:"slack_app_secret"` - SlackAppVerificationToken string `json:"slack_app_verification_token"` - SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` - SnowplowCookieDomain string `json:"snowplow_cookie_domain"` - SnowplowEnabled bool `json:"snowplow_enabled"` - SnowplowSiteID string `json:"snowplow_site_id"` - TerminalMaxSessionTime int `json:"terminal_max_session_time"` - Terms string `json:"terms"` - ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` - ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` - ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` - ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` - ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` - ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` - ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"` - ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"` - ThrottleUnauthenticatedRequestsPerPeriod int 
`json:"throttle_unauthenticated_requests_per_period"` - TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` - TwoFactorGracePeriod int `json:"two_factor_grace_period"` - UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` - UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` - UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` - UsagePingEnabled bool `json:"usage_ping_enabled"` - UserDefaultExternal bool `json:"user_default_external"` - UserDefaultInternalRegex string `json:"user_default_internal_regex"` - UserOauthApplications bool `json:"user_oauth_applications"` - UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` - VersionCheckEnabled bool `json:"version_check_enabled"` - WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` + ID int `json:"id"` + AbuseNotificationEmail string `json:"abuse_notification_email"` + AdminMode bool `json:"admin_mode"` + AdminNotificationEmail string `json:"admin_notification_email"` // deprecated + AfterSignOutPath string `json:"after_sign_out_path"` + AfterSignUpText string `json:"after_sign_up_text"` + AkismetAPIKey string `json:"akismet_api_key"` + AkismetEnabled bool `json:"akismet_enabled"` + AllowGroupOwnersToManageLDAP bool `json:"allow_group_owners_to_manage_ldap"` + AllowLocalRequestsFromHooksAndServices bool `json:"allow_local_requests_from_hooks_and_services"` // deprecated + AllowLocalRequestsFromSystemHooks bool `json:"allow_local_requests_from_system_hooks"` + AllowLocalRequestsFromWebHooksAndServices bool `json:"allow_local_requests_from_web_hooks_and_services"` + ArchiveBuildsInHumanReadable string `json:"archive_builds_in_human_readable"` + AssetProxyAllowlist []string `json:"asset_proxy_allowlist"` + AssetProxyEnabled bool `json:"asset_proxy_enabled"` + AssetProxyURL string `json:"asset_proxy_url"` + AssetProxySecretKey string `json:"asset_proxy_secret_key"` + AssetProxyWhitelist []string `json:"asset_proxy_whitelist"` // deprecated + AuthorizedKeysEnabled bool `json:"authorized_keys_enabled"` + AutoDevOpsDomain string `json:"auto_devops_domain"` + AutoDevOpsEnabled bool `json:"auto_devops_enabled"` + AutomaticPurchasedStorageAllocation bool `json:"automatic_purchased_storage_allocation"` + CheckNamespacePlan bool `json:"check_namespace_plan"` + CommitEmailHostname string `json:"commit_email_hostname"` + ContainerExpirationPoliciesEnableHistoricEntries bool `json:"container_expiration_policies_enable_historic_entries"` + ContainerRegistryCleanupTagsServiceMaxListSize int `json:"container_registry_cleanup_tags_service_max_list_size"` + ContainerRegistryDeleteTagsServiceTimeout int `json:"container_registry_delete_tags_service_timeout"` + ContainerRegistryExpirationPoliciesCaching bool `json:"container_registry_expiration_policies_caching"` + ContainerRegistryExpirationPoliciesWorkerCapacity int `json:"container_registry_expiration_policies_worker_capacity"` + ContainerRegistryImportCreatedBefore *time.Time `json:"container_registry_import_created_before"` + ContainerRegistryImportMaxRetries int `json:"container_registry_import_max_retries"` + ContainerRegistryImportMaxStepDuration int `json:"container_registry_import_max_step_duration"` + ContainerRegistryImportMaxTagsCount int `json:"container_registry_import_max_tags_count"` + ContainerRegistryImportStartMaxRetries int `json:"container_registry_import_start_max_retries"` + ContainerRegistryImportTargetPlan string `json:"container_registry_import_target_plan"` + ContainerRegistryTokenExpireDelay 
int `json:"container_registry_token_expire_delay"` + CreatedAt *time.Time `json:"created_at"` + CustomHTTPCloneURLRoot string `json:"custom_http_clone_url_root"` + DNSRebindingProtectionEnabled bool `json:"dns_rebinding_protection_enabled"` + DSAKeyRestriction int `json:"dsa_key_restriction"` + DeactivateDormantUsers bool `json:"deactivate_dormant_users"` + DefaultArtifactsExpireIn string `json:"default_artifacts_expire_in"` + DefaultBranchName string `json:"default_branch_name"` + DefaultBranchProtection int `json:"default_branch_protection"` + DefaultCiConfigPath string `json:"default_ci_config_path"` + DefaultGroupVisibility VisibilityValue `json:"default_group_visibility"` + DefaultProjectCreation int `json:"default_project_creation"` + DefaultProjectDeletionProtection bool `json:"default_project_deletion_protection"` + DefaultProjectVisibility VisibilityValue `json:"default_project_visibility"` + DefaultProjectsLimit int `json:"default_projects_limit"` + DefaultSnippetVisibility VisibilityValue `json:"default_snippet_visibility"` + DelayedGroupDeletion bool `json:"delayed_group_deletion"` + DelayedProjectDeletion bool `json:"delayed_project_deletion"` + DeleteInactiveProjects bool `json:"delete_inactive_projects"` + DeletionAdjournedPeriod int `json:"deletion_adjourned_period"` + DiffMaxFiles int `json:"diff_max_files"` + DiffMaxLines int `json:"diff_max_lines"` + DiffMaxPatchBytes int `json:"diff_max_patch_bytes"` + DisableFeedToken bool `json:"disable_feed_token"` + DisableOverridingApproversPerMergeRequest bool `json:"disable_overriding_approvers_per_merge_request"` + DisabledOauthSignInSources []string `json:"disabled_oauth_sign_in_sources"` + DomainAllowlist []string `json:"domain_allowlist"` + DomainDenylist []string `json:"domain_denylist"` + DomainDenylistEnabled bool `json:"domain_denylist_enabled"` + ECDSAKeyRestriction int `json:"ecdsa_key_restriction"` + ECDSASKKeyRestriction int `json:"ecdsa_sk_key_restriction"` + EKSAccessKeyID string `json:"eks_access_key_id"` + EKSAccountID string `json:"eks_account_id"` + EKSIntegrationEnabled bool `json:"eks_integration_enabled"` + EKSSecretAccessKey string `json:"eks_secret_access_key"` + Ed25519KeyRestriction int `json:"ed25519_key_restriction"` + Ed25519SKKeyRestriction int `json:"ed25519_sk_key_restriction"` + ElasticsearchAWS bool `json:"elasticsearch_aws"` + ElasticsearchAWSAccessKey string `json:"elasticsearch_aws_access_key"` + ElasticsearchAWSRegion string `json:"elasticsearch_aws_region"` + ElasticsearchAWSSecretAccessKey string `json:"elasticsearch_aws_secret_access_key"` + ElasticsearchAnalyzersKuromojiEnabled bool `json:"elasticsearch_analyzers_kuromoji_enabled"` + ElasticsearchAnalyzersKuromojiSearch bool `json:"elasticsearch_analyzers_kuromoji_search"` + ElasticsearchAnalyzersSmartCNEnabled bool `json:"elasticsearch_analyzers_smartcn_enabled"` + ElasticsearchAnalyzersSmartCNSearch bool `json:"elasticsearch_analyzers_smartcn_search"` + ElasticsearchClientRequestTimeout int `json:"elasticsearch_client_request_timeout"` + ElasticsearchIndexedFieldLengthLimit int `json:"elasticsearch_indexed_field_length_limit"` + ElasticsearchIndexedFileSizeLimitKB int `json:"elasticsearch_indexed_file_size_limit_kb"` + ElasticsearchIndexing bool `json:"elasticsearch_indexing"` + ElasticsearchLimitIndexing bool `json:"elasticsearch_limit_indexing"` + ElasticsearchMaxBulkConcurrency int `json:"elasticsearch_max_bulk_concurrency"` + ElasticsearchMaxBulkSizeMB int `json:"elasticsearch_max_bulk_size_mb"` + ElasticsearchNamespaceIDs []int 
`json:"elasticsearch_namespace_ids"` + ElasticsearchPassword string `json:"elasticsearch_password"` + ElasticsearchPauseIndexing bool `json:"elasticsearch_pause_indexing"` + ElasticsearchProjectIDs []int `json:"elasticsearch_project_ids"` + ElasticsearchReplicas int `json:"elasticsearch_replicas"` + ElasticsearchSearch bool `json:"elasticsearch_search"` + ElasticsearchShards int `json:"elasticsearch_shards"` + ElasticsearchURL []string `json:"elasticsearch_url"` + ElasticsearchUsername string `json:"elasticsearch_username"` + EmailAdditionalText string `json:"email_additional_text"` + EmailAuthorInBody bool `json:"email_author_in_body"` + EmailRestrictions string `json:"email_restrictions"` + EmailRestrictionsEnabled bool `json:"email_restrictions_enabled"` + EnabledGitAccessProtocol string `json:"enabled_git_access_protocol"` + EnforceNamespaceStorageLimit bool `json:"enforce_namespace_storage_limit"` + EnforcePATExpiration bool `json:"enforce_pat_expiration"` + EnforceSSHKeyExpiration bool `json:"enforce_ssh_key_expiration"` + EnforceTerms bool `json:"enforce_terms"` + ExternalAuthClientCert string `json:"external_auth_client_cert"` + ExternalAuthClientKey string `json:"external_auth_client_key"` + ExternalAuthClientKeyPass string `json:"external_auth_client_key_pass"` + ExternalAuthorizationServiceDefaultLabel string `json:"external_authorization_service_default_label"` + ExternalAuthorizationServiceEnabled bool `json:"external_authorization_service_enabled"` + ExternalAuthorizationServiceTimeout float64 `json:"external_authorization_service_timeout"` + ExternalAuthorizationServiceURL string `json:"external_authorization_service_url"` + ExternalPipelineValidationServiceTimeout int `json:"external_pipeline_validation_service_timeout"` + ExternalPipelineValidationServiceToken string `json:"external_pipeline_validation_service_token"` + ExternalPipelineValidationServiceURL string `json:"external_pipeline_validation_service_url"` + FileTemplateProjectID int `json:"file_template_project_id"` + FirstDayOfWeek int `json:"first_day_of_week"` + FlocEnabled bool `json:"floc_enabled"` + GeoNodeAllowedIPs string `json:"geo_node_allowed_ips"` + GeoStatusTimeout int `json:"geo_status_timeout"` + GitTwoFactorSessionExpiry int `json:"git_two_factor_session_expiry"` + GitalyTimeoutDefault int `json:"gitaly_timeout_default"` + GitalyTimeoutFast int `json:"gitaly_timeout_fast"` + GitalyTimeoutMedium int `json:"gitaly_timeout_medium"` + GitpodEnabled bool `json:"gitpod_enabled"` + GitpodURL string `json:"gitpod_url"` + GitRateLimitUsersAllowlist []string `json:"git_rate_limit_users_allowlist"` + GrafanaEnabled bool `json:"grafana_enabled"` + GrafanaURL string `json:"grafana_url"` + GravatarEnabled bool `json:"gravatar_enabled"` + GroupDownloadExportLimit int `json:"group_download_export_limit"` + GroupExportLimit int `json:"group_export_limit"` + GroupImportLimit int `json:"group_import_limit"` + GroupOwnersCanManageDefaultBranchProtection bool `json:"group_owners_can_manage_default_branch_protection"` + GroupRunnerTokenExpirationInterval int `json:"group_runner_token_expiration_interval"` + HTMLEmailsEnabled bool `json:"html_emails_enabled"` + HashedStorageEnabled bool `json:"hashed_storage_enabled"` + HelpPageDocumentationBaseURL string `json:"help_page_documentation_base_url"` + HelpPageHideCommercialContent bool `json:"help_page_hide_commercial_content"` + HelpPageSupportURL string `json:"help_page_support_url"` + HelpPageText string `json:"help_page_text"` + HelpText string `json:"help_text"` + 
HideThirdPartyOffers bool `json:"hide_third_party_offers"` + HomePageURL string `json:"home_page_url"` + HousekeepingBitmapsEnabled bool `json:"housekeeping_bitmaps_enabled"` + HousekeepingEnabled bool `json:"housekeeping_enabled"` + HousekeepingFullRepackPeriod int `json:"housekeeping_full_repack_period"` + HousekeepingGcPeriod int `json:"housekeeping_gc_period"` + HousekeepingIncrementalRepackPeriod int `json:"housekeeping_incremental_repack_period"` + ImportSources []string `json:"import_sources"` + InactiveProjectsDeleteAfterMonths int `json:"inactive_projects_delete_after_months"` + InactiveProjectsMinSizeMB int `json:"inactive_projects_min_size_mb"` + InactiveProjectsSendWarningEmailAfterMonths int `json:"inactive_projects_send_warning_email_after_months"` + InProductMarketingEmailsEnabled bool `json:"in_product_marketing_emails_enabled"` + InvisibleCaptchaEnabled bool `json:"invisible_captcha_enabled"` + IssuesCreateLimit int `json:"issues_create_limit"` + KeepLatestArtifact bool `json:"keep_latest_artifact"` + KrokiEnabled bool `json:"kroki_enabled"` + KrokiFormats map[string]bool `json:"kroki_formats"` + KrokiURL string `json:"kroki_url"` + LocalMarkdownVersion int `json:"local_markdown_version"` + LockMembershipsToLDAP bool `json:"lock_memberships_to_ldap"` + LoginRecaptchaProtectionEnabled bool `json:"login_recaptcha_protection_enabled"` + MailgunEventsEnabled bool `json:"mailgun_events_enabled"` + MailgunSigningKey string `json:"mailgun_signing_key"` + MaintenanceMode bool `json:"maintenance_mode"` + MaintenanceModeMessage string `json:"maintenance_mode_message"` + MaxArtifactsSize int `json:"max_artifacts_size"` + MaxAttachmentSize int `json:"max_attachment_size"` + MaxExportSize int `json:"max_export_size"` + MaxImportSize int `json:"max_import_size"` + MaxNumberOfRepositoryDownloads int `json:"max_number_of_repository_downloads"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod int `json:"max_number_of_repository_downloads_within_time_period"` + MaxPagesSize int `json:"max_pages_size"` + MaxPersonalAccessTokenLifetime int `json:"max_personal_access_token_lifetime"` + MaxSSHKeyLifetime int `json:"max_ssh_key_lifetime"` + MaxYAMLDepth int `json:"max_yaml_depth"` + MaxYAMLSizeBytes int `json:"max_yaml_size_bytes"` + MetricsMethodCallThreshold int `json:"metrics_method_call_threshold"` + MinimumPasswordLength int `json:"minimum_password_length"` + MirrorAvailable bool `json:"mirror_available"` + MirrorCapacityThreshold int `json:"mirror_capacity_threshold"` + MirrorMaxCapacity int `json:"mirror_max_capacity"` + MirrorMaxDelay int `json:"mirror_max_delay"` + NPMPackageRequestsForwarding bool `json:"npm_package_requests_forwarding"` + NotesCreateLimit int `json:"notes_create_limit"` + NotifyOnUnknownSignIn bool `json:"notify_on_unknown_sign_in"` + OutboundLocalRequestsAllowlistRaw string `json:"outbound_local_requests_allowlist_raw"` + OutboundLocalRequestsWhitelist []string `json:"outbound_local_requests_whitelist"` + PackageRegistryCleanupPoliciesWorkerCapacity int `json:"package_registry_cleanup_policies_worker_capacity"` + PagesDomainVerificationEnabled bool `json:"pages_domain_verification_enabled"` + PasswordAuthenticationEnabledForGit bool `json:"password_authentication_enabled_for_git"` + PasswordAuthenticationEnabledForWeb bool `json:"password_authentication_enabled_for_web"` + PasswordNumberRequired bool `json:"password_number_required"` + PasswordSymbolRequired bool `json:"password_symbol_required"` + PasswordUppercaseRequired bool `json:"password_uppercase_required"` + 
PasswordLowercaseRequired bool `json:"password_lowercase_required"` + PerformanceBarAllowedGroupID string `json:"performance_bar_allowed_group_id"` + PerformanceBarAllowedGroupPath string `json:"performance_bar_allowed_group_path"` + PerformanceBarEnabled bool `json:"performance_bar_enabled"` + PersonalAccessTokenPrefix string `json:"personal_access_token_prefix"` + PipelineLimitPerProjectUserSha int `json:"pipeline_limit_per_project_user_sha"` + PlantumlEnabled bool `json:"plantuml_enabled"` + PlantumlURL string `json:"plantuml_url"` + PollingIntervalMultiplier float64 `json:"polling_interval_multiplier,string"` + PreventMergeRequestsAuthorApproval bool `json:"prevent_merge_request_author_approval"` + PreventMergeRequestsCommittersApproval bool `json:"prevent_merge_request_committers_approval"` + ProjectDownloadExportLimit int `json:"project_download_export_limit"` + ProjectExportEnabled bool `json:"project_export_enabled"` + ProjectExportLimit int `json:"project_export_limit"` + ProjectImportLimit int `json:"project_import_limit"` + ProjectRunnerTokenExpirationInterval int `json:"project_runner_token_expiration_interval"` + PrometheusMetricsEnabled bool `json:"prometheus_metrics_enabled"` + ProtectedCIVariables bool `json:"protected_ci_variables"` + PseudonymizerEnabled bool `json:"pseudonymizer_enabled"` + PushEventActivitiesLimit int `json:"push_event_activities_limit"` + PushEventHooksLimit int `json:"push_event_hooks_limit"` + PyPIPackageRequestsForwarding bool `json:"pypi_package_requests_forwarding"` + RSAKeyRestriction int `json:"rsa_key_restriction"` + RateLimitingResponseText string `json:"rate_limiting_response_text"` + RawBlobRequestLimit int `json:"raw_blob_request_limit"` + RecaptchaEnabled bool `json:"recaptcha_enabled"` + RecaptchaPrivateKey string `json:"recaptcha_private_key"` + RecaptchaSiteKey string `json:"recaptcha_site_key"` + ReceiveMaxInputSize int `json:"receive_max_input_size"` + RepositoryChecksEnabled bool `json:"repository_checks_enabled"` + RepositorySizeLimit int `json:"repository_size_limit"` + RepositoryStorages []string `json:"repository_storages"` + RepositoryStoragesWeighted map[string]int `json:"repository_storages_weighted"` + RequireAdminApprovalAfterUserSignup bool `json:"require_admin_approval_after_user_signup"` + RequireTwoFactorAuthentication bool `json:"require_two_factor_authentication"` + RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"` + RunnerTokenExpirationInterval int `json:"runner_token_expiration_interval"` + SearchRateLimit int `json:"search_rate_limit"` + SearchRateLimitUnauthenticated int `json:"search_rate_limit_unauthenticated"` + SecretDetectionRevocationTokenTypesURL string `json:"secret_detection_revocation_token_types_url"` + SecretDetectionTokenRevocationEnabled bool `json:"secret_detection_token_revocation_enabled"` + SecretDetectionTokenRevocationToken string `json:"secret_detection_token_revocation_token"` + SecretDetectionTokenRevocationURL string `json:"secret_detection_token_revocation_url"` + SendUserConfirmationEmail bool `json:"send_user_confirmation_email"` + SentryClientsideDSN string `json:"sentry_clientside_dsn"` + SentryDSN string `json:"sentry_dsn"` + SentryEnabled bool `json:"sentry_enabled"` + SentryEnvironment string `json:"sentry_environment"` + SessionExpireDelay int `json:"session_expire_delay"` + SharedRunnersEnabled bool `json:"shared_runners_enabled"` + SharedRunnersMinutes int `json:"shared_runners_minutes"` + SharedRunnersText string `json:"shared_runners_text"` + 
SidekiqJobLimiterCompressionThresholdBytes int `json:"sidekiq_job_limiter_compression_threshold_bytes"` + SidekiqJobLimiterLimitBytes int `json:"sidekiq_job_limiter_limit_bytes"` + SidekiqJobLimiterMode string `json:"sidekiq_job_limiter_mode"` + SignInText string `json:"sign_in_text"` + SignupEnabled bool `json:"signup_enabled"` + SlackAppEnabled bool `json:"slack_app_enabled"` + SlackAppID string `json:"slack_app_id"` + SlackAppSecret string `json:"slack_app_secret"` + SlackAppSigningSecret string `json:"slack_app_signing_secret"` + SlackAppVerificationToken string `json:"slack_app_verification_token"` + SnippetSizeLimit int `json:"snippet_size_limit"` + SnowplowAppID string `json:"snowplow_app_id"` + SnowplowCollectorHostname string `json:"snowplow_collector_hostname"` + SnowplowCookieDomain string `json:"snowplow_cookie_domain"` + SnowplowEnabled bool `json:"snowplow_enabled"` + SourcegraphEnabled bool `json:"sourcegraph_enabled"` + SourcegraphPublicOnly bool `json:"sourcegraph_public_only"` + SourcegraphURL string `json:"sourcegraph_url"` + SpamCheckAPIKey string `json:"spam_check_api_key"` + SpamCheckEndpointEnabled bool `json:"spam_check_endpoint_enabled"` + SpamCheckEndpointURL string `json:"spam_check_endpoint_url"` + SuggestPipelineEnabled bool `json:"suggest_pipeline_enabled"` + TerminalMaxSessionTime int `json:"terminal_max_session_time"` + Terms string `json:"terms"` + ThrottleAuthenticatedAPIEnabled bool `json:"throttle_authenticated_api_enabled"` + ThrottleAuthenticatedAPIPeriodInSeconds int `json:"throttle_authenticated_api_period_in_seconds"` + ThrottleAuthenticatedAPIRequestsPerPeriod int `json:"throttle_authenticated_api_requests_per_period"` + ThrottleAuthenticatedDeprecatedAPIEnabled bool `json:"throttle_authenticated_deprecated_api_enabled"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_authenticated_deprecated_api_period_in_seconds"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_authenticated_deprecated_api_requests_per_period"` + ThrottleAuthenticatedFilesAPIEnabled bool `json:"throttle_authenticated_files_api_enabled"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds int `json:"throttle_authenticated_files_api_period_in_seconds"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_authenticated_files_api_requests_per_period"` + ThrottleAuthenticatedGitLFSEnabled bool `json:"throttle_authenticated_git_lfs_enabled"` + ThrottleAuthenticatedGitLFSPeriodInSeconds int `json:"throttle_authenticated_git_lfs_period_in_seconds"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod int `json:"throttle_authenticated_git_lfs_requests_per_period"` + ThrottleAuthenticatedPackagesAPIEnabled bool `json:"throttle_authenticated_packages_api_enabled"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_authenticated_packages_api_period_in_seconds"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_authenticated_packages_api_requests_per_period"` + ThrottleAuthenticatedWebEnabled bool `json:"throttle_authenticated_web_enabled"` + ThrottleAuthenticatedWebPeriodInSeconds int `json:"throttle_authenticated_web_period_in_seconds"` + ThrottleAuthenticatedWebRequestsPerPeriod int `json:"throttle_authenticated_web_requests_per_period"` + ThrottleIncidentManagementNotificationEnabled bool `json:"throttle_incident_management_notification_enabled"` + ThrottleIncidentManagementNotificationPerPeriod int `json:"throttle_incident_management_notification_per_period"` + 
ThrottleIncidentManagementNotificationPeriodInSeconds int `json:"throttle_incident_management_notification_period_in_seconds"` + ThrottleProtectedPathsEnabled bool `json:"throttle_protected_paths_enabled"` + ThrottleProtectedPathsPeriodInSeconds int `json:"throttle_protected_paths_period_in_seconds"` + ThrottleProtectedPathsRequestsPerPeriod int `json:"throttle_protected_paths_requests_per_period"` + ThrottleUnauthenticatedAPIEnabled bool `json:"throttle_unauthenticated_api_enabled"` + ThrottleUnauthenticatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_api_period_in_seconds"` + ThrottleUnauthenticatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_api_requests_per_period"` + ThrottleUnauthenticatedDeprecatedAPIEnabled bool `json:"throttle_unauthenticated_deprecated_api_enabled"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds int `json:"throttle_unauthenticated_deprecated_api_period_in_seconds"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod int `json:"throttle_unauthenticated_deprecated_api_requests_per_period"` + ThrottleUnauthenticatedEnabled bool `json:"throttle_unauthenticated_enabled"` + ThrottleUnauthenticatedFilesAPIEnabled bool `json:"throttle_unauthenticated_files_api_enabled"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds int `json:"throttle_unauthenticated_files_api_period_in_seconds"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_files_api_requests_per_period"` + ThrottleUnauthenticatedGitLFSEnabled bool `json:"throttle_unauthenticated_git_lfs_enabled"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds int `json:"throttle_unauthenticated_git_lfs_period_in_seconds"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod int `json:"throttle_unauthenticated_git_lfs_requests_per_period"` + ThrottleUnauthenticatedPackagesAPIEnabled bool `json:"throttle_unauthenticated_packages_api_enabled"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds int `json:"throttle_unauthenticated_packages_api_period_in_seconds"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod int `json:"throttle_unauthenticated_packages_api_requests_per_period"` + ThrottleUnauthenticatedPeriodInSeconds int `json:"throttle_unauthenticated_period_in_seconds"` + ThrottleUnauthenticatedRequestsPerPeriod int `json:"throttle_unauthenticated_requests_per_period"` + ThrottleUnauthenticatedWebEnabled bool `json:"throttle_unauthenticated_web_enabled"` // deprecated + ThrottleUnauthenticatedWebPeriodInSeconds int `json:"throttle_unauthenticated_web_period_in_seconds"` // deprecated + ThrottleUnauthenticatedWebRequestsPerPeriod int `json:"throttle_unauthenticated_web_requests_per_period"` // deprecated + TimeTrackingLimitToHours bool `json:"time_tracking_limit_to_hours"` + TwoFactorGracePeriod int `json:"two_factor_grace_period"` + UniqueIPsLimitEnabled bool `json:"unique_ips_limit_enabled"` + UniqueIPsLimitPerUser int `json:"unique_ips_limit_per_user"` + UniqueIPsLimitTimeWindow int `json:"unique_ips_limit_time_window"` + UpdatedAt *time.Time `json:"updated_at"` + UpdatingNameDisabledForUsers bool `json:"updating_name_disabled_for_users"` + UsagePingEnabled bool `json:"usage_ping_enabled"` + UsagePingFeaturesEnabled bool `json:"usage_ping_features_enabled"` + UserDeactivationEmailsEnabled bool `json:"user_deactivation_emails_enabled"` + UserDefaultExternal bool `json:"user_default_external"` + UserDefaultInternalRegex string `json:"user_default_internal_regex"` + UserEmailLookupLimit int `json:"user_email_lookup_limit"` // deprecated + 
UserOauthApplications bool `json:"user_oauth_applications"` + UserShowAddSSHKeyMessage bool `json:"user_show_add_ssh_key_message"` + UsersGetByIDLimit int `json:"users_get_by_id_limit"` + UsersGetByIDLimitAllowlistRaw string `json:"users_get_by_id_limit_allowlist_raw"` + VersionCheckEnabled bool `json:"version_check_enabled"` + WebIDEClientsidePreviewEnabled bool `json:"web_ide_clientside_preview_enabled"` + WhatsNewVariant string `json:"whats_new_variant"` + WikiPageMaxContentBytes int `json:"wiki_page_max_content_bytes"` } func (s Settings) String() string { @@ -235,175 +410,341 @@ func (s *SettingsService) GetSettings(options ...RequestOptionFunc) (*Settings, // GitLab API docs: // https://docs.gitlab.com/ce/api/settings.html#change-application.settings type UpdateSettingsOptions struct { - AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"` - AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"` - AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"` - AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"` - AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"` - AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"` - AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"` - AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"` - AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"` - AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"` - ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"` - AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"` - AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"` - AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"` - AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"` - AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"` - AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"` - AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` - CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"` - CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"` - ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` - DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` - DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` - DefaultGroupVisibility *VisibilityValue 
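Editor's note on how the vendored client above is consumed (not part of the diff): the new Settings struct is the response type of GET /application/settings, while UpdateSettingsOptions, shown in the next hunk, mirrors it with pointer fields so that only explicitly set options are serialized (every tag carries omitempty). A minimal sketch, assuming the vendored package is github.com/xanzy/go-gitlab and using a hypothetical token and base URL:

	package main

	import (
		"fmt"
		"log"

		gitlab "github.com/xanzy/go-gitlab"
	)

	func main() {
		// Hypothetical credentials; replace with real values.
		client, err := gitlab.NewClient("your-token",
			gitlab.WithBaseURL("https://gitlab.example.com"))
		if err != nil {
			log.Fatal(err)
		}

		// Read the current instance-level settings.
		settings, _, err := client.Settings.GetSettings()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(settings.DefaultBranchName)

		// Update only the fields that are set: pointer fields plus
		// omitempty keep everything else out of the request body.
		_, _, err = client.Settings.UpdateSettings(&gitlab.UpdateSettingsOptions{
			DefaultBranchName: gitlab.String("main"),
			MaintenanceMode:   gitlab.Bool(false),
		})
		if err != nil {
			log.Fatal(err)
		}
	}

Helpers such as gitlab.String and gitlab.Bool take a value and return a pointer, which is how a caller distinguishes "leave this setting untouched" from an explicit zero value.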
`url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` - DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` - DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` - DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` - DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"` - DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` - DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"` - DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` - DomainBlacklist *[]string `url:"domain_blacklist,omitempty" json:"domain_blacklist,omitempty"` - DomainBlacklistEnabled *bool `url:"domain_blacklist_enabled,omitempty" json:"domain_blacklist_enabled,omitempty"` - DomainWhitelist *[]string `url:"domain_whitelist,omitempty" json:"domain_whitelist,omitempty"` - DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` - ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` - Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` - ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` - ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` - ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` - ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` - ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` - ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` - ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` - ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` - ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` - ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` - EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` - EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` - EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` - EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"` - ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"` - ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"` - ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"` - 
ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"` - ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` - ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` - ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` - FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` - FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` - GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` - GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` - GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` - GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` - GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` - GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` - GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` - GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` - GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` - GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` - GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` - HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` - HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"` - HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"` - HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"` - HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"` - HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` - HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` - HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` - HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` - HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` - HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` - HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` - HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"` - ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` - InstanceStatisticsVisibilityPrivate *bool 
`url:"instance_statistics_visibility_private,omitempty" json:"instance_statistics_visibility_private,omitempty"` - LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` - MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` - MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` - MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` - MetricsEnabled *bool `url:"metrics_enabled,omitempty" json:"metrics_enabled,omitempty"` - MetricsHost *string `url:"metrics_host,omitempty" json:"metrics_host,omitempty"` - MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` - MetricsPacketSize *int `url:"metrics_packet_size,omitempty" json:"metrics_packet_size,omitempty"` - MetricsPoolSize *int `url:"metrics_pool_size,omitempty" json:"metrics_pool_size,omitempty"` - MetricsPort *int `url:"metrics_port,omitempty" json:"metrics_port,omitempty"` - MetricsSampleInterval *int `url:"metrics_sample_interval,omitempty" json:"metrics_sample_interval,omitempty"` - MetricsTimeout *int `url:"metrics_timeout,omitempty" json:"metrics_timeout,omitempty"` - MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` - MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` - MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` - MirrorMaxDelay *int `url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` - OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` - PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` - PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` - PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` - PerformanceBarAllowedGroupID *string `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` - PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"` - PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` - PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` - PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` - PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` - ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` - ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` - ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` - ProjectImportLimit *int `url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` - PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" 
json:"prometheus_metrics_enabled,omitempty"` - ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` - PseudonymizerEnabled *bool `url:"psedonymizer_enabled,omitempty" json:"psedonymizer_enabled,omitempty"` - PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` - PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` - RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` - RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` - RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` - ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` - RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` - RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` - RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"` - RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` - RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"` - RsaKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` - SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"` - SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` - SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` - SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` - SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"` - SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"` - SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"` - SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"` - SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"` - SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"` - SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"` - SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"` - SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"` - SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"` - SnowplowSiteID *string `url:"snowplow_site_id,omitempty" json:"snowplow_site_id,omitempty"` - TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` - Terms *string `url:"terms,omitempty" json:"terms,omitempty"` - ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"` - ThrottleAuthenticatedAPIPeriodInSeconds *int 
`url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` - ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` - ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` - ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` - ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` - ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` - ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` - ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` - TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` - TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` - UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` - UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` - UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` - UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` - UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` - UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` - UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` - UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` - VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` - WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` + AbuseNotificationEmail *string `url:"abuse_notification_email,omitempty" json:"abuse_notification_email,omitempty"` + AdminMode *bool `url:"admin_mode,omitempty" json:"admin_mode,omitempty"` + AdminNotificationEmail *string `url:"admin_notification_email,omitempty" json:"admin_notification_email,omitempty"` + AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"` + AfterSignUpText *string `url:"after_sign_up_text,omitempty" json:"after_sign_up_text,omitempty"` + AkismetAPIKey *string `url:"akismet_api_key,omitempty" json:"akismet_api_key,omitempty"` + AkismetEnabled *bool `url:"akismet_enabled,omitempty" json:"akismet_enabled,omitempty"` + AllowGroupOwnersToManageLDAP *bool `url:"allow_group_owners_to_manage_ldap,omitempty" json:"allow_group_owners_to_manage_ldap,omitempty"` + 
AllowLocalRequestsFromHooksAndServices *bool `url:"allow_local_requests_from_hooks_and_services,omitempty" json:"allow_local_requests_from_hooks_and_services,omitempty"` + AllowLocalRequestsFromSystemHooks *bool `url:"allow_local_requests_from_system_hooks,omitempty" json:"allow_local_requests_from_system_hooks,omitempty"` + AllowLocalRequestsFromWebHooksAndServices *bool `url:"allow_local_requests_from_web_hooks_and_services,omitempty" json:"allow_local_requests_from_web_hooks_and_services,omitempty"` + ArchiveBuildsInHumanReadable *string `url:"archive_builds_in_human_readable,omitempty" json:"archive_builds_in_human_readable,omitempty"` + AssetProxyAllowlist *[]string `url:"asset_proxy_allowlist,omitempty" json:"asset_proxy_allowlist,omitempty"` + AssetProxyEnabled *bool `url:"asset_proxy_enabled,omitempty" json:"asset_proxy_enabled,omitempty"` + AssetProxySecretKey *string `url:"asset_proxy_secret_key,omitempty" json:"asset_proxy_secret_key,omitempty"` + AssetProxyURL *string `url:"asset_proxy_url,omitempty" json:"asset_proxy_url,omitempty"` + AssetProxyWhitelist *[]string `url:"asset_proxy_whitelist,omitempty" json:"asset_proxy_whitelist,omitempty"` + AuthorizedKeysEnabled *bool `url:"authorized_keys_enabled,omitempty" json:"authorized_keys_enabled,omitempty"` + AutoDevOpsDomain *string `url:"auto_devops_domain,omitempty" json:"auto_devops_domain,omitempty"` + AutoDevOpsEnabled *bool `url:"auto_devops_enabled,omitempty" json:"auto_devops_enabled,omitempty"` + AutomaticPurchasedStorageAllocation *bool `url:"automatic_purchased_storage_allocation,omitempty" json:"automatic_purchased_storage_allocation,omitempty"` + CheckNamespacePlan *bool `url:"check_namespace_plan,omitempty" json:"check_namespace_plan,omitempty"` + CommitEmailHostname *string `url:"commit_email_hostname,omitempty" json:"commit_email_hostname,omitempty"` + ContainerExpirationPoliciesEnableHistoricEntries *bool `url:"container_expiration_policies_enable_historic_entries,omitempty" json:"container_expiration_policies_enable_historic_entries,omitempty"` + ContainerRegistryCleanupTagsServiceMaxListSize *int `url:"container_registry_cleanup_tags_service_max_list_size,omitempty" json:"container_registry_cleanup_tags_service_max_list_size,omitempty"` + ContainerRegistryDeleteTagsServiceTimeout *int `url:"container_registry_delete_tags_service_timeout,omitempty" json:"container_registry_delete_tags_service_timeout,omitempty"` + ContainerRegistryExpirationPoliciesCaching *bool `url:"container_registry_expiration_policies_caching,omitempty" json:"container_registry_expiration_policies_caching,omitempty"` + ContainerRegistryExpirationPoliciesWorkerCapacity *int `url:"container_registry_expiration_policies_worker_capacity,omitempty" json:"container_registry_expiration_policies_worker_capacity,omitempty"` + ContainerRegistryImportCreatedBefore *time.Time `url:"container_registry_import_created_before,omitempty" json:"container_registry_import_created_before,omitempty"` + ContainerRegistryImportMaxRetries *int `url:"container_registry_import_max_retries,omitempty" json:"container_registry_import_max_retries,omitempty"` + ContainerRegistryImportMaxStepDuration *int `url:"container_registry_import_max_step_duration,omitempty" json:"container_registry_import_max_step_duration,omitempty"` + ContainerRegistryImportMaxTagsCount *int `url:"container_registry_import_max_tags_count,omitempty" json:"container_registry_import_max_tags_count,omitempty"` + ContainerRegistryImportStartMaxRetries *int 
`url:"container_registry_import_start_max_retries,omitempty" json:"container_registry_import_start_max_retries,omitempty"` + ContainerRegistryImportTargetPlan *string `url:"container_registry_import_target_plan,omitempty" json:"container_registry_import_target_plan,omitempty"` + ContainerRegistryTokenExpireDelay *int `url:"container_registry_token_expire_delay,omitempty" json:"container_registry_token_expire_delay,omitempty"` + CustomHTTPCloneURLRoot *string `url:"custom_http_clone_url_root,omitempty" json:"custom_http_clone_url_root,omitempty"` + DNSRebindingProtectionEnabled *bool `url:"dns_rebinding_protection_enabled,omitempty" json:"dns_rebinding_protection_enabled,omitempty"` + DSAKeyRestriction *int `url:"dsa_key_restriction,omitempty" json:"dsa_key_restriction,omitempty"` + DeactivateDormantUsers *bool `url:"deactivate_dormant_users,omitempty" json:"deactivate_dormant_users,omitempty"` + DefaultArtifactsExpireIn *string `url:"default_artifacts_expire_in,omitempty" json:"default_artifacts_expire_in,omitempty"` + DefaultBranchName *string `url:"default_branch_name,omitempty" json:"default_branch_name,omitempty"` + DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"` + DefaultCiConfigPath *string `url:"default_ci_config_path,omitempty" json:"default_ci_config_path,omitempty"` + DefaultGroupVisibility *VisibilityValue `url:"default_group_visibility,omitempty" json:"default_group_visibility,omitempty"` + DefaultProjectCreation *int `url:"default_project_creation,omitempty" json:"default_project_creation,omitempty"` + DefaultProjectDeletionProtection *bool `url:"default_project_deletion_protection,omitempty" json:"default_project_deletion_protection,omitempty"` + DefaultProjectVisibility *VisibilityValue `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"` + DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"` + DefaultSnippetVisibility *VisibilityValue `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"` + DelayedGroupDeletion *bool `url:"delayed_group_deletion,omitempty" json:"delayed_group_deletion,omitempty"` + DelayedProjectDeletion *bool `url:"delayed_project_deletion,omitempty" json:"delayed_project_deletion,omitempty"` + DeleteInactiveProjects *bool `url:"delete_inactive_projects,omitempty" json:"delete_inactive_projects,omitempty"` + DeletionAdjournedPeriod *int `url:"deletion_adjourned_period,omitempty" json:"deletion_adjourned_period,omitempty"` + DiffMaxFiles *int `url:"diff_max_files,omitempty" json:"diff_max_files,omitempty"` + DiffMaxLines *int `url:"diff_max_lines,omitempty" json:"diff_max_lines,omitempty"` + DiffMaxPatchBytes *int `url:"diff_max_patch_bytes,omitempty" json:"diff_max_patch_bytes,omitempty"` + DisableFeedToken *bool `url:"disable_feed_token,omitempty" json:"disable_feed_token,omitempty"` + DisableOverridingApproversPerMergeRequest *bool `url:"disable_overriding_approvers_per_merge_request,omitempty" json:"disable_overriding_approvers_per_merge_request,omitempty"` + DisabledOauthSignInSources *[]string `url:"disabled_oauth_sign_in_sources,omitempty" json:"disabled_oauth_sign_in_sources,omitempty"` + DomainAllowlist *[]string `url:"domain_allowlist,omitempty" json:"domain_allowlist,omitempty"` + DomainDenylist *[]string `url:"domain_denylist,omitempty" json:"domain_denylist,omitempty"` + DomainDenylistEnabled *bool `url:"domain_denylist_enabled,omitempty" 
json:"domain_denylist_enabled,omitempty"` + ECDSAKeyRestriction *int `url:"ecdsa_key_restriction,omitempty" json:"ecdsa_key_restriction,omitempty"` + ECDSASKKeyRestriction *int `url:"ecdsa_sk_key_restriction,omitempty" json:"ecdsa_sk_key_restriction,omitempty"` + EKSAccessKeyID *string `url:"eks_access_key_id,omitempty" json:"eks_access_key_id,omitempty"` + EKSAccountID *string `url:"eks_account_id,omitempty" json:"eks_account_id,omitempty"` + EKSIntegrationEnabled *bool `url:"eks_integration_enabled,omitempty" json:"eks_integration_enabled,omitempty"` + EKSSecretAccessKey *string `url:"eks_secret_access_key,omitempty" json:"eks_secret_access_key,omitempty"` + Ed25519KeyRestriction *int `url:"ed25519_key_restriction,omitempty" json:"ed25519_key_restriction,omitempty"` + Ed25519SKKeyRestriction *int `url:"ed25519_sk_key_restriction,omitempty" json:"ed25519_sk_key_restriction,omitempty"` + ElasticsearchAWS *bool `url:"elasticsearch_aws,omitempty" json:"elasticsearch_aws,omitempty"` + ElasticsearchAWSAccessKey *string `url:"elasticsearch_aws_access_key,omitempty" json:"elasticsearch_aws_access_key,omitempty"` + ElasticsearchAWSRegion *string `url:"elasticsearch_aws_region,omitempty" json:"elasticsearch_aws_region,omitempty"` + ElasticsearchAWSSecretAccessKey *string `url:"elasticsearch_aws_secret_access_key,omitempty" json:"elasticsearch_aws_secret_access_key,omitempty"` + ElasticsearchAnalyzersKuromojiEnabled *bool `url:"elasticsearch_analyzers_kuromoji_enabled,omitempty" json:"elasticsearch_analyzers_kuromoji_enabled,omitempty"` + ElasticsearchAnalyzersKuromojiSearch *int `url:"elasticsearch_analyzers_kuromoji_search,omitempty" json:"elasticsearch_analyzers_kuromoji_search,omitempty"` + ElasticsearchAnalyzersSmartCNEnabled *bool `url:"elasticsearch_analyzers_smartcn_enabled,omitempty" json:"elasticsearch_analyzers_smartcn_enabled,omitempty"` + ElasticsearchAnalyzersSmartCNSearch *int `url:"elasticsearch_analyzers_smartcn_search,omitempty" json:"elasticsearch_analyzers_smartcn_search,omitempty"` + ElasticsearchClientRequestTimeout *int `url:"elasticsearch_client_request_timeout,omitempty" json:"elasticsearch_client_request_timeout,omitempty"` + ElasticsearchIndexedFieldLengthLimit *int `url:"elasticsearch_indexed_field_length_limit,omitempty" json:"elasticsearch_indexed_field_length_limit,omitempty"` + ElasticsearchIndexedFileSizeLimitKB *int `url:"elasticsearch_indexed_file_size_limit_kb,omitempty" json:"elasticsearch_indexed_file_size_limit_kb,omitempty"` + ElasticsearchIndexing *bool `url:"elasticsearch_indexing,omitempty" json:"elasticsearch_indexing,omitempty"` + ElasticsearchLimitIndexing *bool `url:"elasticsearch_limit_indexing,omitempty" json:"elasticsearch_limit_indexing,omitempty"` + ElasticsearchMaxBulkConcurrency *int `url:"elasticsearch_max_bulk_concurrency,omitempty" json:"elasticsearch_max_bulk_concurrency,omitempty"` + ElasticsearchMaxBulkSizeMB *int `url:"elasticsearch_max_bulk_size_mb,omitempty" json:"elasticsearch_max_bulk_size_mb,omitempty"` + ElasticsearchNamespaceIDs *[]int `url:"elasticsearch_namespace_ids,omitempty" json:"elasticsearch_namespace_ids,omitempty"` + ElasticsearchPassword *string `url:"elasticsearch_password,omitempty" json:"elasticsearch_password,omitempty"` + ElasticsearchPauseIndexing *bool `url:"elasticsearch_pause_indexing,omitempty" json:"elasticsearch_pause_indexing,omitempty"` + ElasticsearchProjectIDs *[]int `url:"elasticsearch_project_ids,omitempty" json:"elasticsearch_project_ids,omitempty"` + ElasticsearchReplicas *int 
`url:"elasticsearch_replicas,omitempty" json:"elasticsearch_replicas,omitempty"` + ElasticsearchSearch *bool `url:"elasticsearch_search,omitempty" json:"elasticsearch_search,omitempty"` + ElasticsearchShards *int `url:"elasticsearch_shards,omitempty" json:"elasticsearch_shards,omitempty"` + ElasticsearchURL *string `url:"elasticsearch_url,omitempty" json:"elasticsearch_url,omitempty"` + ElasticsearchUsername *string `url:"elasticsearch_username,omitempty" json:"elasticsearch_username,omitempty"` + EmailAdditionalText *string `url:"email_additional_text,omitempty" json:"email_additional_text,omitempty"` + EmailAuthorInBody *bool `url:"email_author_in_body,omitempty" json:"email_author_in_body,omitempty"` + EmailRestrictions *string `url:"email_restrictions,omitempty" json:"email_restrictions,omitempty"` + EmailRestrictionsEnabled *bool `url:"email_restrictions_enabled,omitempty" json:"email_restrictions_enabled,omitempty"` + EnabledGitAccessProtocol *string `url:"enabled_git_access_protocol,omitempty" json:"enabled_git_access_protocol,omitempty"` + EnforceNamespaceStorageLimit *bool `url:"enforce_namespace_storage_limit,omitempty" json:"enforce_namespace_storage_limit,omitempty"` + EnforcePATExpiration *bool `url:"enforce_pat_expiration,omitempty" json:"enforce_pat_expiration,omitempty"` + EnforceSSHKeyExpiration *bool `url:"enforce_ssh_key_expiration,omitempty" json:"enforce_ssh_key_expiration,omitempty"` + EnforceTerms *bool `url:"enforce_terms,omitempty" json:"enforce_terms,omitempty"` + ExternalAuthClientCert *string `url:"external_auth_client_cert,omitempty" json:"external_auth_client_cert,omitempty"` + ExternalAuthClientKey *string `url:"external_auth_client_key,omitempty" json:"external_auth_client_key,omitempty"` + ExternalAuthClientKeyPass *string `url:"external_auth_client_key_pass,omitempty" json:"external_auth_client_key_pass,omitempty"` + ExternalAuthorizationServiceDefaultLabel *string `url:"external_authorization_service_default_label,omitempty" json:"external_authorization_service_default_label,omitempty"` + ExternalAuthorizationServiceEnabled *bool `url:"external_authorization_service_enabled,omitempty" json:"external_authorization_service_enabled,omitempty"` + ExternalAuthorizationServiceTimeout *float64 `url:"external_authorization_service_timeout,omitempty" json:"external_authorization_service_timeout,omitempty"` + ExternalAuthorizationServiceURL *string `url:"external_authorization_service_url,omitempty" json:"external_authorization_service_url,omitempty"` + ExternalPipelineValidationServiceTimeout *int `url:"external_pipeline_validation_service_timeout,omitempty" json:"external_pipeline_validation_service_timeout,omitempty"` + ExternalPipelineValidationServiceToken *string `url:"external_pipeline_validation_service_token,omitempty" json:"external_pipeline_validation_service_token,omitempty"` + ExternalPipelineValidationServiceURL *string `url:"external_pipeline_validation_service_url,omitempty" json:"external_pipeline_validation_service_url,omitempty"` + FileTemplateProjectID *int `url:"file_template_project_id,omitempty" json:"file_template_project_id,omitempty"` + FirstDayOfWeek *int `url:"first_day_of_week,omitempty" json:"first_day_of_week,omitempty"` + FlocEnabled *bool `url:"floc_enabled,omitempty" json:"floc_enabled,omitempty"` + GeoNodeAllowedIPs *string `url:"geo_node_allowed_ips,omitempty" json:"geo_node_allowed_ips,omitempty"` + GeoStatusTimeout *int `url:"geo_status_timeout,omitempty" json:"geo_status_timeout,omitempty"` + GitTwoFactorSessionExpiry *int 
`url:"git_two_factor_session_expiry,omitempty" json:"git_two_factor_session_expiry,omitempty"` + GitalyTimeoutDefault *int `url:"gitaly_timeout_default,omitempty" json:"gitaly_timeout_default,omitempty"` + GitalyTimeoutFast *int `url:"gitaly_timeout_fast,omitempty" json:"gitaly_timeout_fast,omitempty"` + GitalyTimeoutMedium *int `url:"gitaly_timeout_medium,omitempty" json:"gitaly_timeout_medium,omitempty"` + GitpodEnabled *bool `url:"gitpod_enabled,omitempty" json:"gitpod_enabled,omitempty"` + GitpodURL *string `url:"gitpod_url,omitempty" json:"gitpod_url,omitempty"` + GitRateLimitUsersAllowlist *[]string `url:"git_rate_limit_users_allowlist,omitempty" json:"git_rate_limit_users_allowlist,omitempty"` + GrafanaEnabled *bool `url:"grafana_enabled,omitempty" json:"grafana_enabled,omitempty"` + GrafanaURL *string `url:"grafana_url,omitempty" json:"grafana_url,omitempty"` + GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"` + GroupDownloadExportLimit *int `url:"group_download_export_limit,omitempty" json:"group_download_export_limit,omitempty"` + GroupExportLimit *int `url:"group_export_limit,omitempty" json:"group_export_limit,omitempty"` + GroupImportLimit *int `url:"group_import_limit,omitempty" json:"group_import_limit,omitempty"` + GroupOwnersCanManageDefaultBranchProtection *bool `url:"group_owners_can_manage_default_branch_protection,omitempty" json:"group_owners_can_manage_default_branch_protection,omitempty"` + GroupRunnerTokenExpirationInterval *int `url:"group_runner_token_expiration_interval,omitempty" json:"group_runner_token_expiration_interval,omitempty"` + HTMLEmailsEnabled *bool `url:"html_emails_enabled,omitempty" json:"html_emails_enabled,omitempty"` + HashedStorageEnabled *bool `url:"hashed_storage_enabled,omitempty" json:"hashed_storage_enabled,omitempty"` + HelpPageDocumentationBaseURL *string `url:"help_page_documentation_base_url,omitempty" json:"help_page_documentation_base_url,omitempty"` + HelpPageHideCommercialContent *bool `url:"help_page_hide_commercial_content,omitempty" json:"help_page_hide_commercial_content,omitempty"` + HelpPageSupportURL *string `url:"help_page_support_url,omitempty" json:"help_page_support_url,omitempty"` + HelpPageText *string `url:"help_page_text,omitempty" json:"help_page_text,omitempty"` + HelpText *string `url:"help_text,omitempty" json:"help_text,omitempty"` + HideThirdPartyOffers *bool `url:"hide_third_party_offers,omitempty" json:"hide_third_party_offers,omitempty"` + HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"` + HousekeepingBitmapsEnabled *bool `url:"housekeeping_bitmaps_enabled,omitempty" json:"housekeeping_bitmaps_enabled,omitempty"` + HousekeepingEnabled *bool `url:"housekeeping_enabled,omitempty" json:"housekeeping_enabled,omitempty"` + HousekeepingFullRepackPeriod *int `url:"housekeeping_full_repack_period,omitempty" json:"housekeeping_full_repack_period,omitempty"` + HousekeepingGcPeriod *int `url:"housekeeping_gc_period,omitempty" json:"housekeeping_gc_period,omitempty"` + HousekeepingIncrementalRepackPeriod *int `url:"housekeeping_incremental_repack_period,omitempty" json:"housekeeping_incremental_repack_period,omitempty"` + ImportSources *[]string `url:"import_sources,omitempty" json:"import_sources,omitempty"` + InactiveProjectsDeleteAfterMonths *int `url:"inactive_projects_delete_after_months,omitempty" json:"inactive_projects_delete_after_months,omitempty"` + InactiveProjectsMinSizeMB *int `url:"inactive_projects_min_size_mb,omitempty" 
json:"inactive_projects_min_size_mb,omitempty"` + InactiveProjectsSendWarningEmailAfterMonths *int `url:"inactive_projects_send_warning_email_after_months,omitempty" json:"inactive_projects_send_warning_email_after_months,omitempty"` + InProductMarketingEmailsEnabled *bool `url:"in_product_marketing_emails_enabled,omitempty" json:"in_product_marketing_emails_enabled,omitempty"` + InvisibleCaptchaEnabled *bool `url:"invisible_captcha_enabled,omitempty" json:"invisible_captcha_enabled,omitempty"` + IssuesCreateLimit *int `url:"issues_create_limit,omitempty" json:"issues_create_limit,omitempty"` + KeepLatestArtifact *bool `url:"keep_latest_artifact,omitempty" json:"keep_latest_artifact,omitempty"` + KrokiEnabled *bool `url:"kroki_enabled,omitempty" json:"kroki_enabled,omitempty"` + KrokiFormats *map[string]bool `url:"kroki_formats,omitempty" json:"kroki_formats,omitempty"` + KrokiURL *string `url:"kroki_url,omitempty" json:"kroki_url,omitempty"` + LocalMarkdownVersion *int `url:"local_markdown_version,omitempty" json:"local_markdown_version,omitempty"` + LockMembershipsToLDAP *bool `url:"lock_memberships_to_ldap,omitempty" json:"lock_memberships_to_ldap,omitempty"` + LoginRecaptchaProtectionEnabled *bool `url:"login_recaptcha_protection_enabled,omitempty" json:"login_recaptcha_protection_enabled,omitempty"` + MailgunEventsEnabled *bool `url:"mailgun_events_enabled,omitempty" json:"mailgun_events_enabled,omitempty"` + MailgunSigningKey *string `url:"mailgun_signing_key,omitempty" json:"mailgun_signing_key,omitempty"` + MaintenanceMode *bool `url:"maintenance_mode,omitempty" json:"maintenance_mode,omitempty"` + MaintenanceModeMessage *string `url:"maintenance_mode_message,omitempty" json:"maintenance_mode_message,omitempty"` + MaxArtifactsSize *int `url:"max_artifacts_size,omitempty" json:"max_artifacts_size,omitempty"` + MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"` + MaxExportSize *int `url:"max_export_size,omitempty" json:"max_export_size,omitempty"` + MaxImportSize *int `url:"max_import_size,omitempty" json:"max_import_size,omitempty"` + MaxNumberOfRepositoryDownloads *int `url:"max_number_of_repository_downloads,omitempty" json:"max_number_of_repository_downloads,omitempty"` + MaxNumberOfRepositoryDownloadsWithinTimePeriod *int `url:"max_number_of_repository_downloads_within_time_period,omitempty" json:"max_number_of_repository_downloads_within_time_period,omitempty"` + MaxPagesSize *int `url:"max_pages_size,omitempty" json:"max_pages_size,omitempty"` + MaxPersonalAccessTokenLifetime *int `url:"max_personal_access_token_lifetime,omitempty" json:"max_personal_access_token_lifetime,omitempty"` + MaxSSHKeyLifetime *int `url:"max_ssh_key_lifetime,omitempty" json:"max_ssh_key_lifetime,omitempty"` + MaxYAMLDepth *int `url:"max_yaml_depth,omitempty" json:"max_yaml_depth,omitempty"` + MaxYAMLSizeBytes *int `url:"max_yaml_size_bytes,omitempty" json:"max_yaml_size_bytes,omitempty"` + MetricsMethodCallThreshold *int `url:"metrics_method_call_threshold,omitempty" json:"metrics_method_call_threshold,omitempty"` + MinimumPasswordLength *int `url:"minimum_password_length,omitempty" json:"minimum_password_length,omitempty"` + MirrorAvailable *bool `url:"mirror_available,omitempty" json:"mirror_available,omitempty"` + MirrorCapacityThreshold *int `url:"mirror_capacity_threshold,omitempty" json:"mirror_capacity_threshold,omitempty"` + MirrorMaxCapacity *int `url:"mirror_max_capacity,omitempty" json:"mirror_max_capacity,omitempty"` + MirrorMaxDelay *int 
`url:"mirror_max_delay,omitempty" json:"mirror_max_delay,omitempty"` + NPMPackageRequestsForwarding *bool `url:"npm_package_requests_forwarding,omitempty" json:"npm_package_requests_forwarding,omitempty"` + NotesCreateLimit *int `url:"notes_create_limit,omitempty" json:"notes_create_limit,omitempty"` + NotifyOnUnknownSignIn *bool `url:"notify_on_unknown_sign_in,omitempty" json:"notify_on_unknown_sign_in,omitempty"` + OutboundLocalRequestsAllowlistRaw *string `url:"outbound_local_requests_allowlist_raw,omitempty" json:"outbound_local_requests_allowlist_raw,omitempty"` + OutboundLocalRequestsWhitelist *[]string `url:"outbound_local_requests_whitelist,omitempty" json:"outbound_local_requests_whitelist,omitempty"` + PackageRegistryCleanupPoliciesWorkerCapacity *int `url:"package_registry_cleanup_policies_worker_capacity,omitempty" json:"package_registry_cleanup_policies_worker_capacity,omitempty"` + PagesDomainVerificationEnabled *bool `url:"pages_domain_verification_enabled,omitempty" json:"pages_domain_verification_enabled,omitempty"` + PasswordAuthenticationEnabledForGit *bool `url:"password_authentication_enabled_for_git,omitempty" json:"password_authentication_enabled_for_git,omitempty"` + PasswordAuthenticationEnabledForWeb *bool `url:"password_authentication_enabled_for_web,omitempty" json:"password_authentication_enabled_for_web,omitempty"` + PasswordNumberRequired *bool `url:"password_number_required,omitempty" json:"password_number_required,omitempty"` + PasswordSymbolRequired *bool `url:"password_symbol_required,omitempty" json:"password_symbol_required,omitempty"` + PasswordUppercaseRequired *bool `url:"password_uppercase_required,omitempty" json:"password_uppercase_required,omitempty"` + PasswordLowercaseRequired *bool `url:"password_lowercase_required,omitempty" json:"password_lowercase_required,omitempty"` + PerformanceBarAllowedGroupID *string `url:"performance_bar_allowed_group_id,omitempty" json:"performance_bar_allowed_group_id,omitempty"` + PerformanceBarAllowedGroupPath *string `url:"performance_bar_allowed_group_path,omitempty" json:"performance_bar_allowed_group_path,omitempty"` + PerformanceBarEnabled *bool `url:"performance_bar_enabled,omitempty" json:"performance_bar_enabled,omitempty"` + PersonalAccessTokenPrefix *string `url:"personal_access_token_prefix,omitempty" json:"personal_access_token_prefix,omitempty"` + PlantumlEnabled *bool `url:"plantuml_enabled,omitempty" json:"plantuml_enabled,omitempty"` + PlantumlURL *string `url:"plantuml_url,omitempty" json:"plantuml_url,omitempty"` + PipelineLimitPerProjectUserSha *int `url:"pipeline_limit_per_project_user_sha,omitempty" json:"pipeline_limit_per_project_user_sha,omitempty"` + PollingIntervalMultiplier *float64 `url:"polling_interval_multiplier,omitempty" json:"polling_interval_multiplier,omitempty"` + PreventMergeRequestsAuthorApproval *bool `url:"prevent_merge_requests_author_approval,omitempty" json:"prevent_merge_requests_author_approval,omitempty"` + PreventMergeRequestsCommittersApproval *bool `url:"prevent_merge_requests_committers_approval,omitempty" json:"prevent_merge_requests_committers_approval,omitempty"` + ProjectDownloadExportLimit *int `url:"project_download_export_limit,omitempty" json:"project_download_export_limit,omitempty"` + ProjectExportEnabled *bool `url:"project_export_enabled,omitempty" json:"project_export_enabled,omitempty"` + ProjectExportLimit *int `url:"project_export_limit,omitempty" json:"project_export_limit,omitempty"` + ProjectImportLimit *int 
`url:"project_import_limit,omitempty" json:"project_import_limit,omitempty"` + ProjectRunnerTokenExpirationInterval *int `url:"project_runner_token_expiration_interval,omitempty" json:"project_runner_token_expiration_interval,omitempty"` + PrometheusMetricsEnabled *bool `url:"prometheus_metrics_enabled,omitempty" json:"prometheus_metrics_enabled,omitempty"` + ProtectedCIVariables *bool `url:"protected_ci_variables,omitempty" json:"protected_ci_variables,omitempty"` + PseudonymizerEnabled *bool `url:"pseudonymizer_enabled,omitempty" json:"pseudonymizer_enabled,omitempty"` + PushEventActivitiesLimit *int `url:"push_event_activities_limit,omitempty" json:"push_event_activities_limit,omitempty"` + PushEventHooksLimit *int `url:"push_event_hooks_limit,omitempty" json:"push_event_hooks_limit,omitempty"` + PyPIPackageRequestsForwarding *bool `url:"pypi_package_requests_forwarding,omitempty" json:"pypi_package_requests_forwarding,omitempty"` + RSAKeyRestriction *int `url:"rsa_key_restriction,omitempty" json:"rsa_key_restriction,omitempty"` + RateLimitingResponseText *string `url:"rate_limiting_response_text,omitempty" json:"rate_limiting_response_text,omitempty"` + RawBlobRequestLimit *int `url:"raw_blob_request_limit,omitempty" json:"raw_blob_request_limit,omitempty"` + RecaptchaEnabled *bool `url:"recaptcha_enabled,omitempty" json:"recaptcha_enabled,omitempty"` + RecaptchaPrivateKey *string `url:"recaptcha_private_key,omitempty" json:"recaptcha_private_key,omitempty"` + RecaptchaSiteKey *string `url:"recaptcha_site_key,omitempty" json:"recaptcha_site_key,omitempty"` + ReceiveMaxInputSize *int `url:"receive_max_input_size,omitempty" json:"receive_max_input_size,omitempty"` + RepositoryChecksEnabled *bool `url:"repository_checks_enabled,omitempty" json:"repository_checks_enabled,omitempty"` + RepositorySizeLimit *int `url:"repository_size_limit,omitempty" json:"repository_size_limit,omitempty"` + RepositoryStorages *[]string `url:"repository_storages,omitempty" json:"repository_storages,omitempty"` + RepositoryStoragesWeighted *map[string]int `url:"repository_storages_weighted,omitempty" json:"repository_storages_weighted,omitempty"` + RequireAdminApprovalAfterUserSignup *bool `url:"require_admin_approval_after_user_signup,omitempty" json:"require_admin_approval_after_user_signup,omitempty"` + RequireTwoFactorAuthentication *bool `url:"require_two_factor_authentication,omitempty" json:"require_two_factor_authentication,omitempty"` + RestrictedVisibilityLevels *[]VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"` + RunnerTokenExpirationInterval *int `url:"runner_token_expiration_interval,omitempty" json:"runner_token_expiration_interval,omitempty"` + SearchRateLimit *int `url:"search_rate_limit,omitempty" json:"search_rate_limit,omitempty"` + SearchRateLimitUnauthenticated *int `url:"search_rate_limit_unauthenticated,omitempty" json:"search_rate_limit_unauthenticated,omitempty"` + SecretDetectionRevocationTokenTypesURL *string `url:"secret_detection_revocation_token_types_url,omitempty" json:"secret_detection_revocation_token_types_url,omitempty"` + SecretDetectionTokenRevocationEnabled *bool `url:"secret_detection_token_revocation_enabled,omitempty" json:"secret_detection_token_revocation_enabled,omitempty"` + SecretDetectionTokenRevocationToken *string `url:"secret_detection_token_revocation_token,omitempty" json:"secret_detection_token_revocation_token,omitempty"` + SecretDetectionTokenRevocationURL *string 
`url:"secret_detection_token_revocation_url,omitempty" json:"secret_detection_token_revocation_url,omitempty"` + SendUserConfirmationEmail *bool `url:"send_user_confirmation_email,omitempty" json:"send_user_confirmation_email,omitempty"` + SentryClientsideDSN *string `url:"sentry_clientside_dsn,omitempty" json:"sentry_clientside_dsn,omitempty"` + SentryDSN *string `url:"sentry_dsn,omitempty" json:"sentry_dsn,omitempty"` + SentryEnabled *string `url:"sentry_enabled,omitempty" json:"sentry_enabled,omitempty"` + SentryEnvironment *string `url:"sentry_environment,omitempty" json:"sentry_environment,omitempty"` + SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"` + SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"` + SharedRunnersMinutes *int `url:"shared_runners_minutes,omitempty" json:"shared_runners_minutes,omitempty"` + SharedRunnersText *string `url:"shared_runners_text,omitempty" json:"shared_runners_text,omitempty"` + SidekiqJobLimiterCompressionThresholdBytes *int `url:"sidekiq_job_limiter_compression_threshold_bytes,omitempty" json:"sidekiq_job_limiter_compression_threshold_bytes,omitempty"` + SidekiqJobLimiterLimitBytes *int `url:"sidekiq_job_limiter_limit_bytes,omitempty" json:"sidekiq_job_limiter_limit_bytes,omitempty"` + SidekiqJobLimiterMode *string `url:"sidekiq_job_limiter_mode,omitempty" json:"sidekiq_job_limiter_mode,omitempty"` + SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"` + SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"` + SlackAppEnabled *bool `url:"slack_app_enabled,omitempty" json:"slack_app_enabled,omitempty"` + SlackAppID *string `url:"slack_app_id,omitempty" json:"slack_app_id,omitempty"` + SlackAppSecret *string `url:"slack_app_secret,omitempty" json:"slack_app_secret,omitempty"` + SlackAppSigningSecret *string `url:"slack_app_signing_secret,omitempty" json:"slack_app_signing_secret,omitempty"` + SlackAppVerificationToken *string `url:"slack_app_verification_token,omitempty" json:"slack_app_verification_token,omitempty"` + SnippetSizeLimit *int `url:"snippet_size_limit,omitempty" json:"snippet_size_limit,omitempty"` + SnowplowAppID *string `url:"snowplow_app_id,omitempty" json:"snowplow_app_id,omitempty"` + SnowplowCollectorHostname *string `url:"snowplow_collector_hostname,omitempty" json:"snowplow_collector_hostname,omitempty"` + SnowplowCookieDomain *string `url:"snowplow_cookie_domain,omitempty" json:"snowplow_cookie_domain,omitempty"` + SnowplowEnabled *bool `url:"snowplow_enabled,omitempty" json:"snowplow_enabled,omitempty"` + SourcegraphEnabled *bool `url:"sourcegraph_enabled,omitempty" json:"sourcegraph_enabled,omitempty"` + SourcegraphPublicOnly *bool `url:"sourcegraph_public_only,omitempty" json:"sourcegraph_public_only,omitempty"` + SourcegraphURL *string `url:"sourcegraph_url,omitempty" json:"sourcegraph_url,omitempty"` + SpamCheckAPIKey *string `url:"spam_check_api_key,omitempty" json:"spam_check_api_key,omitempty"` + SpamCheckEndpointEnabled *bool `url:"spam_check_endpoint_enabled,omitempty" json:"spam_check_endpoint_enabled,omitempty"` + SpamCheckEndpointURL *string `url:"spam_check_endpoint_url,omitempty" json:"spam_check_endpoint_url,omitempty"` + SuggestPipelineEnabled *bool `url:"suggest_pipeline_enabled,omitempty" json:"suggest_pipeline_enabled,omitempty"` + TerminalMaxSessionTime *int `url:"terminal_max_session_time,omitempty" json:"terminal_max_session_time,omitempty"` 
+ Terms *string `url:"terms,omitempty" json:"terms,omitempty"` + ThrottleAuthenticatedAPIEnabled *bool `url:"throttle_authenticated_api_enabled,omitempty" json:"throttle_authenticated_api_enabled,omitempty"` + ThrottleAuthenticatedAPIPeriodInSeconds *int `url:"throttle_authenticated_api_period_in_seconds,omitempty" json:"throttle_authenticated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_api_requests_per_period,omitempty" json:"throttle_authenticated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedDeprecatedAPIEnabled *bool `url:"throttle_authenticated_deprecated_api_enabled,omitempty" json:"throttle_authenticated_deprecated_api_enabled,omitempty"` + ThrottleAuthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_authenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_authenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_authenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleAuthenticatedFilesAPIEnabled *bool `url:"throttle_authenticated_files_api_enabled,omitempty" json:"throttle_authenticated_files_api_enabled,omitempty"` + ThrottleAuthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_authenticated_files_api_period_in_seconds,omitempty" json:"throttle_authenticated_files_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_authenticated_files_api_requests_per_period,omitempty" json:"throttle_authenticated_files_api_requests_per_period,omitempty"` + ThrottleAuthenticatedGitLFSEnabled *bool `url:"throttle_authenticated_git_lfs_enabled,omitempty" json:"throttle_authenticated_git_lfs_enabled,omitempty"` + ThrottleAuthenticatedGitLFSPeriodInSeconds *int `url:"throttle_authenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_authenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleAuthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_authenticated_git_lfs_requests_per_period,omitempty" json:"throttle_authenticated_git_lfs_requests_per_period,omitempty"` + ThrottleAuthenticatedPackagesAPIEnabled *bool `url:"throttle_authenticated_packages_api_enabled,omitempty" json:"throttle_authenticated_packages_api_enabled,omitempty"` + ThrottleAuthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_authenticated_packages_api_period_in_seconds,omitempty" json:"throttle_authenticated_packages_api_period_in_seconds,omitempty"` + ThrottleAuthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_authenticated_packages_api_requests_per_period,omitempty" json:"throttle_authenticated_packages_api_requests_per_period,omitempty"` + ThrottleAuthenticatedWebEnabled *bool `url:"throttle_authenticated_web_enabled,omitempty" json:"throttle_authenticated_web_enabled,omitempty"` + ThrottleAuthenticatedWebPeriodInSeconds *int `url:"throttle_authenticated_web_period_in_seconds,omitempty" json:"throttle_authenticated_web_period_in_seconds,omitempty"` + ThrottleAuthenticatedWebRequestsPerPeriod *int `url:"throttle_authenticated_web_requests_per_period,omitempty" json:"throttle_authenticated_web_requests_per_period,omitempty"` + ThrottleIncidentManagementNotificationEnabled *bool `url:"throttle_incident_management_notification_enabled,omitempty" json:"throttle_incident_management_notification_enabled,omitempty"` + ThrottleIncidentManagementNotificationPerPeriod *int 
`url:"throttle_incident_management_notification_per_period,omitempty" json:"throttle_incident_management_notification_per_period,omitempty"` + ThrottleIncidentManagementNotificationPeriodInSeconds *int `url:"throttle_incident_management_notification_period_in_seconds,omitempty" json:"throttle_incident_management_notification_period_in_seconds,omitempty"` + ThrottleProtectedPathsEnabled *bool `url:"throttle_protected_paths_enabled_enabled,omitempty" json:"throttle_protected_paths_enabled,omitempty"` + ThrottleProtectedPathsPeriodInSeconds *int `url:"throttle_protected_paths_enabled_period_in_seconds,omitempty" json:"throttle_protected_paths_period_in_seconds,omitempty"` + ThrottleProtectedPathsRequestsPerPeriod *int `url:"throttle_protected_paths_enabled_requests_per_period,omitempty" json:"throttle_protected_paths_per_period,omitempty"` + ThrottleUnauthenticatedAPIEnabled *bool `url:"throttle_unauthenticated_api_enabled,omitempty" json:"throttle_unauthenticated_api_enabled,omitempty"` + ThrottleUnauthenticatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIEnabled *bool `url:"throttle_unauthenticated_deprecated_api_enabled,omitempty" json:"throttle_unauthenticated_deprecated_api_enabled,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIPeriodInSeconds *int `url:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_deprecated_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedDeprecatedAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty" json:"throttle_unauthenticated_deprecated_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedEnabled *bool `url:"throttle_unauthenticated_enabled,omitempty" json:"throttle_unauthenticated_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIEnabled *bool `url:"throttle_unauthenticated_files_api_enabled,omitempty" json:"throttle_unauthenticated_files_api_enabled,omitempty"` + ThrottleUnauthenticatedFilesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_files_api_period_in_seconds,omitempty" json:"throttle_unauthenticated_files_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedFilesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_files_api_requests_per_period,omitempty" json:"throttle_unauthenticated_files_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedGitLFSEnabled *bool `url:"throttle_unauthenticated_git_lfs_enabled,omitempty" json:"throttle_unauthenticated_git_lfs_enabled,omitempty"` + ThrottleUnauthenticatedGitLFSPeriodInSeconds *int `url:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty" json:"throttle_unauthenticated_git_lfs_period_in_seconds,omitempty"` + ThrottleUnauthenticatedGitLFSRequestsPerPeriod *int `url:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty" json:"throttle_unauthenticated_git_lfs_requests_per_period,omitempty"` + ThrottleUnauthenticatedPackagesAPIEnabled *bool `url:"throttle_unauthenticated_packages_api_enabled,omitempty" json:"throttle_unauthenticated_packages_api_enabled,omitempty"` + ThrottleUnauthenticatedPackagesAPIPeriodInSeconds *int `url:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty" 
json:"throttle_unauthenticated_packages_api_period_in_seconds,omitempty"` + ThrottleUnauthenticatedPackagesAPIRequestsPerPeriod *int `url:"throttle_unauthenticated_packages_api_requests_per_period,omitempty" json:"throttle_unauthenticated_packages_api_requests_per_period,omitempty"` + ThrottleUnauthenticatedPeriodInSeconds *int `url:"throttle_unauthenticated_period_in_seconds,omitempty" json:"throttle_unauthenticated_period_in_seconds,omitempty"` + ThrottleUnauthenticatedRequestsPerPeriod *int `url:"throttle_unauthenticated_requests_per_period,omitempty" json:"throttle_unauthenticated_requests_per_period,omitempty"` + ThrottleUnauthenticatedWebEnabled *bool `url:"throttle_unauthenticated_web_enabled,omitempty" json:"throttle_unauthenticated_web_enabled,omitempty"` + ThrottleUnauthenticatedWebPeriodInSeconds *int `url:"throttle_unauthenticated_web_period_in_seconds,omitempty" json:"throttle_unauthenticated_web_period_in_seconds,omitempty"` + ThrottleUnauthenticatedWebRequestsPerPeriod *int `url:"throttle_unauthenticated_web_requests_per_period,omitempty" json:"throttle_unauthenticated_web_requests_per_period,omitempty"` + TimeTrackingLimitToHours *bool `url:"time_tracking_limit_to_hours,omitempty" json:"time_tracking_limit_to_hours,omitempty"` + TwoFactorGracePeriod *int `url:"two_factor_grace_period,omitempty" json:"two_factor_grace_period,omitempty"` + UniqueIPsLimitEnabled *bool `url:"unique_ips_limit_enabled,omitempty" json:"unique_ips_limit_enabled,omitempty"` + UniqueIPsLimitPerUser *int `url:"unique_ips_limit_per_user,omitempty" json:"unique_ips_limit_per_user,omitempty"` + UniqueIPsLimitTimeWindow *int `url:"unique_ips_limit_time_window,omitempty" json:"unique_ips_limit_time_window,omitempty"` + UpdatingNameDisabledForUsers *bool `url:"updating_name_disabled_for_users,omitempty" json:"updating_name_disabled_for_users,omitempty"` + UsagePingEnabled *bool `url:"usage_ping_enabled,omitempty" json:"usage_ping_enabled,omitempty"` + UsagePingFeaturesEnabled *bool `url:"usage_ping_features_enabled,omitempty" json:"usage_ping_features_enabled,omitempty"` + UserDeactivationEmailsEnabled *bool `url:"user_deactivation_emails_enabled,omitempty" json:"user_deactivation_emails_enabled,omitempty"` + UserDefaultExternal *bool `url:"user_default_external,omitempty" json:"user_default_external,omitempty"` + UserDefaultInternalRegex *string `url:"user_default_internal_regex,omitempty" json:"user_default_internal_regex,omitempty"` + UserEmailLookupLimit *int `url:"user_email_lookup_limit,omitempty" json:"user_email_lookup_limit,omitempty"` + UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"` + UserShowAddSSHKeyMessage *bool `url:"user_show_add_ssh_key_message,omitempty" json:"user_show_add_ssh_key_message,omitempty"` + UsersGetByIDLimit *int `url:"users_get_by_id_limit,omitempty" json:"users_get_by_id_limit,omitempty"` + UsersGetByIDLimitAllowlistRaw *string `url:"users_get_by_id_limit_allowlist_raw,omitempty" json:"users_get_by_id_limit_allowlist_raw,omitempty"` + VersionCheckEnabled *bool `url:"version_check_enabled,omitempty" json:"version_check_enabled,omitempty"` + WebIDEClientsidePreviewEnabled *bool `url:"web_ide_clientside_preview_enabled,omitempty" json:"web_ide_clientside_preview_enabled,omitempty"` + WhatsNewVariant *string `url:"whats_new_variant,omitempty" json:"whats_new_variant,omitempty"` + WikiPageMaxContentBytes *int `url:"wiki_page_max_content_bytes,omitempty" json:"wiki_page_max_content_bytes,omitempty"` } // UpdateSettings 
// UpdateSettings updates the application settings.
diff --git a/vendor/github.com/xanzy/go-gitlab/topics.go b/vendor/github.com/xanzy/go-gitlab/topics.go
index 3469d5f3d8..c9d8e5e6ae 100644
--- a/vendor/github.com/xanzy/go-gitlab/topics.go
+++ b/vendor/github.com/xanzy/go-gitlab/topics.go
@@ -39,6 +39,7 @@ type TopicsService struct {
 type Topic struct {
 	ID int `json:"id"`
 	Name string `json:"name"`
+	Title string `json:"title"`
 	Description string `json:"description"`
 	TotalProjectsCount uint64 `json:"total_projects_count"`
 	AvatarURL string `json:"avatar_url"`
@@ -101,6 +102,7 @@ func (s *TopicsService) GetTopic(topic int, options ...RequestOptionFunc) (*Topi
 // https://docs.gitlab.com/ee/api/topics.html#create-a-project-topic
 type CreateTopicOptions struct {
 	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
 	Description *string `url:"description,omitempty" json:"description,omitempty"`
 	Avatar *TopicAvatar `url:"-" json:"-"`
 }
@@ -160,6 +162,7 @@ func (s *TopicsService) CreateTopic(opt *CreateTopicOptions, options ...RequestO
 // https://docs.gitlab.com/ee/api/topics.html#update-a-project-topic
 type UpdateTopicOptions struct {
 	Name *string `url:"name,omitempty" json:"name,omitempty"`
+	Title *string `url:"title,omitempty" json:"title,omitempty"`
 	Description *string `url:"description,omitempty" json:"description,omitempty"`
 	Avatar *TopicAvatar `url:"-" json:"avatar,omitempty"`
 }
diff --git a/vendor/github.com/xanzy/go-gitlab/types.go b/vendor/github.com/xanzy/go-gitlab/types.go
index 3328309340..b2b72c0f01 100644
--- a/vendor/github.com/xanzy/go-gitlab/types.go
+++ b/vendor/github.com/xanzy/go-gitlab/types.go
@@ -438,6 +438,8 @@ type LicenseApprovalStatusValue string
 const (
 	LicenseApproved LicenseApprovalStatusValue = "approved"
 	LicenseBlacklisted LicenseApprovalStatusValue = "blacklisted"
+	LicenseAllowed LicenseApprovalStatusValue = "allowed"
+	LicenseDenied LicenseApprovalStatusValue = "denied"
 )

 // LicenseApprovalStatus is a helper routine that allocates a new license
diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go
index 2cf25fb00d..b170e280c4 100644
--- a/vendor/github.com/xanzy/go-gitlab/users.go
+++ b/vendor/github.com/xanzy/go-gitlab/users.go
@@ -207,6 +207,7 @@ type CreateUserOptions struct {
 	External *bool `url:"external,omitempty" json:"external,omitempty"`
 	PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"`
 	Note *string `url:"note,omitempty" json:"note,omitempty"`
+	ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"`
 }

 // CreateUser creates a new user. Note only administrators can create new users.
@@ -252,6 +253,7 @@ type ModifyUserOptions struct {
 	External *bool `url:"external,omitempty" json:"external,omitempty"`
 	PrivateProfile *bool `url:"private_profile,omitempty" json:"private_profile,omitempty"`
 	Note *string `url:"note,omitempty" json:"note,omitempty"`
+	ThemeID *int `url:"theme_id,omitempty" json:"theme_id,omitempty"`
 	PublicEmail *string `url:"public_email,omitempty" json:"public_email,omitempty"`
 }

@@ -604,7 +606,7 @@ func (s *UsersService) ListGPGKeys(options ...RequestOptionFunc) ([]*GPGKey, *Re
 //
 // GitLab API docs: https://docs.gitlab.com/ce/api/users.html#get-a-specific-gpg-key
 func (s *UsersService) GetGPGKey(key int, options ...RequestOptionFunc) (*GPGKey, *Response, error) {
-	u := fmt.Sprintf("users/gpg_keys/%d", key)
+	u := fmt.Sprintf("user/gpg_keys/%d", key)

 	req, err := s.client.NewRequest(http.MethodGet, u, nil, options)
 	if err != nil {
@@ -649,7 +651,7 @@ func (s *UsersService) AddGPGKey(opt *AddGPGKeyOptions, options ...RequestOption
 //
 // GitLab API docs: https://docs.gitlab.com/ce/api/users.html#delete-a-gpg-key
 func (s *UsersService) DeleteGPGKey(key int, options ...RequestOptionFunc) (*Response, error) {
-	u := fmt.Sprintf("users/gpg_keys/%d", key)
+	u := fmt.Sprintf("user/gpg_keys/%d", key)

 	req, err := s.client.NewRequest(http.MethodDelete, u, nil, options)
 	if err != nil {
@@ -704,10 +706,10 @@ func (s *UsersService) GetGPGKeyForUser(user, key int, options ...RequestOptionF
 //
 // GitLab API docs:
 // https://docs.gitlab.com/ce/api/users.html#add-a-gpg-key-for-a-given-user
-func (s *UsersService) AddGPGKeyForUser(user int, options ...RequestOptionFunc) (*GPGKey, *Response, error) {
+func (s *UsersService) AddGPGKeyForUser(user int, opt *AddGPGKeyOptions, options ...RequestOptionFunc) (*GPGKey, *Response, error) {
 	u := fmt.Sprintf("users/%d/gpg_keys", user)

-	req, err := s.client.NewRequest(http.MethodPost, u, nil, options)
+	req, err := s.client.NewRequest(http.MethodPost, u, opt, options)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -812,9 +814,10 @@ func (s *UsersService) GetEmail(email int, options ...RequestOptionFunc) (*Email
 // AddEmailOptions represents the available AddEmail() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#add-email
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-email
 type AddEmailOptions struct {
-	Email *string `url:"email,omitempty" json:"email,omitempty"`
+	Email *string `url:"email,omitempty" json:"email,omitempty"`
+	SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"`
 }

 // AddEmail creates a new email owned by the currently authenticated user.
diff --git a/vendor/github.com/xanzy/go-gitlab/validate.go b/vendor/github.com/xanzy/go-gitlab/validate.go
index 75b0c75033..23650f87ce 100644
--- a/vendor/github.com/xanzy/go-gitlab/validate.go
+++ b/vendor/github.com/xanzy/go-gitlab/validate.go
@@ -50,15 +50,21 @@ type ProjectLintResult struct {
 	MergedYaml string `json:"merged_yaml"`
 }

-// Lint validates .gitlab-ci.yml content.
+// LintOptions represents the available Lint() options.
 //
-// GitLab API docs: https://docs.gitlab.com/ce/api/lint.html
-func (s *ValidateService) Lint(content string, options ...RequestOptionFunc) (*LintResult, *Response, error) {
-	var opts struct {
-		Content string `url:"content,omitempty" json:"content,omitempty"`
-	}
-	opts.Content = content
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration
+type LintOptions struct {
+	Content string `url:"content,omitempty" json:"content,omitempty"`
+	IncludeMergedYAML bool `url:"include_merged_yaml,omitempty" json:"include_merged_yaml,omitempty"`
+	IncludeJobs bool `url:"include_jobs,omitempty" json:"include_jobs,omitempty"`
+}
+
+// Lint validates .gitlab-ci.yml content.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration
+func (s *ValidateService) Lint(opts *LintOptions, options ...RequestOptionFunc) (*LintResult, *Response, error) {
 	req, err := s.client.NewRequest(http.MethodPost, "ci/lint", &opts, options)
 	if err != nil {
 		return nil, nil, err
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
index b94a7bfd9d..d59e65813f 100644
--- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
@@ -12,6 +12,7 @@ import (
 	_ "github.com/gogo/protobuf/gogoproto"
 	proto "github.com/golang/protobuf/proto"
 	membershippb "go.etcd.io/etcd/api/v3/membershippb"
+	_ "go.etcd.io/etcd/api/v3/versionpb"
 )

 // Reference imports to suppress errors if they are not otherwise used.
@@ -237,70 +238,73 @@ func init() {
 func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }

 var fileDescriptor_b4c9a9be0cfca103 = []byte{
-	// 1003 bytes of a gzipped FileDescriptorProto
-	// [1003 bytes of gzipped descriptor data omitted from this excerpt]
+	// 1054 bytes of a gzipped FileDescriptorProto
+	// [1054 bytes of regenerated gzipped descriptor data omitted from this excerpt]
 }

 func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
index 68926e59f6..f1036b9f61 100644
--- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
@@ -4,6 +4,7 @@ package etcdserverpb;
 import "gogoproto/gogo.proto";
 import "etcdserver.proto";
 import "rpc.proto";
+import "etcd/api/versionpb/version.proto";
 import "etcd/api/membershippb/membership.proto";

 option (gogoproto.marshaler_all) = true;
@@ -12,16 +13,20 @@ option (gogoproto.unmarshaler_all) = true;
 option (gogoproto.goproto_getters_all) = false;

 message RequestHeader {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   uint64 ID = 1;
   // username is a username that is associated with an auth token of gRPC connection
   string username = 2;
   // auth_revision is a revision number of auth.authStore. It is not related to mvcc
-  uint64 auth_revision = 3;
+  uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"];
 }

 // An InternalRaftRequest is the union of all requests which can be
 // sent via raft.
 message InternalRaftRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   RequestHeader header = 100;
   uint64 ID = 1;
@@ -38,11 +43,11 @@ message InternalRaftRequest {
   AlarmRequest alarm = 10;

-  LeaseCheckpointRequest lease_checkpoint = 11;
+  LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"];

   AuthEnableRequest auth_enable = 1000;
   AuthDisableRequest auth_disable = 1011;
-  AuthStatusRequest auth_status = 1013;
+  AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"];

   InternalAuthenticateRequest authenticate = 1012;
@@ -61,9 +66,9 @@ message InternalRaftRequest {
   AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
   AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;

-  membershippb.ClusterVersionSetRequest cluster_version_set = 1300;
-  membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301;
-  membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302;
+  membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"];
+  membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"];
+  membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"];
 }

 message EmptyResponse {
@@ -73,6 +78,7 @@
 // InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
 // For avoiding misusage the field, we have an internal version of AuthenticateRequest.
 message InternalAuthenticateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   string name = 1;
   string password = 2;

diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
index 34c1824426..46a8889d12 100644
--- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
@@ -14,6 +14,7 @@ import (
 	proto "github.com/golang/protobuf/proto"
 	authpb "go.etcd.io/etcd/api/v3/authpb"
 	mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
+	_ "go.etcd.io/etcd/api/v3/versionpb"
 	_ "google.golang.org/genproto/googleapis/api/annotations"
 	grpc "google.golang.org/grpc"
 	codes "google.golang.org/grpc/codes"
@@ -276,7 +277,7 @@ type ResponseHeader struct {
 	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
 	// revision is the key-value store revision when the request was applied.
 	// For watch progress responses, the header.revision indicates progress. All future events
-	// recieved in this stream are guaranteed to have a higher revision number than the
+	// received in this stream are guaranteed to have a higher revision number than the
 	// header.revision number.
 	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
 	// raft_term is the raft term when the request was applied.
@@ -1792,7 +1793,11 @@ type SnapshotResponse struct {
 	// remaining_bytes is the number of blob bytes to be sent after this message
 	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
 	// blob contains the next chunk of the snapshot in the snapshot stream.
-	Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+	Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+	// local version of server that created the snapshot.
+	// In cluster with binaries with different version, each cluster can return different result.
+	// Informs which etcd server version should be used when restoring the snapshot.
+	Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized []byte `json:"-"`
 	XXX_sizecache int32 `json:"-"`
@@ -1852,6 +1857,13 @@ func (m *SnapshotResponse) GetBlob() []byte {
 	return nil
 }

+func (m *SnapshotResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
 type WatchRequest struct {
 	// request_union is a request to either create a new watcher or cancel an existing watcher.
// @@ -6154,264 +6166,282 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 4107 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0xc9, - 0x75, 0xe6, 0x00, 0xc4, 0xed, 0xe0, 0x42, 0xb0, 0x79, 0x11, 0x84, 0x95, 0x28, 0x6e, 0x6b, 0xa5, - 0xe5, 0x4a, 0xbb, 0xc4, 0x9a, 0xb6, 0xb3, 0x55, 0x4a, 0xe2, 0x18, 0x22, 0xb1, 0x12, 0x97, 0x14, - 0xc9, 0x1d, 0x42, 0xda, 0x4b, 0xb9, 0xc2, 0x1a, 0x02, 0x2d, 0x72, 0x42, 0x60, 0x06, 0x9e, 0x19, - 0x40, 0xe4, 0xe6, 0xe2, 0x94, 0xcb, 0x71, 0x25, 0xaf, 0x76, 0x55, 0x2a, 0x79, 0x48, 0x5e, 0x52, - 0x29, 0x97, 0x1f, 0xfc, 0x9c, 0xbf, 0x90, 0xa7, 0x5c, 0x2a, 0x7f, 0x20, 0xb5, 0xf1, 0x4b, 0xf2, - 0x23, 0x52, 0xae, 0xbe, 0xcd, 0xf4, 0xdc, 0x40, 0xd9, 0xd8, 0xdd, 0x17, 0x11, 0x7d, 0xfa, 0xf4, - 0xf9, 0x4e, 0x9f, 0xee, 0x3e, 0xe7, 0xf4, 0xe9, 0x11, 0x94, 0x9c, 0x51, 0x6f, 0x73, 0xe4, 0xd8, - 0x9e, 0x8d, 0x2a, 0xc4, 0xeb, 0xf5, 0x5d, 0xe2, 0x4c, 0x88, 0x33, 0x3a, 0x6d, 0x2e, 0x9f, 0xd9, - 0x67, 0x36, 0xeb, 0x68, 0xd1, 0x5f, 0x9c, 0xa7, 0xd9, 0xa0, 0x3c, 0x2d, 0x63, 0x64, 0xb6, 0x86, - 0x93, 0x5e, 0x6f, 0x74, 0xda, 0xba, 0x98, 0x88, 0x9e, 0xa6, 0xdf, 0x63, 0x8c, 0xbd, 0xf3, 0xd1, - 0x29, 0xfb, 0x23, 0xfa, 0x6e, 0x9d, 0xd9, 0xf6, 0xd9, 0x80, 0xf0, 0x5e, 0xcb, 0xb2, 0x3d, 0xc3, - 0x33, 0x6d, 0xcb, 0xe5, 0xbd, 0xf8, 0xaf, 0x34, 0xa8, 0xe9, 0xc4, 0x1d, 0xd9, 0x96, 0x4b, 0x9e, - 0x12, 0xa3, 0x4f, 0x1c, 0x74, 0x1b, 0xa0, 0x37, 0x18, 0xbb, 0x1e, 0x71, 0x4e, 0xcc, 0x7e, 0x43, - 0x5b, 0xd7, 0x36, 0xe6, 0xf5, 0x92, 0xa0, 0xec, 0xf6, 0xd1, 0x1b, 0x50, 0x1a, 0x92, 0xe1, 0x29, - 0xef, 0xcd, 0xb0, 0xde, 0x22, 0x27, 0xec, 0xf6, 0x51, 0x13, 0x8a, 0x0e, 0x99, 0x98, 0xae, 0x69, - 0x5b, 0x8d, 0xec, 0xba, 0xb6, 0x91, 0xd5, 0xfd, 0x36, 0x1d, 0xe8, 0x18, 0x2f, 0xbd, 0x13, 0x8f, - 0x38, 0xc3, 0xc6, 0x3c, 0x1f, 0x48, 0x09, 0x5d, 0xe2, 0x0c, 0xf1, 0x4f, 0x72, 0x50, 0xd1, 0x0d, - 0xeb, 0x8c, 0xe8, 0xe4, 0x87, 0x63, 0xe2, 0x7a, 0xa8, 0x0e, 0xd9, 0x0b, 0x72, 0xc5, 0xe0, 0x2b, - 0x3a, 0xfd, 0xc9, 0xc7, 0x5b, 0x67, 0xe4, 0x84, 0x58, 0x1c, 0xb8, 0x42, 0xc7, 0x5b, 0x67, 0xa4, - 0x63, 0xf5, 0xd1, 0x32, 0xe4, 0x06, 0xe6, 0xd0, 0xf4, 0x04, 0x2a, 0x6f, 0x84, 0xd4, 0x99, 0x8f, - 0xa8, 0xb3, 0x0d, 0xe0, 0xda, 0x8e, 0x77, 0x62, 0x3b, 0x7d, 0xe2, 0x34, 0x72, 0xeb, 0xda, 0x46, - 0x6d, 0xeb, 0xad, 0x4d, 0x75, 0x19, 0x36, 0x55, 0x85, 0x36, 0x8f, 0x6d, 0xc7, 0x3b, 0xa4, 0xbc, - 0x7a, 0xc9, 0x95, 0x3f, 0xd1, 0x87, 0x50, 0x66, 0x42, 0x3c, 0xc3, 0x39, 0x23, 0x5e, 0x23, 0xcf, - 0xa4, 0xdc, 0xbb, 0x46, 0x4a, 0x97, 0x31, 0xeb, 0x0c, 0x9e, 0xff, 0x46, 0x18, 0x2a, 0x2e, 0x71, - 0x4c, 0x63, 0x60, 0x7e, 0x61, 0x9c, 0x0e, 0x48, 0xa3, 0xb0, 0xae, 0x6d, 0x14, 0xf5, 0x10, 0x8d, - 0xce, 0xff, 0x82, 0x5c, 0xb9, 0x27, 0xb6, 0x35, 0xb8, 0x6a, 0x14, 0x19, 0x43, 0x91, 0x12, 0x0e, - 0xad, 0xc1, 0x15, 0x5b, 0x34, 0x7b, 0x6c, 0x79, 0xbc, 0xb7, 0xc4, 0x7a, 0x4b, 0x8c, 0xc2, 0xba, - 0x37, 0xa0, 0x3e, 0x34, 0xad, 0x93, 0xa1, 0xdd, 0x3f, 0xf1, 0x0d, 0x02, 0xcc, 0x20, 0xb5, 0xa1, - 0x69, 0x3d, 0xb3, 0xfb, 0xba, 0x34, 0x0b, 0xe5, 0x34, 0x2e, 0xc3, 0x9c, 0x65, 0xc1, 0x69, 0x5c, - 0xaa, 0x9c, 0x9b, 0xb0, 0x44, 0x65, 0xf6, 0x1c, 0x62, 0x78, 0x24, 0x60, 0xae, 0x30, 0xe6, 0xc5, - 0xa1, 0x69, 0x6d, 0xb3, 0x9e, 0x10, 0xbf, 0x71, 0x19, 0xe3, 0xaf, 0x0a, 0x7e, 0xe3, 0x32, 0xcc, - 0x8f, 0x37, 0xa1, 0xe4, 0xdb, 0x1c, 0x15, 0x61, 0xfe, 0xe0, 0xf0, 0xa0, 0x53, 0x9f, 0x43, 0x00, - 0xf9, 0xf6, 0xf1, 0x76, 0xe7, 0x60, 0xa7, 0xae, 0xa1, 0x32, 0x14, 0x76, 0x3a, 0xbc, 0x91, 0xc1, - 
-	// (old gzipped FileDescriptorProto byte listing; raw bytes elided here)
+	// 4393 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped descriptor byte listing; raw bytes elided here)
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
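Note (review aside, not part of the diff): fileDescriptorRpc above is machine-generated; it stores the gzip-compressed FileDescriptorProto for rpc.proto, which is why the byte blob is replaced wholesale whenever the .proto file below changes. A minimal sketch of how such a blob can be decoded, assuming the google.golang.org/protobuf runtime; decodeFileDescriptor is a hypothetical helper for illustration, the generated code registers its descriptor itself:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// decodeFileDescriptor gunzips a blob like fileDescriptorRpc and parses it
// into a FileDescriptorProto describing the messages of rpc.proto.
func decodeFileDescriptor(gz []byte) (*descriptorpb.FileDescriptorProto, error) {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr)
	if err != nil {
		return nil, err
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return nil, err
	}
	return fd, nil
}

func main() {
	fd, err := decodeFileDescriptor(nil) // pass real descriptor bytes here
	fmt.Println(fd.GetName(), err)
}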
@@ -9581,6 +9611,13 @@ func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i -= len(m.XXX_unrecognized)
 		copy(dAtA[i:], m.XXX_unrecognized)
 	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x22
+	}
 	if len(m.Blob) > 0 {
 		i -= len(m.Blob)
 		copy(dAtA[i:], m.Blob)
@@ -13513,6 +13550,10 @@ func (m *SnapshotResponse) Size() (n int) {
 	if l > 0 {
 		n += 1 + l + sovRpc(uint64(l))
 	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -17865,6 +17906,38 @@ func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
 				m.Blob = []byte{}
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipRpc(dAtA[iNdEx:])
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
index 14391378ad..29bca1fdf7 100644
--- a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
@@ -4,6 +4,7 @@ package etcdserverpb;
 import "gogoproto/gogo.proto";
 import "etcd/api/mvccpb/kv.proto";
 import "etcd/api/authpb/auth.proto";
+import "etcd/api/versionpb/version.proto";
 
 // for grpc-gateway
 import "google/api/annotations.proto";
@@ -388,13 +389,15 @@ service Auth {
 }
 
 message ResponseHeader {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // cluster_id is the ID of the cluster which sent the response.
   uint64 cluster_id = 1;
   // member_id is the ID of the member which sent the response.
   uint64 member_id = 2;
   // revision is the key-value store revision when the request was applied.
   // For watch progress responses, the header.revision indicates progress. All future events
-  // recieved in this stream are guaranteed to have a higher revision number than the
+  // received in this stream are guaranteed to have a higher revision number than the
   // header.revision number.
   int64 revision = 3;
   // raft_term is the raft term when the request was applied.
@@ -402,17 +405,21 @@ message ResponseHeader {
 }
 
 message RangeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   enum SortOrder {
-    NONE = 0; // default, no sorting
-    ASCEND = 1; // lowest target value first
-    DESCEND = 2; // highest target value first
+    option (versionpb.etcd_version_enum) = "3.0";
+    NONE = 0; // default, no sorting
+    ASCEND = 1; // lowest target value first
+    DESCEND = 2; // highest target value first
   }
   enum SortTarget {
-    KEY = 0;
-    VERSION = 1;
-    CREATE = 2;
-    MOD = 3;
-    VALUE = 4;
+    option (versionpb.etcd_version_enum) = "3.0";
+    KEY = 0;
+    VERSION = 1;
+    CREATE = 2;
+    MOD = 3;
+    VALUE = 4;
   }
 
   // key is the first key for the range. If range_end is not given, the request only looks up key.
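Note (review aside, not part of the diff): the .pb.go hunks above are the gogo-generated wire handling for the new SnapshotResponse field. The magic byte 0x22 written by MarshalToSizedBuffer is the protobuf tag for field number 4 with wire type 2 (length-delimited): (4 << 3) | 2 = 0x22. A self-contained sketch of the same encoding, written front-to-back; appendVersionField is an illustrative helper, not etcd code:

package main

import "fmt"

// appendVersionField appends a SnapshotResponse.Version field in protobuf
// wire format: tag byte 0x22, a varint length, then the raw string bytes.
func appendVersionField(buf []byte, version string) []byte {
	buf = append(buf, 0x22) // (field 4 << 3) | wire type 2
	n := uint64(len(version))
	for n >= 0x80 { // varint-encode the length, 7 bits per byte
		buf = append(buf, byte(n)|0x80)
		n >>= 7
	}
	buf = append(buf, byte(n))
	return append(buf, version...)
}

func main() {
	fmt.Printf("% x\n", appendVersionField(nil, "3.6")) // 22 03 33 2e 36
}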
@@ -453,22 +460,24 @@ message RangeRequest {
 
   // min_mod_revision is the lower bound for returned key mod revisions; all keys with
   // lesser mod revisions will be filtered away.
-  int64 min_mod_revision = 10;
+  int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"];
 
   // max_mod_revision is the upper bound for returned key mod revisions; all keys with
   // greater mod revisions will be filtered away.
-  int64 max_mod_revision = 11;
+  int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"];
 
   // min_create_revision is the lower bound for returned key create revisions; all keys with
   // lesser create revisions will be filtered away.
-  int64 min_create_revision = 12;
+  int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"];
 
   // max_create_revision is the upper bound for returned key create revisions; all keys with
   // greater create revisions will be filtered away.
-  int64 max_create_revision = 13;
+  int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"];
 }
 
 message RangeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // kvs is the list of key-value pairs matched by the range request.
   // kvs is empty when count is requested.
@@ -480,6 +489,8 @@ message RangeResponse {
 }
 
 message PutRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // key is the key, in bytes, to put into the key-value store.
   bytes key = 1;
   // value is the value, in bytes, to associate with the key in the key-value store.
@@ -490,24 +501,28 @@ message PutRequest {
 
   // If prev_kv is set, etcd gets the previous key-value pair before changing it.
   // The previous key-value pair will be returned in the put response.
-  bool prev_kv = 4;
+  bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"];
 
   // If ignore_value is set, etcd updates the key using its current value.
   // Returns an error if the key does not exist.
-  bool ignore_value = 5;
+  bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"];
 
   // If ignore_lease is set, etcd updates the key using its current lease.
   // Returns an error if the key does not exist.
-  bool ignore_lease = 6;
+  bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"];
 }
 
 message PutResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // if prev_kv is set in the request, the previous key-value pair will be returned.
-  mvccpb.KeyValue prev_kv = 2;
+  mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"];
 }
 
 message DeleteRangeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // key is the first key to delete in the range.
   bytes key = 1;
   // range_end is the key following the last key to delete for the range [key, range_end).
@@ -519,50 +534,61 @@ message DeleteRangeRequest {
 
   // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
   // The previous key-value pairs will be returned in the delete response.
-  bool prev_kv = 3;
+  bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"];
 }
 
 message DeleteRangeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // deleted is the number of keys deleted by the delete range request.
   int64 deleted = 2;
   // if prev_kv is set in the request, the previous key-value pairs will be returned.
-  repeated mvccpb.KeyValue prev_kvs = 3;
+  repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"];
 }
 
 message RequestOp {
+  option (versionpb.etcd_version_msg) = "3.0";
   // request is a union of request types accepted by a transaction.
   oneof request {
     RangeRequest request_range = 1;
     PutRequest request_put = 2;
     DeleteRangeRequest request_delete_range = 3;
-    TxnRequest request_txn = 4;
+    TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"];
   }
 }
 
 message ResponseOp {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // response is a union of response types returned by a transaction.
   oneof response {
     RangeResponse response_range = 1;
     PutResponse response_put = 2;
     DeleteRangeResponse response_delete_range = 3;
-    TxnResponse response_txn = 4;
+    TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"];
   }
 }
 
 message Compare {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   enum CompareResult {
+    option (versionpb.etcd_version_enum) = "3.0";
+
     EQUAL = 0;
     GREATER = 1;
     LESS = 2;
-    NOT_EQUAL = 3;
+    NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"];
   }
   enum CompareTarget {
+    option (versionpb.etcd_version_enum) = "3.0";
+
     VERSION = 0;
     CREATE = 1;
     MOD = 2;
     VALUE = 3;
-    LEASE = 4;
+    LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"];
   }
   // result is logical comparison operation for this comparison.
   CompareResult result = 1;
@@ -580,13 +606,13 @@ message Compare {
     // value is the value of the given key, in bytes.
     bytes value = 7;
     // lease is the lease id of the given key.
-    int64 lease = 8;
+    int64 lease = 8 [(versionpb.etcd_version_field)="3.3"];
     // leave room for more target_union field tags, jump to 64
   }
 
   // range_end compares the given target to all keys in the range [key, range_end).
   // See RangeRequest for more details on key ranges.
-  bytes range_end = 64;
+  bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"];
   // TODO: fill out with most of the rest of RangeRequest fields when needed.
 }
 
@@ -606,6 +632,8 @@ message Compare {
 // true.
 // 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
 message TxnRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // compare is a list of predicates representing a conjunction of terms.
   // If the comparisons succeed, then the success requests will be processed in order,
   // and the response will contain their respective responses in order.
@@ -619,6 +647,8 @@ message TxnRequest {
 }
 
 message TxnResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // succeeded is set to true if the compare evaluated to true or false otherwise.
   bool succeeded = 2;
@@ -630,6 +660,8 @@ message TxnResponse {
 }
 
 // CompactionRequest compacts the key-value store up to a given revision. All superseded keys
 // with a revision less than the compaction revision will be removed.
 message CompactionRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // revision is the key-value store revision for the compaction operation.
   int64 revision = 1;
   // physical is set so the RPC will wait until the compaction is physically
@@ -639,18 +671,24 @@ message CompactionRequest {
 }
 
 message CompactionResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
 }
 
 message HashRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
 }
 
 message HashKVRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
   // revision is the key-value store revision for the hash operation.
   int64 revision = 1;
 }
 
 message HashKVResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
   ResponseHeader header = 1;
   // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
   uint32 hash = 2;
@@ -659,15 +697,20 @@ message HashKVResponse {
 }
 
 message HashResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // hash is the hash value computed from the responding member's KV's backend.
   uint32 hash = 2;
 }
 
 message SnapshotRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
 }
 
 message SnapshotResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
   // header has the current key-value store information. The first header in the snapshot
   // stream indicates the point in time of the snapshot.
   ResponseHeader header = 1;
@@ -677,18 +720,26 @@ message SnapshotResponse {
 
   // blob contains the next chunk of the snapshot in the snapshot stream.
   bytes blob = 3;
+
+  // local version of server that created the snapshot.
+  // In cluster with binaries with different version, each cluster can return different result.
+  // Informs which etcd server version should be used when restoring the snapshot.
+  string version = 4 [(versionpb.etcd_version_field)="3.6"];
 }
 
 message WatchRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
   // request_union is a request to either create a new watcher or cancel an existing watcher.
   oneof request_union {
     WatchCreateRequest create_request = 1;
     WatchCancelRequest cancel_request = 2;
-    WatchProgressRequest progress_request = 3;
+    WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"];
   }
 }
 
 message WatchCreateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // key is the key to register for watching.
   bytes key = 1;
 
@@ -709,6 +760,8 @@ message WatchCreateRequest {
   bool progress_notify = 4;
 
   enum FilterType {
+    option (versionpb.etcd_version_enum) = "3.1";
+
     // filter out put event.
     NOPUT = 0;
     // filter out delete event.
@@ -716,34 +769,38 @@ message WatchCreateRequest {
   }
 
   // filters filter the events at server side before it sends back to the watcher.
-  repeated FilterType filters = 5;
+  repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"];
 
   // If prev_kv is set, created watcher gets the previous KV before the event happens.
   // If the previous KV is already compacted, nothing will be returned.
-  bool prev_kv = 6;
+  bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"];
 
   // If watch_id is provided and non-zero, it will be assigned to this watcher.
   // Since creating a watcher in etcd is not a synchronous operation,
   // this can be used ensure that ordering is correct when creating multiple
   // watchers on the same stream. Creating a watcher with an ID already in
   // use on the stream will cause an error to be returned.
-  int64 watch_id = 7;
+  int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"];
 
   // fragment enables splitting large revisions into multiple watch responses.
-  bool fragment = 8;
+  bool fragment = 8 [(versionpb.etcd_version_field)="3.4"];
 }
 
 message WatchCancelRequest {
+  option (versionpb.etcd_version_msg) = "3.1";
   // watch_id is the watcher id to cancel so that no more events are transmitted.
-  int64 watch_id = 1;
+  int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"];
 }
 
 // Requests the a watch stream progress status be sent in the watch response stream as soon as
 // possible.
 message WatchProgressRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
 }
 
 message WatchResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // watch_id is the ID of the watcher that corresponds to the response.
   int64 watch_id = 2;
@@ -769,15 +826,17 @@ message WatchResponse {
   int64 compact_revision = 5;
 
   // cancel_reason indicates the reason for canceling the watcher.
-  string cancel_reason = 6;
+  string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"];
 
   // framgment is true if large watch response was split over multiple responses.
-  bool fragment = 7;
+  bool fragment = 7 [(versionpb.etcd_version_field)="3.4"];
 
   repeated mvccpb.Event events = 11;
 }
 
 message LeaseGrantRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // TTL is the advisory time-to-live in seconds. Expired lease will return -1.
   int64 TTL = 1;
   // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
@@ -785,6 +844,8 @@ message LeaseGrantRequest {
 }
 
 message LeaseGrantResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // ID is the lease ID for the granted lease.
   int64 ID = 2;
@@ -794,15 +855,21 @@ message LeaseGrantResponse {
 }
 
 message LeaseRevokeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
   int64 ID = 1;
 }
 
 message LeaseRevokeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
 }
 
 message LeaseCheckpoint {
+  option (versionpb.etcd_version_msg) = "3.4";
+
   // ID is the lease ID to checkpoint.
   int64 ID = 1;
 
@@ -811,19 +878,26 @@ message LeaseCheckpoint {
 }
 
 message LeaseCheckpointRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
+
   repeated LeaseCheckpoint checkpoints = 1;
 }
 
 message LeaseCheckpointResponse {
+  option (versionpb.etcd_version_msg) = "3.4";
+
   ResponseHeader header = 1;
 }
 
 message LeaseKeepAliveRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
   // ID is the lease ID for the lease to keep alive.
   int64 ID = 1;
 }
 
 message LeaseKeepAliveResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // ID is the lease ID from the keep alive request.
   int64 ID = 2;
@@ -832,6 +906,7 @@ message LeaseKeepAliveResponse {
 }
 
 message LeaseTimeToLiveRequest {
+  option (versionpb.etcd_version_msg) = "3.1";
   // ID is the lease ID for the lease.
   int64 ID = 1;
   // keys is true to query all the keys attached to this lease.
@@ -839,6 +914,8 @@ message LeaseTimeToLiveRequest {
 }
 
 message LeaseTimeToLiveResponse {
+  option (versionpb.etcd_version_msg) = "3.1";
+
   ResponseHeader header = 1;
   // ID is the lease ID from the keep alive request.
   int64 ID = 2;
@@ -851,19 +928,26 @@ message LeaseTimeToLiveResponse {
 }
 
 message LeaseLeasesRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
 }
 
 message LeaseStatus {
+  option (versionpb.etcd_version_msg) = "3.3";
+
   int64 ID = 1;
   // TODO: int64 TTL = 2;
 }
 
 message LeaseLeasesResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
   ResponseHeader header = 1;
   repeated LeaseStatus leases = 2;
 }
 
 message Member {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // ID is the member ID for this member.
   uint64 ID = 1;
   // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
@@ -873,17 +957,21 @@ message Member {
   // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
   repeated string clientURLs = 4;
   // isLearner indicates if the member is raft learner.
-  bool isLearner = 5;
+  bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"];
 }
 
 message MemberAddRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // peerURLs is the list of URLs the added member will use to communicate with the cluster.
   repeated string peerURLs = 1;
   // isLearner indicates if the added member is raft learner.
-  bool isLearner = 2;
+  bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"];
 }
 
 message MemberAddResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // member is the member information for the added member.
   Member member = 2;
@@ -892,17 +980,22 @@ message MemberAddResponse {
 }
 
 message MemberRemoveRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
   // ID is the member ID of the member to remove.
   uint64 ID = 1;
 }
 
 message MemberRemoveResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // members is a list of all members after removing the member.
   repeated Member members = 2;
 }
 
 message MemberUpdateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // ID is the member ID of the member to update.
   uint64 ID = 1;
   // peerURLs is the new list of URLs the member will use to communicate with the cluster.
@@ -910,59 +1003,80 @@ message MemberUpdateRequest {
 }
 
 message MemberUpdateResponse{
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // members is a list of all members after updating the member.
-  repeated Member members = 2;
+  repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"];
 }
 
 message MemberListRequest {
-  bool linearizable = 1;
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"];
 }
 
 message MemberListResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // members is a list of all members associated with the cluster.
   repeated Member members = 2;
 }
 
 message MemberPromoteRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
   // ID is the member ID of the member to promote.
   uint64 ID = 1;
 }
 
 message MemberPromoteResponse {
+  option (versionpb.etcd_version_msg) = "3.4";
+
   ResponseHeader header = 1;
   // members is a list of all members after promoting the member.
   repeated Member members = 2;
 }
 
 message DefragmentRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
 }
 
 message DefragmentResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
 }
 
 message MoveLeaderRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
   // targetID is the node ID for the new leader.
   uint64 targetID = 1;
 }
 
 message MoveLeaderResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
   ResponseHeader header = 1;
 }
 
 enum AlarmType {
+  option (versionpb.etcd_version_enum) = "3.0";
+
   NONE = 0; // default, used to query if any alarm is active
   NOSPACE = 1; // space quota is exhausted
-  CORRUPT = 2; // kv store corruption detected
+  CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected
 }
 
 message AlarmRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   enum AlarmAction {
-    GET = 0;
-    ACTIVATE = 1;
-    DEACTIVATE = 2;
+    option (versionpb.etcd_version_enum) = "3.0";
+
+    GET = 0;
+    ACTIVATE = 1;
+    DEACTIVATE = 2;
   }
   // action is the kind of alarm request to issue. The action
   // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
@@ -976,6 +1090,7 @@ message AlarmRequest {
 }
 
 message AlarmMember {
+  option (versionpb.etcd_version_msg) = "3.0";
   // memberID is the ID of the member associated with the raised alarm.
   uint64 memberID = 1;
   // alarm is the type of alarm which has been raised.
@@ -983,13 +1098,19 @@ message AlarmMember {
 }
 
 message AlarmResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // alarms is a list of alarms associated with the alarm request.
   repeated AlarmMember alarms = 2;
 }
 
 message DowngradeRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+
   enum DowngradeAction {
+    option (versionpb.etcd_version_enum) = "3.5";
+
     VALIDATE = 0;
     ENABLE = 1;
     CANCEL = 2;
@@ -1004,15 +1125,20 @@ message DowngradeRequest {
 }
 
 message DowngradeResponse {
+  option (versionpb.etcd_version_msg) = "3.5";
+
   ResponseHeader header = 1;
   // version is the current cluster version.
   string version = 2;
 }
 
 message StatusRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
 }
 
 message StatusResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   ResponseHeader header = 1;
   // version is the cluster protocol version used by the responding member.
   string version = 2;
@@ -1025,55 +1151,69 @@ message StatusResponse {
   // raftTerm is the current raft term of the responding member.
   uint64 raftTerm = 6;
   // raftAppliedIndex is the current raft applied index of the responding member.
-  uint64 raftAppliedIndex = 7;
+  uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"];
   // errors contains alarm/health information and status.
-  repeated string errors = 8;
+  repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"];
   // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
-  int64 dbSizeInUse = 9;
+  int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"];
   // isLearner indicates if the member is raft learner.
-  bool isLearner = 10;
+  bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"];
 }
 
 message AuthEnableRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
 }
 
 message AuthDisableRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
 }
 
 message AuthStatusRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
 }
 
 message AuthenticateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   string name = 1;
   string password = 2;
 }
 
 message AuthUserAddRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   string name = 1;
   string password = 2;
-  authpb.UserAddOptions options = 3;
-  string hashedPassword = 4;
+  authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"];
+  string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"];
 }
 
 message AuthUserGetRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   string name = 1;
 }
 
 message AuthUserDeleteRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
   // name is the name of the user to delete.
   string name = 1;
 }
 
 message AuthUserChangePasswordRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
   // name is the name of the user whose password is being changed.
   string name = 1;
   // password is the new password for the user. Note that this field will be removed in the API layer.
   string password = 2;
   // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
- string hashedPassword = 3; + string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"]; } message AuthUserGrantRoleRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // user is the name of the user which should be granted a given role. string user = 1; // role is the name of the role to grant to the user. @@ -1081,30 +1221,42 @@ message AuthUserGrantRoleRequest { } message AuthUserRevokeRoleRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string name = 1; string role = 2; } message AuthRoleAddRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // name is the name of the role to add to the authentication system. string name = 1; } message AuthRoleGetRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; } message AuthUserListRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthRoleListRequest { + option (versionpb.etcd_version_msg) = "3.0"; } message AuthRoleDeleteRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; } message AuthRoleGrantPermissionRequest { + option (versionpb.etcd_version_msg) = "3.0"; + // name is the name of the role which will be granted the permission. string name = 1; // perm is the permission to grant to the role. @@ -1112,20 +1264,28 @@ message AuthRoleGrantPermissionRequest { } message AuthRoleRevokePermissionRequest { + option (versionpb.etcd_version_msg) = "3.0"; + string role = 1; bytes key = 2; bytes range_end = 3; } message AuthEnableResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthDisableResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthStatusResponse { + option (versionpb.etcd_version_msg) = "3.5"; + ResponseHeader header = 1; bool enabled = 2; // authRevision is the current revision of auth store @@ -1133,67 +1293,93 @@ message AuthStatusResponse { } message AuthenticateResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; // token is an authorized token that can be used in succeeding RPCs string token = 2; } message AuthUserAddResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserGetResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string roles = 2; } message AuthUserDeleteResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserChangePasswordResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserGrantRoleResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthUserRevokeRoleResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleAddResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleGetResponse { - ResponseHeader header = 1; + ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"]; - repeated authpb.Permission perm = 2; + repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"]; } message AuthRoleListResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string roles = 2; } message AuthUserListResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; repeated string users = 2; } message AuthRoleDeleteResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message 
AuthRoleGrantPermissionResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } message AuthRoleRevokePermissionResponse { + option (versionpb.etcd_version_msg) = "3.0"; + ResponseHeader header = 1; } diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go index cf0d428180..386185f0f8 100644 --- a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go @@ -11,6 +11,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" + _ "go.etcd.io/etcd/api/v3/versionpb" ) // Reference imports to suppress errors if they are not otherwise used. @@ -286,30 +287,33 @@ func init() { func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) } var fileDescriptor_949fe0d019050ef5 = []byte{ - // 367 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x4e, 0xf2, 0x40, - 0x14, 0x85, 0x99, 0x42, 0xf8, 0xdb, 0xcb, 0x1f, 0xc4, 0x09, 0x89, 0x8d, 0x68, 0x25, 0x5d, 0xb1, - 0x30, 0x98, 0xe8, 0x13, 0xa0, 0xb0, 0x20, 0x81, 0xcd, 0x18, 0xdd, 0x92, 0x56, 0x2e, 0xd8, 0xa4, - 0x74, 0xea, 0xcc, 0x54, 0xd7, 0xbe, 0x85, 0x4f, 0xe0, 0xb3, 0xb0, 0xf4, 0x11, 0x14, 0x5f, 0xc4, - 0x74, 0x5a, 0x4a, 0x49, 0xdc, 0xb8, 0xbb, 0x3d, 0xbd, 0xf7, 0x9c, 0xf3, 0x35, 0x85, 0xd6, 0x0a, - 0x57, 0x3e, 0x0a, 0xf9, 0x18, 0xc4, 0xfd, 0x58, 0x70, 0xc5, 0xe9, 0xff, 0x9d, 0x12, 0xfb, 0xc7, - 0xed, 0x25, 0x5f, 0x72, 0xfd, 0xe2, 0x22, 0x9d, 0xb2, 0x1d, 0x77, 0x02, 0x4d, 0xe6, 0x2d, 0xd4, - 0x40, 0x29, 0x11, 0xf8, 0x89, 0x42, 0x49, 0x3b, 0x60, 0xc5, 0x88, 0x62, 0x96, 0x88, 0x50, 0xda, - 0xa4, 0x5b, 0xed, 0x59, 0xcc, 0x4c, 0x85, 0x3b, 0x11, 0x4a, 0x7a, 0x0a, 0x10, 0xc8, 0x59, 0x88, - 0x9e, 0x88, 0x50, 0xd8, 0x46, 0x97, 0xf4, 0x4c, 0x66, 0x05, 0x72, 0x92, 0x09, 0xee, 0x00, 0xa0, - 0xe4, 0x44, 0xa1, 0x16, 0x79, 0x2b, 0xb4, 0x49, 0x97, 0xf4, 0x2c, 0xa6, 0x67, 0x7a, 0x06, 0x8d, - 0x87, 0x30, 0xc0, 0x48, 0x65, 0xfe, 0x86, 0xf6, 0x87, 0x4c, 0x4a, 0x13, 0xdc, 0x77, 0x02, 0xf5, - 0xa9, 0xee, 0x4d, 0x9b, 0x60, 0x8c, 0x87, 0xfa, 0xba, 0xc6, 0x8c, 0xf1, 0x90, 0x8e, 0xe0, 0x40, - 0x78, 0x0b, 0x35, 0xf3, 0x8a, 0x08, 0xdd, 0xa0, 0x71, 0x79, 0xd2, 0x2f, 0x93, 0xf6, 0xf7, 0x81, - 0x58, 0x53, 0xec, 0x03, 0x8e, 0xe0, 0x30, 0x5b, 0x2f, 0x1b, 0x55, 0xb5, 0x91, 0xbd, 0x6f, 0x54, - 0x32, 0xc9, 0xbf, 0xee, 0x4e, 0x71, 0xcf, 0xc1, 0xbe, 0x09, 0x13, 0xa9, 0x50, 0xdc, 0xa3, 0x90, - 0x01, 0x8f, 0x6e, 0x51, 0x31, 0x7c, 0x4a, 0x50, 0x2a, 0xda, 0x82, 0xea, 0x33, 0x8a, 0x1c, 0x3c, - 0x1d, 0xdd, 0x57, 0x02, 0x9d, 0x7c, 0x7d, 0x5a, 0x38, 0x95, 0x2e, 0x3a, 0x60, 0xe5, 0xa5, 0x0a, - 0x64, 0x33, 0x13, 0x34, 0xf8, 0x2f, 0x8d, 0x8d, 0x3f, 0x37, 0x1e, 0xc1, 0xd1, 0x90, 0xbf, 0x44, - 0x4b, 0xe1, 0xcd, 0x71, 0x1c, 0x2d, 0x78, 0x29, 0xde, 0x86, 0x7f, 0x18, 0x79, 0x7e, 0x88, 0x73, - 0x1d, 0x6e, 0xb2, 0xed, 0xe3, 0x16, 0xc5, 0x28, 0x50, 0xae, 0xdb, 0xeb, 0x2f, 0xa7, 0xb2, 0xde, - 0x38, 0xe4, 0x63, 0xe3, 0x90, 0xcf, 0x8d, 0x43, 0xde, 0xbe, 0x9d, 0x8a, 0x5f, 0xd7, 0xff, 0xd3, - 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x7d, 0x0b, 0x87, 0x02, 0x00, 0x00, + // 401 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xcd, 0xae, 0xd2, 0x40, + 0x14, 0xbe, 0xd3, 0xde, 0xdc, 0xdb, 0x9e, 0x6b, 0x10, 0x27, 0x24, 0x36, 0xa0, 0xb5, 0x61, 0xc5, + 0xaa, 0x24, 0x12, 0x36, 0xee, 0x54, 0x58, 0x60, 0xc4, 0xc5, 0x18, 0xdc, 0x92, 0x29, 
0x1c, 0xb0, + 0x49, 0x69, 0xeb, 0xcc, 0x14, 0xf7, 0x2e, 0x7d, 0x02, 0xdf, 0xc2, 0x95, 0xef, 0xc0, 0xd2, 0x47, + 0x50, 0x7c, 0x11, 0xd3, 0x99, 0x42, 0x4b, 0x74, 0x75, 0x77, 0xa7, 0x5f, 0xcf, 0xf9, 0xfe, 0x5a, + 0x68, 0xef, 0x70, 0x17, 0xa1, 0x90, 0x1f, 0xe3, 0x3c, 0xcc, 0x45, 0xa6, 0x32, 0xfa, 0xa0, 0x46, + 0xf2, 0xa8, 0xdb, 0xd9, 0x66, 0xdb, 0x4c, 0xbf, 0x18, 0x96, 0x93, 0xd9, 0xe9, 0x06, 0xa8, 0x56, + 0xeb, 0x21, 0xcf, 0xe3, 0xe1, 0x1e, 0x85, 0x8c, 0xb3, 0x34, 0x8f, 0x4e, 0x93, 0xd9, 0xe8, 0x2f, + 0xa0, 0xc5, 0xf8, 0x46, 0xbd, 0x54, 0x4a, 0xc4, 0x51, 0xa1, 0x50, 0xd2, 0x1e, 0xb8, 0x39, 0xa2, + 0x58, 0x16, 0x22, 0x91, 0x1e, 0x09, 0xec, 0x81, 0xcb, 0x9c, 0x12, 0x58, 0x88, 0x44, 0xd2, 0xa7, + 0x00, 0xb1, 0x5c, 0x26, 0xc8, 0x45, 0x8a, 0xc2, 0xb3, 0x02, 0x32, 0x70, 0x98, 0x1b, 0xcb, 0xb7, + 0x06, 0x78, 0x71, 0xfb, 0xe5, 0x87, 0x67, 0x8f, 0xc2, 0x71, 0xff, 0x0d, 0x40, 0x83, 0x92, 0xc2, + 0x75, 0xca, 0x77, 0xe8, 0x91, 0x80, 0x0c, 0x5c, 0xa6, 0x67, 0xfa, 0x0c, 0xee, 0x56, 0x49, 0x8c, + 0xa9, 0x32, 0x42, 0x96, 0x16, 0x02, 0x03, 0x95, 0x52, 0x35, 0xd7, 0x77, 0x02, 0x37, 0x73, 0x9d, + 0x95, 0xb6, 0xc0, 0x9a, 0x4d, 0x34, 0xcd, 0x35, 0xb3, 0x66, 0x13, 0x3a, 0x85, 0x87, 0x82, 0x6f, + 0xd4, 0x92, 0x9f, 0xb5, 0xb4, 0xa7, 0xbb, 0xe7, 0x4f, 0xc2, 0x66, 0x3b, 0xe1, 0x65, 0x44, 0xd6, + 0x12, 0x97, 0x91, 0xa7, 0xf0, 0xc8, 0xac, 0x37, 0x89, 0x6c, 0x4d, 0xe4, 0x5d, 0x12, 0x35, 0x48, + 0xaa, 0x2f, 0x52, 0x23, 0xb5, 0xe3, 0x31, 0x78, 0xaf, 0x93, 0x42, 0x2a, 0x14, 0x1f, 0x4c, 0xd9, + 0xef, 0x51, 0x31, 0xfc, 0x54, 0xa0, 0x54, 0xb4, 0x0d, 0xf6, 0x1e, 0x45, 0x55, 0x45, 0x39, 0xd6, + 0x67, 0x5f, 0x09, 0xf4, 0xaa, 0xbb, 0xf9, 0x99, 0xbb, 0x71, 0xda, 0x03, 0xb7, 0xb2, 0x79, 0x2e, + 0xc1, 0x31, 0x80, 0xae, 0xe2, 0x3f, 0x19, 0xac, 0xfb, 0x67, 0x78, 0x07, 0x8f, 0x27, 0xd9, 0xe7, + 0x74, 0x2b, 0xf8, 0x1a, 0x67, 0xe9, 0x26, 0x6b, 0xf8, 0xf0, 0xe0, 0x16, 0x53, 0x1e, 0x25, 0xb8, + 0xd6, 0x2e, 0x1c, 0x76, 0x7a, 0x3c, 0x85, 0xb3, 0xfe, 0x0d, 0xf7, 0xaa, 0x73, 0xf8, 0xed, 0x5f, + 0x1d, 0x8e, 0x3e, 0xf9, 0x79, 0xf4, 0xc9, 0xaf, 0xa3, 0x4f, 0xbe, 0xfd, 0xf1, 0xaf, 0xa2, 0x1b, + 0xfd, 0x17, 0x8e, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xa3, 0xbd, 0xee, 0xdf, 0x02, 0x00, + 0x00, } func (m *RaftAttributes) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto index e63e9ecc99..cb7254f1cf 100644 --- a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto +++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package membershippb; import "gogoproto/gogo.proto"; +import "etcd/api/versionpb/version.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; @@ -10,6 +11,8 @@ option (gogoproto.goproto_getters_all) = false; // RaftAttributes represents the raft related attributes of an etcd member. message RaftAttributes { + option (versionpb.etcd_version_msg) = "3.5"; + // peerURLs is the list of peers in the raft cluster. repeated string peer_urls = 1; // isLearner indicates if the member is raft learner. @@ -18,26 +21,36 @@ message RaftAttributes { // Attributes represents all the non-raft related attributes of an etcd member. 
message Attributes { + option (versionpb.etcd_version_msg) = "3.5"; + string name = 1; repeated string client_urls = 2; } message Member { + option (versionpb.etcd_version_msg) = "3.5"; + uint64 ID = 1; RaftAttributes raft_attributes = 2; Attributes member_attributes = 3; } message ClusterVersionSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + string ver = 1; } message ClusterMemberAttrSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + uint64 member_ID = 1; Attributes member_attributes = 2; } message DowngradeInfoSetRequest { + option (versionpb.etcd_version_msg) = "3.5"; + bool enabled = 1; string ver = 2; } \ No newline at end of file diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go index ae112ae131..163e63b22c 100644 --- a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go +++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go @@ -21,15 +21,17 @@ import ( // server-side error var ( - ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() - ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() - ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() - ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() - ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() - ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() - ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() - ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() - ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() + ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err() + ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err() + ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err() + ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err() + ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err() + ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err() + ErrGRPCInvalidClientAPIVersion = status.New(codes.InvalidArgument, "etcdserver: invalid client api version").Err() + ErrGRPCInvalidSortOption = status.New(codes.InvalidArgument, "etcdserver: invalid sort option").Err() + ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err() + ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err() + ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err() ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err() ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err() @@ -70,20 +72,19 @@ var ( ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err() ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err() ErrGRPCLeaderChanged = 
status.New(codes.Unavailable, "etcdserver: leader changed").Err() - ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err() + ErrGRPCNotCapable = status.New(codes.FailedPrecondition, "etcdserver: not capable").Err() ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err() ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err() ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err() ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err() - ErrGRPCTimeoutWaitAppliedIndex = status.New(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long").Err() ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err() ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err() - ErrGPRCNotSupportedForLearner = status.New(codes.Unavailable, "etcdserver: rpc not supported for learner").Err() + ErrGRPCNotSupportedForLearner = status.New(codes.FailedPrecondition, "etcdserver: rpc not supported for learner").Err() ErrGRPCBadLeaderTransferee = status.New(codes.FailedPrecondition, "etcdserver: bad leader transferee").Err() - ErrGRPCClusterVersionUnavailable = status.New(codes.Unavailable, "etcdserver: cluster version not found during downgrade").Err() ErrGRPCWrongDowngradeVersionFormat = status.New(codes.InvalidArgument, "etcdserver: wrong downgrade target version format").Err() ErrGRPCInvalidDowngradeTargetVersion = status.New(codes.InvalidArgument, "etcdserver: invalid downgrade target version").Err() + ErrGRPCClusterVersionUnavailable = status.New(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade").Err() ErrGRPCDowngradeInProcess = status.New(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress").Err() ErrGRPCNoInflightDowngrade = status.New(codes.FailedPrecondition, "etcdserver: no inflight downgrade job").Err() @@ -96,11 +97,12 @@ var ( ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, - ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, - ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, - ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, - ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, - ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, + ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, + ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, + ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption, + ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, + ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev, + ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace, ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound, ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist, @@ -145,7 +147,7 @@ var ( ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost, ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy, ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt, - ErrorDesc(ErrGPRCNotSupportedForLearner): ErrGPRCNotSupportedForLearner, + ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner, ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee, ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable, @@ -158,15 +160,16 @@ var ( // client-side error var ( - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrKeyNotFound = 
Error(ErrGRPCKeyNotFound) - ErrValueProvided = Error(ErrGRPCValueProvided) - ErrLeaseProvided = Error(ErrGRPCLeaseProvided) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -209,7 +212,6 @@ var ( ErrTimeout = Error(ErrGRPCTimeout) ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail) ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost) - ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex) ErrUnhealthy = Error(ErrGRPCUnhealthy) ErrCorrupt = Error(ErrGRPCCorrupt) ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee) diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go index f833c76d66..07cffa09df 100644 --- a/vendor/go.etcd.io/etcd/api/v3/version/version.go +++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. MinClusterVersion = "3.0.0" - Version = "3.5.4" + Version = "3.6.0-alpha.0" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go new file mode 100644 index 0000000000..8e5ce7ec2a --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go @@ -0,0 +1,90 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: version.proto + +package versionpb + +import ( + fmt "fmt" + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +var E_EtcdVersionMsg = &proto.ExtensionDesc{ + ExtendedType: (*protobuf.MessageOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50000, + Name: "versionpb.etcd_version_msg", + Tag: "bytes,50000,opt,name=etcd_version_msg", + Filename: "version.proto", +} + +var E_EtcdVersionField = &proto.ExtensionDesc{ + ExtendedType: (*protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50001, + Name: "versionpb.etcd_version_field", + Tag: "bytes,50001,opt,name=etcd_version_field", + Filename: "version.proto", +} + +var E_EtcdVersionEnum = &proto.ExtensionDesc{ + ExtendedType: (*protobuf.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50002, + Name: "versionpb.etcd_version_enum", + Tag: "bytes,50002,opt,name=etcd_version_enum", + Filename: "version.proto", +} + +var E_EtcdVersionEnumValue = &proto.ExtensionDesc{ + ExtendedType: (*protobuf.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 50003, + Name: "versionpb.etcd_version_enum_value", + Tag: "bytes,50003,opt,name=etcd_version_enum_value", + Filename: "version.proto", +} + +func init() { + proto.RegisterExtension(E_EtcdVersionMsg) + proto.RegisterExtension(E_EtcdVersionField) + proto.RegisterExtension(E_EtcdVersionEnum) + proto.RegisterExtension(E_EtcdVersionEnumValue) +} + +func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) } + +var fileDescriptor_7d2c07d79758f814 = []byte{ + // 261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4b, 0x2d, 0x2a, + 0xce, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0x72, 0x0b, 0x92, 0xa4, + 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xa2, 0xfa, 0x20, 0x16, 0x44, 0x81, 0x94, 0x42, 0x7a, 0x7e, + 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x92, 0x5a, 0x9c, 0x5c, 0x94, + 0x59, 0x50, 0x92, 0x5f, 0x04, 0x51, 0x61, 0xe5, 0xc7, 0x25, 0x90, 0x5a, 0x92, 0x9c, 0x12, 0x0f, + 0x35, 0x29, 0x3e, 0xb7, 0x38, 0x5d, 0x48, 0x5e, 0x0f, 0xa2, 0x4d, 0x0f, 0xa6, 0x4d, 0xcf, 0x37, + 0xb5, 0xb8, 0x38, 0x31, 0x3d, 0xd5, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0xe2, 0x42, 0x1b, + 0xb3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x1f, 0x48, 0x6b, 0x18, 0x44, 0xa7, 0x6f, 0x71, 0x7a, 0x07, + 0x23, 0xa3, 0x55, 0x00, 0x97, 0x10, 0x8a, 0x79, 0x69, 0x99, 0xa9, 0x39, 0x29, 0x42, 0xb2, 0x18, + 0x26, 0xba, 0x81, 0xc4, 0x61, 0xe6, 0x5d, 0x84, 0x9a, 0x27, 0x80, 0x64, 0x1e, 0x58, 0x01, 0xc8, + 0x44, 0x5f, 0x2e, 0x41, 0x14, 0x13, 0x53, 0xf3, 0x4a, 0x73, 0x85, 0x64, 0x30, 0x0c, 0x74, 0xcd, + 0x2b, 0xcd, 0x85, 0x99, 0x77, 0x09, 0x6a, 0x1e, 0x3f, 0x92, 0x79, 0x20, 0x79, 0x90, 0x71, 0xb1, + 0x5c, 0xe2, 0x18, 0xc6, 0xc5, 0x97, 0x25, 0xe6, 0x94, 0xa6, 0x0a, 0x29, 0x62, 0x35, 0x34, 0x0c, + 0x24, 0x07, 0x33, 0xf9, 0x32, 0xd4, 0x64, 0x11, 0x34, 0x93, 0xc1, 0x8a, 0x3a, 0x18, 0x19, 0x9d, + 0x04, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, + 0xe5, 0x18, 0x92, 0xd8, 0xc0, 0xa6, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x77, 0x44, 0xe2, + 0xa4, 0xbc, 0x01, 0x00, 0x00, +} diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto new file mode 100644 index 0000000000..27cfb5d40c --- /dev/null +++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package versionpb; + +import "gogoproto/gogo.proto"; +import "google/protobuf/descriptor.proto"; + +option 
(gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// Indicates the etcd version that introduced the message, used to determine the minimal etcd version required to interpret a WAL that includes this message. +extend google.protobuf.MessageOptions { + optional string etcd_version_msg = 50000; +} + +// Indicates the etcd version that introduced the field, used to determine the minimal etcd version required to interpret a WAL that sets this field. +extend google.protobuf.FieldOptions { + optional string etcd_version_field = 50001; +} + +// Indicates the etcd version that introduced the enum, used to determine the minimal etcd version required to interpret a WAL that uses this enum. +extend google.protobuf.EnumOptions { + optional string etcd_version_enum = 50002; +} + +// Indicates the etcd version that introduced the enum value, used to determine the minimal etcd version required to interpret a WAL that sets this enum value. +extend google.protobuf.EnumValueOptions { + optional string etcd_version_enum_value = 50003; +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go index e442c3c92e..582b562313 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go @@ -17,7 +17,6 @@ package fileutil import ( "fmt" "io" - "io/ioutil" "os" "path/filepath" @@ -36,7 +35,7 @@ func IsDirWriteable(dir string) error { if err != nil { return err } - if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil { + if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil { return err } return os.Remove(f) @@ -44,16 +43,12 @@ func IsDirWriteable(dir string) error { // TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory // does not exist. TouchDirAll also ensures the given directory is writable. -func TouchDirAll(dir string) error { +func TouchDirAll(lg *zap.Logger, dir string) error { // If path is already a directory, MkdirAll does nothing and returns nil, so, // first check if dir exists with an expected permission mode. if Exist(dir) { err := CheckDirPermission(dir, PrivateDirMode) if err != nil { - lg, _ := zap.NewProduction() - if lg == nil { - lg = zap.NewExample() - } lg.Warn("check file permission", zap.Error(err)) } } else { @@ -70,8 +65,8 @@ func TouchDirAll(dir string) error { // CreateDirAll is similar to TouchDirAll but returns an error // if the deepest directory was not empty.
-func CreateDirAll(dir string) error { - err := TouchDirAll(dir) +func CreateDirAll(lg *zap.Logger, dir string) error { + err := TouchDirAll(lg, dir) if err == nil { var ns []string ns, err = ReadDir(dir) diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go index 5cbf2bc3d5..4e938b67c7 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go @@ -22,31 +22,18 @@ import ( "fmt" "os" "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") + "golang.org/x/sys/windows" ) -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - LOCKFILE_EXCLUSIVE_LOCK = 2 - LOCKFILE_FAIL_IMMEDIATELY = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) +var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file") func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { f, err := open(path, flag, perm) if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil { + if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil { f.Close() return nil, err } @@ -58,7 +45,7 @@ func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { if err != nil { return nil, err } - if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil { + if err := lockFile(windows.Handle(f.Fd()), 0); err != nil { f.Close() return nil, err } @@ -95,32 +82,17 @@ func open(path string, flag int, perm os.FileMode) (*os.File, error) { return os.NewFile(uintptr(fd), path), nil } -func lockFile(fd syscall.Handle, flags uint32) error { - var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK - flag |= flags - if fd == syscall.InvalidHandle { +func lockFile(fd windows.Handle, flags uint32) error { + if fd == windows.InvalidHandle { return nil } - err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{}) + err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{}) if err == nil { return nil } else if err.Error() == errLocked.Error() { return ErrLocked - } else if err != errLockViolation { + } else if err != windows.ERROR_LOCK_VIOLATION { return err } return nil } - -func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved uint32 = 0 - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return err -} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go new file mode 100644 index 0000000000..494ab33fb9 --- /dev/null +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go @@ -0,0 +1,38 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logutil + +import "fmt" + +const ( + JsonLogFormat = "json" + ConsoleLogFormat = "console" +) + +var DefaultLogFormat = JsonLogFormat + +// ConvertToZapFormat converts and validates the log format string. +func ConvertToZapFormat(format string) (string, error) { + switch format { + case ConsoleLogFormat: + return ConsoleLogFormat, nil + case JsonLogFormat: + return JsonLogFormat, nil + case "": + return DefaultLogFormat, nil + default: + return "", fmt.Errorf("unknown log format: %s, supported values json, console", format) + } +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go index d7fd0d90db..33d95e9cb1 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go @@ -21,17 +21,6 @@ import ( "go.uber.org/zap/zapcore" ) -// CreateDefaultZapLogger creates a logger with default zap configuration -func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) { - lcfg := DefaultZapLoggerConfig - lcfg.Level = zap.NewAtomicLevelAt(level) - c, err := lcfg.Build() - if err != nil { - return nil, err - } - return c, nil -} - // DefaultZapLoggerConfig defines default zap logger configuration. var DefaultZapLoggerConfig = zap.Config{ Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)), @@ -42,7 +31,7 @@ var DefaultZapLoggerConfig = zap.Config{ Thereafter: 100, }, - Encoding: "json", + Encoding: DefaultLogFormat, // copied from "zap.NewProductionEncoderConfig" with some updates EncoderConfig: zapcore.EncoderConfig{ diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go index 948c683490..21c377e352 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go @@ -106,9 +106,10 @@ func GetClient(service, domain string, serviceName string) (*SRVClients, error) return err } for _, srv := range addrs { + shortHost := strings.TrimSuffix(srv.Target, ".") urls = append(urls, &url.URL{ Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + Host: net.JoinHostPort(shortHost, fmt.Sprintf("%d", srv.Port)), }) } srvs = append(srvs, addrs...)
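[Editor's note] The srv.go hunk above strips the trailing dot that DNS SRV lookups leave on fully qualified targets before the host is joined with the port, so the resulting client URL carries a plain host name that TLS certificate verification can match. A minimal, self-contained sketch of that behavior; the target name and port below are invented for illustration, not taken from the diff:

package main

import (
	"fmt"
	"net"
	"net/url"
	"strings"
)

// srvToURL mirrors the patched GetClient loop: trim the FQDN root dot from
// the SRV target, then build the endpoint URL from host and port.
func srvToURL(scheme, target string, port uint16) *url.URL {
	shortHost := strings.TrimSuffix(target, ".")
	return &url.URL{
		Scheme: scheme,
		Host:   net.JoinHostPort(shortHost, fmt.Sprintf("%d", port)),
	}
}

func main() {
	// A hypothetical SRV record target; real ones come from net.LookupSRV.
	fmt.Println(srvToURL("https", "etcd-1.example.com.", 2380))
	// Prints: https://etcd-1.example.com:2380
}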
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/assert.go b/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/assert.go index e8e042021e..ef820748e6 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/assert.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/assert.go @@ -15,30 +15,23 @@ package testutil import ( - "fmt" "reflect" "testing" + + "github.com/stretchr/testify/assert" ) -func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { - t.Helper() - if (e == nil || a == nil) && (isNil(e) && isNil(a)) { - return - } - if reflect.DeepEqual(e, a) { - return - } - s := "" - if len(msg) > 1 { - s = msg[0] + ": " +func copyToInterface(msg ...string) []interface{} { + newMsg := make([]interface{}, len(msg)) + for i, v := range msg { + newMsg[i] = v } - s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) - FatalStack(t, s) + return newMsg } func AssertNil(t *testing.T, v interface{}) { t.Helper() - AssertEqual(t, nil, v) + assert.Nil(t, v) } func AssertNotNil(t *testing.T, v interface{}) { @@ -50,12 +43,14 @@ func AssertNotNil(t *testing.T, v interface{}) { func AssertTrue(t *testing.T, v bool, msg ...string) { t.Helper() - AssertEqual(t, true, v, msg...) + newMsg := copyToInterface(msg...) + assert.Equal(t, true, v, newMsg) } func AssertFalse(t *testing.T, v bool, msg ...string) { t.Helper() - AssertEqual(t, false, v, msg...) + newMsg := copyToInterface(msg...) + assert.Equal(t, false, v, newMsg) } func isNil(v interface{}) bool { diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/testingtb.go b/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/testingtb.go index 970542c040..bafaccf984 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/testingtb.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/testutil/testingtb.go @@ -15,7 +15,6 @@ package testutil import ( - "io/ioutil" "log" "os" ) @@ -112,7 +111,7 @@ func (t *testingTBProthesis) Name() string { } func (t *testingTBProthesis) TempDir() string { - dir, err := ioutil.TempDir("", t.name) + dir, err := os.MkdirTemp("", t.name) if err != nil { t.Fatal(err) } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go index 3a5aef089a..0f79865e80 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go @@ -18,7 +18,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" - "io/ioutil" + "os" ) // NewCertPool creates x509 certPool with provided CA files. @@ -26,7 +26,7 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { certPool := x509.NewCertPool() for _, CAFile := range CAFiles { - pemByte, err := ioutil.ReadFile(CAFile) + pemByte, err := os.ReadFile(CAFile) if err != nil { return nil, err } @@ -51,12 +51,12 @@ func NewCertPool(CAFiles []string) (*x509.CertPool, error) { // NewCert generates TLS cert by using the given cert,key and parse function. 
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) { - cert, err := ioutil.ReadFile(certfile) + cert, err := os.ReadFile(certfile) if err != nil { return nil, err } - key, err := ioutil.ReadFile(keyfile) + key, err := os.ReadFile(keyfile) if err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go index 992c773eaa..2a5ec9a02d 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go @@ -192,7 +192,7 @@ func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertVali ) return } - err = fileutil.TouchDirAll(dirpath) + err = fileutil.TouchDirAll(lg, dirpath) if err != nil { if info.Logger != nil { info.Logger.Warn( diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go index ad4f6904da..7536f6aff4 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package transport import ( diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go index 6f1600945c..37b17ec275 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go @@ -19,8 +19,8 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" + "os" "strings" "sync" ) @@ -168,7 +168,7 @@ func (l *tlsListener) acceptLoop() { func checkCRL(crlPath string, cert []*x509.Certificate) error { // TODO: cache - crlBytes, err := ioutil.ReadFile(crlPath) + crlBytes, err := os.ReadFile(crlPath) if err != nil { return err } diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go index 38548ddd71..49b48dc876 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package transport import ( @@ -21,12 +35,12 @@ type SocketOpts struct { // in which case lock on data file could result in unexpected // condition. User should take caution to protect against lock race. // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReusePort bool + ReusePort bool `json:"reuse-port"` // ReuseAddress enables a socket option SO_REUSEADDR which allows // binding to an address in `TIME_WAIT` state. Useful to improve MTTR // in cases where etcd is slow to restart due to excessive `TIME_WAIT`. // [1] https://man7.org/linux/man-pages/man7/socket.7.html - ReuseAddress bool + ReuseAddress bool `json:"reuse-address"` } func getControls(sopts *SocketOpts) Controls { diff --git a/vendor/github.com/sigstore/fulcio/pkg/api/metrics.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go similarity index 54% rename from vendor/github.com/sigstore/fulcio/pkg/api/metrics.go rename to vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go index a14a765f59..495c736365 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/api/metrics.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Sigstore Authors. +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,23 +11,25 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// -package api +//go:build solaris +// +build solaris + +package transport import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + "fmt" + "syscall" + + "golang.org/x/sys/unix" ) -var ( - metricNewEntries = promauto.NewCounter(prometheus.CounterOpts{ - Name: "fulcio_new_certs", - Help: "The total number of certificates generated", - }) +func setReusePort(network, address string, c syscall.RawConn) error { + return fmt.Errorf("port reuse is not supported on Solaris") +} - MetricLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Name: "fulcio_api_latency", - Help: "API Latency on calls", - }, []string{"code", "method"}) -) +func setReuseAddress(network, address string, conn syscall.RawConn) error { + return conn.Control(func(fd uintptr) { + syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) +} diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go index 432b52e0fc..e2cc6f4828 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go @@ -1,5 +1,19 @@ -//go:build !windows -// +build !windows +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +//go:build !windows && !solaris +// +build !windows,!solaris package transport diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go index 4e5af70b11..3e28ed48b4 100644 --- a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go +++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + //go:build windows // +build windows diff --git a/vendor/go.etcd.io/etcd/client/v2/client.go b/vendor/go.etcd.io/etcd/client/v2/client.go index fda25988f6..c34bc9d7c8 100644 --- a/vendor/go.etcd.io/etcd/client/v2/client.go +++ b/vendor/go.etcd.io/etcd/client/v2/client.go @@ -19,7 +19,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "math/rand" "net" "net/http" @@ -601,13 +601,15 @@ func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Respon var body []byte done := make(chan struct{}) go func() { - body, err = ioutil.ReadAll(resp.Body) + body, err = io.ReadAll(resp.Body) done <- struct{}{} }() select { case <-ctx.Done(): - resp.Body.Close() + if resp != nil { + resp.Body.Close() + } <-done return nil, nil, ctx.Err() case <-done: diff --git a/vendor/go.etcd.io/etcd/client/v2/curl.go b/vendor/go.etcd.io/etcd/client/v2/curl.go index c8bc9fba20..8d12367541 100644 --- a/vendor/go.etcd.io/etcd/client/v2/curl.go +++ b/vendor/go.etcd.io/etcd/client/v2/curl.go @@ -17,7 +17,7 @@ package client import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "os" ) @@ -53,18 +53,18 @@ func printcURL(req *http.Request) error { } if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) + b, err = io.ReadAll(req.Body) if err != nil { return err } command += fmt.Sprintf(" -d %q", string(b)) } - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command) // reset body body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) + req.Body = io.NopCloser(body) return nil } diff --git a/vendor/go.etcd.io/etcd/client/v2/json.go b/vendor/go.etcd.io/etcd/client/v2/json.go deleted file mode 100644 index d5be690a17..0000000000 --- a/vendor/go.etcd.io/etcd/client/v2/json.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2019 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "strconv" - "unsafe" - - "github.com/json-iterator/go" - "github.com/modern-go/reflect2" -) - -type customNumberExtension struct { - jsoniter.DummyExtension -} - -func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { - if typ.String() == "interface {}" { - return customNumberDecoder{} - } - return nil -} - -type customNumberDecoder struct { -} - -func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { - switch iter.WhatIsNext() { - case jsoniter.NumberValue: - var number jsoniter.Number - iter.ReadVal(&number) - i64, err := strconv.ParseInt(string(number), 10, 64) - if err == nil { - *(*interface{})(ptr) = i64 - return - } - f64, err := strconv.ParseFloat(string(number), 64) - if err == nil { - *(*interface{})(ptr) = f64 - return - } - iter.ReportError("DecodeNumber", err.Error()) - default: - *(*interface{})(ptr) = iter.Read() - } -} - -// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be -// case-sensitive when unmarshalling, and otherwise compatible with -// the encoding/json standard library. -func caseSensitiveJsonIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} diff --git a/vendor/go.etcd.io/etcd/client/v2/keys.go b/vendor/go.etcd.io/etcd/client/v2/keys.go index e8f1664617..7eb927e562 100644 --- a/vendor/go.etcd.io/etcd/client/v2/keys.go +++ b/vendor/go.etcd.io/etcd/client/v2/keys.go @@ -26,6 +26,7 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/pathutil" + kjson "sigs.k8s.io/json" ) const ( @@ -653,11 +654,9 @@ func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Resp return res, err } -var jsonIterator = caseSensitiveJsonIterator() - func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { var res Response - err := jsonIterator.Unmarshal(body, &res) + err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res) if err != nil { return nil, ErrInvalidJSON } diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md index 1e037d7eb6..af0087ebcc 100644 --- a/vendor/go.etcd.io/etcd/client/v3/README.md +++ b/vendor/go.etcd.io/etcd/client/v3/README.md @@ -1,7 +1,7 @@ -# etcd/clientv3 +# etcd/client/v3 [![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) -[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3) `etcd/clientv3` is the official Go etcd client for v3. @@ -11,26 +11,23 @@ go get go.etcd.io/etcd/client/v3 ``` -Warning: As etcd 3.5.0 was not yet released, the command above does not work. -After first pre-release of 3.5.0 [#12498](https://github.com/etcd-io/etcd/issues/12498), -etcd can be referenced using: -``` -go get go.etcd.io/etcd/client/v3@v3.5.0-pre -``` - ## Get started Create client using `clientv3.New`: ```go -cli, err := clientv3.New(clientv3.Config{ - Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, - DialTimeout: 5 * time.Second, -}) -if err != nil { - // handle error! 
+import clientv3 "go.etcd.io/etcd/client/v3" + +func main() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, + }) + if err != nil { + // handle error! + } + defer cli.Close() } -defer cli.Close() ``` etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go index 2990379ab9..971fea607c 100644 --- a/vendor/go.etcd.io/etcd/client/v3/client.go +++ b/vendor/go.etcd.io/etcd/client/v3/client.go @@ -24,7 +24,6 @@ import ( "time" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - "go.etcd.io/etcd/client/pkg/v3/logutil" "go.etcd.io/etcd/client/v3/credentials" "go.etcd.io/etcd/client/v3/internal/endpoint" "go.etcd.io/etcd/client/v3/internal/resolver" @@ -55,7 +54,9 @@ type Client struct { cfg Config creds grpccredentials.TransportCredentials resolver *resolver.EtcdManualResolver - mu *sync.RWMutex + + epMu *sync.RWMutex + endpoints []string ctx context.Context cancel context.CancelFunc @@ -161,18 +162,18 @@ func (c *Client) Ctx() context.Context { return c.ctx } // Endpoints lists the registered endpoints for the client. func (c *Client) Endpoints() []string { // copy the slice; protect original endpoints from being changed - c.mu.RLock() - defer c.mu.RUnlock() - eps := make([]string, len(c.cfg.Endpoints)) - copy(eps, c.cfg.Endpoints) + c.epMu.RLock() + defer c.epMu.RUnlock() + eps := make([]string, len(c.endpoints)) + copy(eps, c.endpoints) return eps } // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { - c.mu.Lock() - defer c.mu.Unlock() - c.cfg.Endpoints = eps + c.epMu.Lock() + defer c.epMu.Unlock() + c.endpoints = eps c.resolver.SetEndpoints(eps) } @@ -185,9 +186,7 @@ func (c *Client) Sync(ctx context.Context) error { } var eps []string for _, m := range mresp.Members { - if len(m.Name) != 0 && !m.IsLearner { - eps = append(eps, m.ClientURLs...) - } + eps = append(eps, m.ClientURLs...) } c.SetEndpoints(eps...) return nil @@ -264,6 +263,7 @@ func (c *Client) getToken(ctx context.Context) error { resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password) if err != nil { if err == rpctypes.ErrAuthNotEnabled { + c.authTokenBundle.UpdateAuthToken("") return nil } return err @@ -286,8 +286,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc. if err != nil { return nil, fmt.Errorf("failed to configure dialer: %v", err) } - if c.Username != "" && c.Password != "" { - c.authTokenBundle = credentials.NewBundle(credentials.Config{}) + if c.authTokenBundle != nil { opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials())) } @@ -299,7 +298,7 @@ func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc. dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout) defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options? } - target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.Endpoints()[0])) + target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0])) conn, err := grpc.DialContext(dctx, target, opts...) 
if err != nil { return nil, err @@ -360,7 +359,7 @@ func newClient(cfg *Config) (*Client, error) { creds: creds, ctx: ctx, cancel: cancel, - mu: new(sync.RWMutex), + epMu: new(sync.RWMutex), callOpts: defaultCallOpts, lgMu: new(sync.RWMutex), } @@ -371,10 +370,7 @@ func newClient(cfg *Config) (*Client, error) { } else if cfg.LogConfig != nil { client.lg, err = cfg.LogConfig.Build() } else { - client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel()) - if client.lg != nil { - client.lg = client.lg.Named("etcd-client") - } + client.lg, err = CreateDefaultZapLogger() } if err != nil { return nil, err @@ -383,6 +379,7 @@ func newClient(cfg *Config) (*Client, error) { if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password + client.authTokenBundle = credentials.NewBundle(credentials.Config{}) } if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { @@ -408,6 +405,8 @@ func newClient(cfg *Config) (*Client, error) { client.cancel() return nil, fmt.Errorf("at least one Endpoint is required in client config") } + client.SetEndpoints(cfg.Endpoints...) + // Use a provided endpoint target so that for https:// without any tls config given, then // grpc will assume the certificate server name is the endpoint host. conn, err := client.dialWithBalancer() diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go index 645d744a5a..7cb31f9397 100644 --- a/vendor/go.etcd.io/etcd/client/v3/doc.go +++ b/vendor/go.etcd.io/etcd/client/v3/doc.go @@ -47,8 +47,8 @@ // To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) +// defer cancel() // resp, err := kvc.Put(ctx, "sample_key", "sample_value") -// cancel() // if err != nil { // // handle error! // } diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go index 5e9fb7d458..80cd80af1d 100644 --- a/vendor/go.etcd.io/etcd/client/v3/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/kv.go @@ -16,6 +16,7 @@ package clientv3 import ( "context" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -145,10 +146,14 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { case tRange: - var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) - if err == nil { - return OpResponse{get: (*GetResponse)(resp)}, nil + if op.IsSortOptionValid() { + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) 
+ if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + } else { + err = rpctypes.ErrInvalidSortOption } case tPut: var resp *pb.PutResponse diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go index bd31e6b4a5..fcab61aaba 100644 --- a/vendor/go.etcd.io/etcd/client/v3/lease.go +++ b/vendor/go.etcd.io/etcd/client/v3/lease.go @@ -439,6 +439,9 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { for { stream, err := l.resetRecv() if err != nil { + l.lg.Warn("error occurred during lease keep alive loop", + zap.Error(err), + ) if canceledByCaller(l.stopCtx, err) { return err } @@ -571,7 +574,9 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for _, id := range tosend { r := &pb.LeaseKeepAliveRequest{ID: int64(id)} if err := stream.Send(r); err != nil { - // TODO do something with this error? + l.lg.Warn("error occurred during lease keep alive request sending", + zap.Error(err), + ) return } } diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go index ecac42730f..71a9e161ce 100644 --- a/vendor/go.etcd.io/etcd/client/v3/logger.go +++ b/vendor/go.etcd.io/etcd/client/v3/logger.go @@ -19,6 +19,7 @@ import ( "os" "go.etcd.io/etcd/client/pkg/v3/logutil" + "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zapgrpc" "google.golang.org/grpc/grpclog" @@ -28,11 +29,10 @@ func init() { // We override grpc logger only when the environment variable is set // in order to not interfere by default with user's code or other libraries. if os.Getenv("ETCD_CLIENT_DEBUG") != "" { - lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel()) + lg, err := CreateDefaultZapLogger() if err != nil { panic(err) } - lg = lg.Named("etcd-client") grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) } } @@ -57,3 +57,21 @@ func etcdClientDebugLevel() zapcore.Level { } return l } + +// CreateDefaultZapLoggerConfig creates a logger config that is configurable using env variable: +// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info) +func CreateDefaultZapLoggerConfig() zap.Config { + lcfg := logutil.DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(etcdClientDebugLevel()) + return lcfg +} + +// CreateDefaultZapLogger creates a logger that is configurable using env variable: +// ETCD_CLIENT_DEBUG= debug|info|warn|error|dpanic|panic|fatal|true (true=info) +func CreateDefaultZapLogger() (*zap.Logger, error) { + c, err := CreateDefaultZapLoggerConfig().Build() + if err != nil { + return nil, err + } + return c.Named("etcd-client"), nil +} diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go index dbea530e66..25ff135ff0 100644 --- a/vendor/go.etcd.io/etcd/client/v3/maintenance.go +++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go @@ -16,6 +16,7 @@ package clientv3 import ( "context" + "errors" "fmt" "io" @@ -31,6 +32,15 @@ type ( StatusResponse pb.StatusResponse HashKVResponse pb.HashKVResponse MoveLeaderResponse pb.MoveLeaderResponse + DowngradeResponse pb.DowngradeResponse + + DowngradeAction pb.DowngradeRequest_DowngradeAction +) + +const ( + DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE) + DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE) + DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL) ) type Maintenance interface { @@ -57,14 +67,40 @@ type Maintenance interface { // is non-zero, the hash is computed on all keys at or below the given revision. 
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) + // SnapshotWithVersion returns a reader for a point-in-time snapshot and version of etcd that created it. + // If the context "ctx" is canceled or timed out, reading from returned + // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) + // Snapshot provides a reader for a point-in-time snapshot of etcd. // If the context "ctx" is canceled or timed out, reading from returned // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + // Deprecated: use SnapshotWithVersion instead. Snapshot(ctx context.Context) (io.ReadCloser, error) // MoveLeader requests current leader to transfer its leadership to the transferee. // Request must be made to the leader. MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) + + // Downgrade requests downgrades, verifies feasibility or cancels downgrade + // on the cluster version. + // Supported since etcd 3.5. + Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) +} + +// SnapshotResponse is aggregated response from the snapshot stream. +// Consumer is responsible for closing steam by calling .Snapshot.Close() +type SnapshotResponse struct { + // Header is the first header in the snapshot stream, has the current key-value store information + // and indicates the point in time of the snapshot. + Header *pb.ResponseHeader + // Snapshot exposes ReaderCloser interface for data stored in the Blob field in the snapshot stream. + Snapshot io.ReadCloser + // Version is the local version of server that created the snapshot. + // In cluster with binaries with different version, each cluster can return different result. + // Informs which etcd server version should be used when restoring the snapshot. + // Supported on etcd >= v3.6. + Version string } type maintenance struct { @@ -202,7 +238,7 @@ func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (* return (*HashKVResponse)(resp), nil } -func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { +func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) { ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) 
if err != nil { return nil, toErr(ctx, err) @@ -210,32 +246,84 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { m.lg.Info("opened snapshot stream; downloading") pr, pw := io.Pipe() + + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + } go func() { + // Saving response is blocking + err = m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) + return + } for { resp, err := ss.Recv() if err != nil { - switch err { - case io.EOF: - m.lg.Info("completed snapshot read; closing") - default: - m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) - } - pw.CloseWithError(err) + m.logAndCloseWithError(err, pw) + return + } + err = m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) return } + } + }() + return &SnapshotResponse{ + Header: resp.Header, + Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, + Version: resp.Version, + }, err +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, toErr(ctx, err) + } - // can "resp == nil && err == nil" - // before we receive snapshot SHA digest? - // No, server sends EOF with an empty response - // after it sends SHA digest at the end + m.lg.Info("opened snapshot stream; downloading") + pr, pw := io.Pipe() - if _, werr := pw.Write(resp.Blob); werr != nil { - pw.CloseWithError(werr) + go func() { + for { + resp, err := ss.Recv() + if err != nil { + m.logAndCloseWithError(err, pw) + return + } + err = m.save(resp, pw) + if err != nil { + m.logAndCloseWithError(err, pw) return } } }() - return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil + return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, err +} + +func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) { + switch err { + case io.EOF: + m.lg.Info("completed snapshot read; closing") + default: + m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err)) + } + pw.CloseWithError(err) +} + +func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error { + // can "resp == nil && err == nil" + // before we receive snapshot SHA digest? + // No, server sends EOF with an empty response + // after it sends SHA digest at the end + + if _, werr := pw.Write(resp.Blob); werr != nil { + return werr + } + return nil } type snapshotReadCloser struct { @@ -252,3 +340,19 @@ func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*Mov resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) return (*MoveLeaderResponse)(resp), toErr(ctx, err) } + +func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) { + var actionType pb.DowngradeRequest_DowngradeAction + switch action { + case DowngradeValidate: + actionType = pb.DowngradeRequest_VALIDATE + case DowngradeEnable: + actionType = pb.DowngradeRequest_ENABLE + case DowngradeCancel: + actionType = pb.DowngradeRequest_CANCEL + default: + return nil, errors.New("etcdclient: unknown downgrade action") + } + resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...) 
+ return (*DowngradeResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/go.etcd.io/etcd/client/v3/mirror/syncer.go b/vendor/go.etcd.io/etcd/client/v3/mirror/syncer.go index 8a9ad3faf1..980bab5deb 100644 --- a/vendor/go.etcd.io/etcd/client/v3/mirror/syncer.go +++ b/vendor/go.etcd.io/etcd/client/v3/mirror/syncer.go @@ -18,7 +18,7 @@ package mirror import ( "context" - clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3" ) const ( @@ -52,13 +52,7 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha // if rev is not specified, we will choose the most recent revision. if s.rev == 0 { - // If len(s.prefix) == 0, we will check a random key to fetch the most recent - // revision (foo), otherwise we use the provided prefix. - checkPath := "foo" - if len(s.prefix) != 0 { - checkPath = s.prefix - } - resp, err := s.c.Get(ctx, checkPath) + resp, err := s.c.Get(ctx, "foo") if err != nil { errchan <- err close(respchan) @@ -74,7 +68,8 @@ func (s *syncer) SyncBase(ctx context.Context) (<-chan clientv3.GetResponse, cha var key string - opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev)} + opts := []clientv3.OpOption{clientv3.WithLimit(batchLimit), clientv3.WithRev(s.rev), + clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)} if len(s.prefix) == 0 { // If len(s.prefix) == 0, we will sync the entire key-value space. diff --git a/vendor/go.etcd.io/etcd/client/v3/namespace/kv.go b/vendor/go.etcd.io/etcd/client/v3/namespace/kv.go index f745225cac..9a428fa585 100644 --- a/vendor/go.etcd.io/etcd/client/v3/namespace/kv.go +++ b/vendor/go.etcd.io/etcd/client/v3/namespace/kv.go @@ -51,7 +51,11 @@ func (kv *kvPrefix) Get(ctx context.Context, key string, opts ...clientv3.OpOpti if len(key) == 0 && !(clientv3.IsOptsWithFromKey(opts) || clientv3.IsOptsWithPrefix(opts)) { return nil, rpctypes.ErrEmptyKey } - r, err := kv.KV.Do(ctx, kv.prefixOp(clientv3.OpGet(key, opts...))) + getOp := clientv3.OpGet(key, opts...) + if !getOp.IsSortOptionValid() { + return nil, rpctypes.ErrInvalidSortOption + } + r, err := kv.KV.Do(ctx, kv.prefixOp(getOp)) if err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints.go b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints.go index 72bd227874..ffe77eff7b 100644 --- a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints.go +++ b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package endpoints import ( diff --git a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints_impl.go b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints_impl.go index 37f04803e1..7796f7c9cb 100644 --- a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints_impl.go +++ b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/endpoints_impl.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package endpoints // TODO: The API is not yet implemented. diff --git a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/internal/update.go b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/internal/update.go index 71aa83fed4..d42f49062a 100644 --- a/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/internal/update.go +++ b/vendor/go.etcd.io/etcd/client/v3/naming/endpoints/internal/update.go @@ -1,3 +1,17 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package internal // Operation describes action performed on endpoint (addition vs deletion). diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go index e8c0c1e08c..6492fbdff7 100644 --- a/vendor/go.etcd.io/etcd/client/v3/op.go +++ b/vendor/go.etcd.io/etcd/client/v3/op.go @@ -581,3 +581,19 @@ func IsOptsWithFromKey(opts []OpOption) bool { return ret.isOptsWithFromKey } + +func (op Op) IsSortOptionValid() bool { + if op.sort != nil { + sortOrder := int32(op.sort.Order) + sortTarget := int32(op.sort.Target) + + if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok { + return false + } + + if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok { + return false + } + } + return true +} diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go index 04f157a1dc..6cd4be047e 100644 --- a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go +++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go @@ -19,6 +19,7 @@ package clientv3 import ( "context" + "errors" "io" "sync" "time" @@ -53,6 +54,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Debug( "retrying of unary invoker", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), ) lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) 
@@ -62,6 +64,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien c.GetLogger().Warn( "retrying of unary invoker failed", zap.String("target", cc.Target()), + zap.String("method", method), zap.Uint("attempt", attempt), zap.Error(lastErr), ) @@ -91,7 +94,7 @@ func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClien } continue } - if !isSafeRetry(c.lg, lastErr, callOpts) { + if !isSafeRetry(c, lastErr, callOpts) { return lastErr } } @@ -270,7 +273,7 @@ func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{} return true, err } - return isSafeRetry(s.client.lg, err, s.callOpts), err + return isSafeRetry(s.client, err, s.callOpts), err } func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { @@ -310,17 +313,28 @@ func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) erro } // isSafeRetry returns "true", if request is safe for retry with the given error. -func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool { +func isSafeRetry(c *Client, err error, callOpts *options) bool { if isContextError(err) { return false } + + // Situation when learner refuses RPC it is supposed to not serve is from the server + // perspective not retryable. + // But for backward-compatibility reasons we need to support situation that + // customer provides mix of learners (not yet voters) and voters with an + // expectation to pick voter in the next attempt. + // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability. + if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 { + return true + } + switch callOpts.retryPolicy { case repeatable: return isSafeRetryImmutableRPC(err) case nonRepeatable: return isSafeRetryMutableRPC(err) default: - lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) + c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String())) return false } } diff --git a/vendor/go.etcd.io/etcd/client/v3/snapshot/v3_snapshot.go b/vendor/go.etcd.io/etcd/client/v3/snapshot/v3_snapshot.go index 39d5211aae..d5a8c26828 100644 --- a/vendor/go.etcd.io/etcd/client/v3/snapshot/v3_snapshot.go +++ b/vendor/go.etcd.io/etcd/client/v3/snapshot/v3_snapshot.go @@ -36,21 +36,25 @@ func hasChecksum(n int64) bool { return (n % 512) == sha256.Size } -// Save fetches snapshot from remote etcd server and saves data -// to target path. If the context "ctx" is canceled or timed out, +// SaveWithVersion fetches snapshot from remote etcd server, saves data +// to target path and returns server version. If the context "ctx" is canceled or timed out, // snapshot save stream will error out (e.g. context.Canceled, // context.DeadlineExceeded). Make sure to specify only one endpoint // in client configuration. Snapshot API must be requested to a // selected node, and saved snapshot is the point-in-time state of // the selected node. -func Save(ctx context.Context, lg *zap.Logger, cfg clientv3.Config, dbPath string) error { +// Etcd
+
+NOTICE: Downgrade is an experimental feature in v3.6 and is not recommended for production clusters.
+
+Downgrade provides commands to downgrade a cluster.
+Normally etcd members cannot be downgraded, due to the cluster version mechanism.
+
+After initial bootstrap, cluster members agree on the cluster version. Every 5 seconds, the leader checks the versions of all members and picks the lowest minor version.
+New members will refuse to join a cluster whose cluster version is newer than their own, which normally prevents a cluster from being downgraded.
+The downgrade commands allow a cluster administrator to force the cluster version down to the previous minor version, making a downgrade of the cluster possible.
+
+A downgrade is executed in stages:
+1. Verify that the cluster is ready to be downgraded by running `etcdctl downgrade validate <TARGET_VERSION>`.
+2. Start the downgrade process by running `etcdctl downgrade enable <TARGET_VERSION>`.
+3. For each cluster member:
+   1. Ensure that the member is ready for the downgrade by confirming that it wrote the `The server is ready to downgrade` log line.
+   2. Replace the member binary with one of the older version.
+   3. Confirm that the member started correctly and rejoined the cluster.
+4. Ensure that the downgrade process succeeded by checking the leader log for `the cluster has been downgraded`.
+
+A downgrade can be canceled by running the `etcdctl downgrade cancel` command.
+
+If a downgrade is canceled, the cluster version returns to its normal behavior (picking the lowest member minor version).
+If no members were downgraded, the cluster version returns to its original value.
+If at least one member was downgraded, the cluster version stays at the `<TARGET_VERSION>` until the downgraded members are upgraded back.
+
+### DOWNGRADE VALIDATE \<TARGET_VERSION\>
+
+DOWNGRADE VALIDATE validates the downgrade capability before starting a downgrade.
+
+#### Example
+
+```bash
+./etcdctl downgrade validate 3.5
+Downgrade validate success, cluster version 3.6
+
+./etcdctl downgrade validate 3.4
+Error: etcdserver: invalid downgrade target version
+
+```
+
+### DOWNGRADE ENABLE \<TARGET_VERSION\>
+
+DOWNGRADE ENABLE starts a downgrade action on the cluster.
+
+#### Example
+
+```bash
+./etcdctl downgrade enable 3.5
+Downgrade enable success, cluster version 3.6
+```
+
+### DOWNGRADE CANCEL
+
+DOWNGRADE CANCEL cancels the ongoing downgrade action on the cluster.
+
+#### Example
+
+```bash
+./etcdctl downgrade cancel
+Downgrade cancel success, cluster version 3.5
+```
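The staged workflow above can also be driven programmatically through the `Downgrade` API that this change adds to `clientv3` (see the `Maintenance` interface earlier in this diff). The following is a minimal sketch, not part of the vendored diff itself; the endpoint and target version are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Assumption: a cluster member is reachable on localhost:2379.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Stage 1: verify that the cluster can be downgraded to the target version.
	if _, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, "3.5"); err != nil {
		panic(err)
	}

	// Stage 2: enable the downgrade; members can then be replaced with
	// older binaries as described in the steps above.
	resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, "3.5")
	if err != nil {
		panic(err)
	}
	fmt.Println("downgrade enabled, cluster version:", resp.Version)
}
```

+

## Concurrency commands

### LOCK [options] \<lockname\> [command arg1 arg2 ...]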
diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/cluster_health.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/cluster_health.go index a89646b9f5..653be46854 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/cluster_health.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/cluster_health.go @@ -18,7 +18,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "os" "os/signal" @@ -89,7 +89,7 @@ func handleClusterHealth(c *cli.Context) error { result := struct{ Health string }{} nresult := struct{ Health bool }{} - bytes, err := ioutil.ReadAll(resp.Body) + bytes, err := io.ReadAll(resp.Body) if err != nil { fmt.Printf("failed to check the health of member %s on %s: %v\n", m.ID, url, err) continue diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/util.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/util.go index b80486b7ec..7db2df56f6 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/util.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv2/command/util.go @@ -19,7 +19,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -48,7 +47,7 @@ func argOrStdin(args []string, stdin io.Reader, i int) (string, error) { if i < len(args) { return args[i], nil } - bytes, err := ioutil.ReadAll(stdin) + bytes, err := io.ReadAll(stdin) if string(bytes) == "" || err != nil { return "", ErrNoAvailSrc } diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/check.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/check.go index a2a5ca3159..50d6d3bbd7 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/check.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/check.go @@ -130,6 +130,9 @@ func NewCheckPerfCommand() *cobra.Command { cmd.Flags().StringVar(&checkPerfPrefix, "prefix", "/etcdctl-check-perf/", "The prefix for writing the performance check's keys.") cmd.Flags().BoolVar(&autoCompact, "auto-compact", false, "Compact storage with last revision after test is finished.") cmd.Flags().BoolVar(&autoDefrag, "auto-defrag", false, "Defragment storage after test is finished.") + cmd.RegisterFlagCompletionFunc("load", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"small", "medium", "large", "xLarge"}, cobra.ShellCompDirectiveDefault + }) return cmd } diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/completion_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/completion_command.go new file mode 100644 index 0000000000..66a213cd3a --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/completion_command.go @@ -0,0 +1,84 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package command + +import ( + "os" + + "github.com/spf13/cobra" +) + +func NewCompletionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", + Long: `To load completions: + +Bash: + + $ source <(etcdctl completion bash) + + # To load completions for each session, execute once: + # Linux: + $ etcdctl completion bash > /etc/bash_completion.d/etcdctl + # macOS: + $ etcdctl completion bash > /usr/local/etc/bash_completion.d/etcdctl + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ etcdctl completion zsh > "${fpath[1]}/_etcdctl" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ etcdctl completion fish | source + + # To load completions for each session, execute once: + $ etcdctl completion fish > ~/.config/fish/completions/etcdctl.fish + +PowerShell: + + PS> etcdctl completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> etcdctl completion powershell > etcdctl.ps1 + # and source this file from your PowerShell profile. +`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.ExactValidArgs(1), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + }, + } + + return cmd +} diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/defrag_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/defrag_command.go index 42e47cbb90..9b4f29a6aa 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/defrag_command.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/defrag_command.go @@ -17,6 +17,7 @@ package command import ( "fmt" "os" + "time" "github.com/spf13/cobra" "go.etcd.io/etcd/etcdutl/v3/etcdutl" @@ -36,6 +37,7 @@ func NewDefragCommand() *cobra.Command { } cmd.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list") cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Optional. If present, defragments a data directory not in use by etcd.") + cmd.MarkFlagDirname("data-dir") return cmd } @@ -52,13 +54,15 @@ func defragCommandFunc(cmd *cobra.Command, args []string) { c := mustClientFromCmd(cmd) for _, ep := range endpointsFromCluster(cmd) { ctx, cancel := commandCtx(cmd) + start := time.Now() _, err := c.Defragment(ctx, ep) + d := time.Now().Sub(start) cancel() if err != nil { - fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s] (%v)\n", ep, err) + fmt.Fprintf(os.Stderr, "Failed to defragment etcd member[%s]. took %s. (%v)\n", ep, d.String(), err) failures++ } else { - fmt.Printf("Finished defragmenting etcd member[%s]\n", ep) + fmt.Printf("Finished defragmenting etcd member[%s]. 
took %s\n", ep, d.String()) } } diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/downgrade_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/downgrade_command.go new file mode 100644 index 0000000000..bccae16c33 --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/downgrade_command.go @@ -0,0 +1,136 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "errors" + + "github.com/spf13/cobra" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/pkg/v3/cobrautl" +) + +// NewDowngradeCommand returns the cobra command for "downgrade". +func NewDowngradeCommand() *cobra.Command { + dc := &cobra.Command{ + Use: "downgrade ", + Short: "Downgrade related commands", + } + + dc.AddCommand(NewDowngradeValidateCommand()) + dc.AddCommand(NewDowngradeEnableCommand()) + dc.AddCommand(NewDowngradeCancelCommand()) + + return dc +} + +// NewDowngradeValidateCommand returns the cobra command for "downgrade validate". +func NewDowngradeValidateCommand() *cobra.Command { + cc := &cobra.Command{ + Use: "validate ", + Short: "Validate downgrade capability before starting downgrade", + + Run: downgradeValidateCommandFunc, + } + return cc +} + +// NewDowngradeEnableCommand returns the cobra command for "downgrade enable". +func NewDowngradeEnableCommand() *cobra.Command { + cc := &cobra.Command{ + Use: "enable ", + Short: "Start a downgrade action to cluster", + + Run: downgradeEnableCommandFunc, + } + return cc +} + +// NewDowngradeCancelCommand returns the cobra command for "downgrade cancel". +func NewDowngradeCancelCommand() *cobra.Command { + cc := &cobra.Command{ + Use: "cancel", + Short: "Cancel the ongoing downgrade action to cluster", + + Run: downgradeCancelCommandFunc, + } + return cc +} + +// downgradeValidateCommandFunc executes the "downgrade validate" command. +func downgradeValidateCommandFunc(cmd *cobra.Command, args []string) { + if len(args) < 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided")) + } + if len(args) > 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments")) + } + targetVersion := args[0] + + if len(targetVersion) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided")) + } + + ctx, cancel := commandCtx(cmd) + cli := mustClientFromCmd(cmd) + + resp, err := cli.Downgrade(ctx, clientv3.DowngradeValidate, targetVersion) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.DowngradeValidate(*resp) +} + +// downgradeEnableCommandFunc executes the "downgrade enable" command. 
+func downgradeEnableCommandFunc(cmd *cobra.Command, args []string) { + if len(args) < 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("TARGET_VERSION not provided")) + } + if len(args) > 1 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("too many arguments")) + } + targetVersion := args[0] + + if len(targetVersion) == 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("target version not provided")) + } + + ctx, cancel := commandCtx(cmd) + cli := mustClientFromCmd(cmd) + + resp, err := cli.Downgrade(ctx, clientv3.DowngradeEnable, targetVersion) + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.DowngradeEnable(*resp) +} + +// downgradeCancelCommandFunc executes the "downgrade cancel" command. +func downgradeCancelCommandFunc(cmd *cobra.Command, args []string) { + ctx, cancel := commandCtx(cmd) + cli := mustClientFromCmd(cmd) + + resp, err := cli.Downgrade(ctx, clientv3.DowngradeCancel, "") + cancel() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + + display.DowngradeCancel(*resp) +} diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/get_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/get_command.go index c94ac08b98..34edb6fe2e 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/get_command.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/get_command.go @@ -54,6 +54,17 @@ func NewGetCommand() *cobra.Command { cmd.Flags().BoolVar(&getKeysOnly, "keys-only", false, "Get only the keys") cmd.Flags().BoolVar(&getCountOnly, "count-only", false, "Get only the count") cmd.Flags().BoolVar(&printValueOnly, "print-value-only", false, `Only write values when using the "simple" output format`) + + cmd.RegisterFlagCompletionFunc("consistency", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"l", "s"}, cobra.ShellCompDirectiveDefault + }) + cmd.RegisterFlagCompletionFunc("order", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"ASCEND", "DESCEND"}, cobra.ShellCompDirectiveDefault + }) + cmd.RegisterFlagCompletionFunc("sort-by", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"CREATE", "KEY", "MODIFY", "VALUE", "VERSION"}, cobra.ShellCompDirectiveDefault + }) + return cmd } diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/global.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/global.go index c50ab5963a..9097419d3e 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/global.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/global.go @@ -19,7 +19,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "strings" "time" @@ -141,7 +140,7 @@ func clientConfigFromCmd(cmd *cobra.Command) *clientConfig { // too many routine connection disconnects to turn on by default. 
// // See https://github.com/etcd-io/etcd/pull/9623 for background - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr)) } cfg := &clientConfig{} @@ -254,7 +253,7 @@ func argOrStdin(args []string, stdin io.Reader, i int) (string, error) { if i < len(args) { return args[i], nil } - bytes, err := ioutil.ReadAll(stdin) + bytes, err := io.ReadAll(stdin) if string(bytes) == "" || err != nil { return "", errors.New("no available argument and stdin") } diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/make_mirror_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/make_mirror_command.go index aaa51eae9e..3d8b869d0c 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/make_mirror_command.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/make_mirror_command.go @@ -43,6 +43,7 @@ var ( mmuser string mmpassword string mmnodestprefix bool + mmrev int64 ) // NewMakeMirrorCommand returns the cobra command for "makeMirror". @@ -54,6 +55,7 @@ func NewMakeMirrorCommand() *cobra.Command { } c.Flags().StringVar(&mmprefix, "prefix", "", "Key-value prefix to mirror") + c.Flags().Int64Var(&mmrev, "rev", 0, "Specify the kv revision to start to mirror") c.Flags().StringVar(&mmdestprefix, "dest-prefix", "", "destination prefix to mirror a prefix to a different prefix in the destination cluster") c.Flags().BoolVar(&mmnodestprefix, "no-dest-prefix", false, "mirror key-values to the root of the destination cluster") c.Flags().StringVar(&mmcert, "dest-cert", "", "Identify secure client using this TLS certificate file for the destination cluster") @@ -130,6 +132,11 @@ func makeMirrorCommandFunc(cmd *cobra.Command, args []string) { func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) error { total := int64(0) + // if destination prefix is specified and remove destination prefix is true return error + if mmnodestprefix && len(mmdestprefix) > 0 { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, errors.New("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one")) + } + go func() { for { time.Sleep(30 * time.Second) @@ -137,33 +144,37 @@ func makeMirror(ctx context.Context, c *clientv3.Client, dc *clientv3.Client) er } }() - s := mirror.NewSyncer(c, mmprefix, 0) + startRev := mmrev - 1 + if startRev < 0 { + startRev = 0 + } - rc, errc := s.SyncBase(ctx) + s := mirror.NewSyncer(c, mmprefix, startRev) - // if destination prefix is specified and remove destination prefix is true return error - if mmnodestprefix && len(mmdestprefix) > 0 { - cobrautl.ExitWithError(cobrautl.ExitBadArgs, fmt.Errorf("`--dest-prefix` and `--no-dest-prefix` cannot be set at the same time, choose one")) - } + // If a rev is provided, then do not sync the whole key space. 
+ // Instead, just start watching the key space starting from the rev + if startRev == 0 { + rc, errc := s.SyncBase(ctx) - // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix - if !mmnodestprefix && len(mmdestprefix) == 0 { - mmdestprefix = mmprefix - } + // if remove destination prefix is false and destination prefix is empty set the value of destination prefix same as prefix + if !mmnodestprefix && len(mmdestprefix) == 0 { + mmdestprefix = mmprefix + } - for r := range rc { - for _, kv := range r.Kvs { - _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value)) - if err != nil { - return err + for r := range rc { + for _, kv := range r.Kvs { + _, err := dc.Put(ctx, modifyPrefix(string(kv.Key)), string(kv.Value)) + if err != nil { + return err + } + atomic.AddInt64(&total, 1) } - atomic.AddInt64(&total, 1) } - } - err := <-errc - if err != nil { - return err + err := <-errc + if err != nil { + return err + } } wc := s.SyncUpdates(ctx) diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer.go index 2d31d9ec8c..287f88984c 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer.go @@ -50,6 +50,10 @@ type printer interface { EndpointHashKV([]epHashKV) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) + DowngradeValidate(r v3.DowngradeResponse) + DowngradeEnable(r v3.DowngradeResponse) + DowngradeCancel(r v3.DowngradeResponse) + Alarm(v3.AlarmResponse) RoleAdd(role string, r v3.AuthRoleAddResponse) @@ -110,11 +114,17 @@ func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) { func (p *printerRPC) MemberUpdate(id uint64, r v3.MemberUpdateResponse) { p.p((*pb.MemberUpdateResponse)(&r)) } +func (p *printerRPC) MemberPromote(id uint64, r v3.MemberPromoteResponse) { + p.p((*pb.MemberPromoteResponse)(&r)) +} func (p *printerRPC) MemberList(r v3.MemberListResponse) { p.p((*pb.MemberListResponse)(&r)) } func (p *printerRPC) Alarm(r v3.AlarmResponse) { p.p((*pb.AlarmResponse)(&r)) } func (p *printerRPC) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p((*pb.MoveLeaderResponse)(&r)) } +func (p *printerRPC) DowngradeValidate(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } +func (p *printerRPC) DowngradeEnable(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } +func (p *printerRPC) DowngradeCancel(r v3.DowngradeResponse) { p.p((*pb.DowngradeResponse)(&r)) } func (p *printerRPC) RoleAdd(_ string, r v3.AuthRoleAddResponse) { p.p((*pb.AuthRoleAddResponse)(&r)) } func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { p.p((*pb.AuthRoleGetResponse)(&r)) } func (p *printerRPC) RoleDelete(_ string, r v3.AuthRoleDeleteResponse) { @@ -160,6 +170,9 @@ func (p *printerUnsupported) EndpointStatus([]epStatus) { p.p(nil) } func (p *printerUnsupported) EndpointHashKV([]epHashKV) { p.p(nil) } func (p *printerUnsupported) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p(nil) } +func (p *printerUnsupported) DowngradeValidate(r v3.DowngradeResponse) { p.p(nil) } +func (p *printerUnsupported) DowngradeEnable(r v3.DowngradeResponse) { p.p(nil) } +func (p *printerUnsupported) DowngradeCancel(r v3.DowngradeResponse) { p.p(nil) } func makeMemberListTable(r v3.MemberListResponse) (hdr []string, rows [][]string) { hdr = []string{"ID", "Status", "Name", "Peer Addrs", "Client Addrs", "Is Learner"} @@ -198,7 
+211,7 @@ func makeEndpointHealthTable(healthList []epHealth) (hdr []string, rows [][]stri } func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]string) { - hdr = []string{"endpoint", "ID", "version", "db size", "is leader", "is learner", "raft term", + hdr = []string{"endpoint", "ID", "version", "db size", "db size in use", "is leader", "is learner", "raft term", "raft index", "raft applied index", "errors"} for _, status := range statusList { rows = append(rows, []string{ @@ -206,6 +219,7 @@ func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]stri fmt.Sprintf("%x", status.Resp.Header.MemberId), status.Resp.Version, humanize.Bytes(uint64(status.Resp.DbSize)), + humanize.Bytes(uint64(status.Resp.DbSizeInUse)), fmt.Sprint(status.Resp.Leader == status.Resp.Header.MemberId), fmt.Sprint(status.Resp.IsLearner), fmt.Sprint(status.Resp.RaftTerm), diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_fields.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_fields.go index ca4611c735..2cb5def1b3 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_fields.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_fields.go @@ -156,6 +156,7 @@ func (p *fieldsPrinter) EndpointStatus(eps []epStatus) { p.hdr(ep.Resp.Header) fmt.Printf("\"Version\" : %q\n", ep.Resp.Version) fmt.Println(`"DBSize" :`, ep.Resp.DbSize) + fmt.Println(`"DBSizeInUse" :`, ep.Resp.DbSizeInUse) fmt.Println(`"Leader" :`, ep.Resp.Leader) fmt.Println(`"IsLearner" :`, ep.Resp.IsLearner) fmt.Println(`"RaftIndex" :`, ep.Resp.RaftIndex) diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_json.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_json.go index ca90a4a311..4c75c85a8c 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_json.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_json.go @@ -67,7 +67,7 @@ func printMemberListWithHexJSON(r clientv3.MemberListResponse) { b = strconv.AppendUint(nil, r.Header.MemberId, 16) buffer.Write(b) buffer.WriteString("\",\"raft_term\":") - b = strconv.AppendUint(nil, r.Header.RaftTerm, 16) + b = strconv.AppendUint(nil, r.Header.RaftTerm, 10) buffer.Write(b) buffer.WriteByte('}') for i := 0; i < len(r.Members); i++ { diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_simple.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_simple.go index c5939fa472..32f8cac604 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_simple.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/printer_simple.go @@ -24,6 +24,8 @@ import ( v3 "go.etcd.io/etcd/client/v3" ) +const rootRole = "root" + type simplePrinter struct { isHex bool valueOnly bool @@ -174,12 +176,30 @@ func (s *simplePrinter) MoveLeader(leader, target uint64, r v3.MoveLeaderRespons fmt.Printf("Leadership transferred from %s to %s\n", types.ID(leader), types.ID(target)) } +func (s *simplePrinter) DowngradeValidate(r v3.DowngradeResponse) { + fmt.Printf("Downgrade validate success, cluster version %s\n", r.Version) +} +func (s *simplePrinter) DowngradeEnable(r v3.DowngradeResponse) { + fmt.Printf("Downgrade enable success, cluster version %s\n", r.Version) +} +func (s *simplePrinter) DowngradeCancel(r v3.DowngradeResponse) { + fmt.Printf("Downgrade cancel success, cluster version %s\n", r.Version) +} + func (s *simplePrinter) RoleAdd(role string, r v3.AuthRoleAddResponse) { fmt.Printf("Role %s created\n", role) } func (s *simplePrinter) RoleGet(role 
string, r v3.AuthRoleGetResponse) { fmt.Printf("Role %s\n", role) + if rootRole == role && r.Perm == nil { + fmt.Println("KV Read:") + fmt.Println("\t[, ") + fmt.Println("KV Write:") + fmt.Println("\t[, ") + return + } + fmt.Println("KV Read:") printRange := func(perm *v3.Permission) { @@ -190,7 +210,7 @@ func (s *simplePrinter) RoleGet(role string, r v3.AuthRoleGetResponse) { } else { fmt.Printf("\t[%s, ", sKey) } - if v3.GetPrefixRangeEnd(sKey) == sRangeEnd { + if v3.GetPrefixRangeEnd(sKey) == sRangeEnd && len(sKey) > 0 { fmt.Printf(" (prefix %s)", sKey) } fmt.Printf("\n") diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/put_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/put_command.go index 35eb32148d..b8a2d38b56 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/put_command.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/put_command.go @@ -20,7 +20,7 @@ import ( "strconv" "github.com/spf13/cobra" - "go.etcd.io/etcd/client/v3" + clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/pkg/v3/cobrautl" ) diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/snapshot_command.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/snapshot_command.go index e5d3f3f1c5..ea52f3a269 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/snapshot_command.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/snapshot_command.go @@ -88,6 +88,8 @@ func NewSnapshotRestoreCommand() *cobra.Command { cmd.Flags().StringVar(&restorePeerURLs, "initial-advertise-peer-urls", defaultInitialAdvertisePeerURLs, "List of this member's peer URLs to advertise to the rest of the cluster") cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member") cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)") + cmd.MarkFlagDirname("data-dir") + cmd.MarkFlagDirname("wal-dir") return cmd } @@ -112,10 +114,14 @@ func snapshotSaveCommandFunc(cmd *cobra.Command, args []string) { defer cancel() path := args[0] - if err := snapshot.Save(ctx, lg, *cfg, path); err != nil { + version, err := snapshot.SaveWithVersion(ctx, lg, *cfg, path) + if err != nil { cobrautl.ExitWithError(cobrautl.ExitInterrupted, err) } fmt.Printf("Snapshot saved at %s\n", path) + if version != "" { + fmt.Printf("Server version %s\n", version) + } } func snapshotStatusCommandFunc(cmd *cobra.Command, args []string) { diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/util.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/util.go index cd15fd3395..27f1e51e9b 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/util.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/command/util.go @@ -19,7 +19,7 @@ import ( "crypto/tls" "encoding/hex" "fmt" - "io/ioutil" + "io" "net/http" "regexp" "strconv" @@ -117,7 +117,7 @@ func endpointMemoryMetrics(host string, scfg *secureCfg) float64 { fmt.Println(fmt.Sprintf("fetch error: %v", err)) return 0.0 } - byts, readerr := ioutil.ReadAll(resp.Body) + byts, readerr := io.ReadAll(resp.Body) resp.Body.Close() if readerr != nil { fmt.Println(fmt.Sprintf("fetch error: reading %s: %v", url, readerr)) diff --git a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/ctl.go b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/ctl.go index d25263c734..bfe8f8674a 100644 --- a/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/ctl.go +++ b/vendor/go.etcd.io/etcd/etcdctl/v3/ctlv3/ctl.go @@ -53,6 +53,9 @@ func init() { rootCmd.PersistentFlags().StringVarP(&globalFlags.OutputFormat, 
"write-out", "w", "simple", "set the output format (fields, json, protobuf, simple, table)") rootCmd.PersistentFlags().BoolVar(&globalFlags.IsHex, "hex", false, "print byte strings as hex encoded strings") + rootCmd.RegisterFlagCompletionFunc("write-out", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"fields", "json", "protobuf", "simple", "table"}, cobra.ShellCompDirectiveDefault + }) rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections") rootCmd.PersistentFlags().DurationVar(&globalFlags.CommandTimeOut, "command-timeout", defaultCommandTimeOut, "timeout for short running command (excluding dial timeout)") @@ -93,6 +96,8 @@ func init() { command.NewUserCommand(), command.NewRoleCommand(), command.NewCheckCommand(), + command.NewCompletionCommand(), + command.NewDowngradeCommand(), ) } diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/backup_command.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/backup_command.go index c09bcf14a7..ce9b50bd9a 100644 --- a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/backup_command.go +++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/backup_command.go @@ -27,15 +27,15 @@ import ( "go.etcd.io/etcd/pkg/v3/idutil" "go.etcd.io/etcd/pkg/v3/pbutil" "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/datadir" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/datadir" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/etcd/server/v3/storage/wal" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" "go.etcd.io/etcd/server/v3/verify" - "go.etcd.io/etcd/server/v3/wal" - "go.etcd.io/etcd/server/v3/wal/walpb" bolt "go.etcd.io/bbolt" "go.uber.org/zap" @@ -64,6 +64,10 @@ func NewBackupCommand() *cobra.Command { cmd.Flags().BoolVar(&withV3, "with-v3", true, "Backup v3 backend data") cmd.MarkFlagRequired("data-dir") cmd.MarkFlagRequired("backup-dir") + cmd.MarkFlagDirname("data-dir") + cmd.MarkFlagDirname("wal-dir") + cmd.MarkFlagDirname("backup-dir") + cmd.MarkFlagDirname("backup-wal-dir") return cmd } @@ -114,7 +118,7 @@ func HandleBackup(withV3 bool, srcDir string, destDir string, srcWAL string, des destWAL = datadir.ToWalDir(destDir) } - if err := fileutil.CreateDirAll(destSnap); err != nil { + if err := fileutil.CreateDirAll(lg, destSnap); err != nil { lg.Fatal("failed creating backup snapshot dir", zap.String("dest-snap", destSnap), zap.Error(err)) } @@ -282,7 +286,6 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir case src = <-ch: case <-time.After(time.Second): lg.Fatal("timed out waiting to acquire lock on", zap.String("srcDB", srcDB)) - src = <-ch } defer src.Close() @@ -307,22 +310,22 @@ func saveDB(lg *zap.Logger, destDB, srcDB string, idx uint64, term uint64, desir be := backend.NewDefaultBackend(destDB) defer be.Close() - - if err := membership.TrimClusterFromBackend(be); err != nil { + ms := schema.NewMembershipBackend(lg, be) + if err := ms.TrimClusterFromBackend(); err != nil { lg.Fatal("bbolt tx.Membership failed", zap.Error(err)) } raftCluster := membership.NewClusterFromMembers(lg, desired.clusterId, desired.members) raftCluster.SetID(desired.nodeId, desired.clusterId) - raftCluster.SetBackend(be) + 
raftCluster.SetBackend(ms) raftCluster.PushMembershipToStorage() if !v3 { tx := be.BatchTx() tx.Lock() defer tx.Unlock() - cindex.UnsafeCreateMetaBucket(tx) - cindex.UnsafeUpdateConsistentIndex(tx, idx, term, false) + schema.UnsafeCreateMetaBucket(tx) + schema.UnsafeUpdateConsistentIndex(tx, idx, term, false) } else { // Thanks to translateWAL not moving entries, but just replacing them with // 'empty', there is no need to update the consistency index. diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/completion_commmand.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/completion_commmand.go new file mode 100644 index 0000000000..792799b15b --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/completion_commmand.go @@ -0,0 +1,84 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdutl + +import ( + "os" + + "github.com/spf13/cobra" +) + +func NewCompletionCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "completion [bash|zsh|fish|powershell]", + Short: "Generate completion script", + Long: `To load completions: + +Bash: + + $ source <(etcdutl completion bash) + + # To load completions for each session, execute once: + # Linux: + $ etcdutl completion bash > /etc/bash_completion.d/etcdutl + # macOS: + $ etcdutl completion bash > /usr/local/etc/bash_completion.d/etcdutl + +Zsh: + + # If shell completion is not already enabled in your environment, + # you will need to enable it. You can execute the following once: + + $ echo "autoload -U compinit; compinit" >> ~/.zshrc + + # To load completions for each session, execute once: + $ etcdutl completion zsh > "${fpath[1]}/_etcdutl" + + # You will need to start a new shell for this setup to take effect. + +fish: + + $ etcdutl completion fish | source + + # To load completions for each session, execute once: + $ etcdutl completion fish > ~/.config/fish/completions/etcdutl.fish + +PowerShell: + + PS> etcdutl completion powershell | Out-String | Invoke-Expression + + # To load completions for every new session, run: + PS> etcdutl completion powershell > etcdutl.ps1 + # and source this file from your PowerShell profile. 
+`, + DisableFlagsInUseLine: true, + ValidArgs: []string{"bash", "zsh", "fish", "powershell"}, + Args: cobra.ExactValidArgs(1), + Run: func(cmd *cobra.Command, args []string) { + switch args[0] { + case "bash": + cmd.Root().GenBashCompletion(os.Stdout) + case "zsh": + cmd.Root().GenZshCompletion(os.Stdout) + case "fish": + cmd.Root().GenFishCompletion(os.Stdout, true) + case "powershell": + cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + } + }, + } + + return cmd +} diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/defrag_command.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/defrag_command.go index 1660dd7071..fe4f8430d5 100644 --- a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/defrag_command.go +++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/defrag_command.go @@ -21,8 +21,8 @@ import ( "github.com/spf13/cobra" "go.etcd.io/etcd/pkg/v3/cobrautl" - "go.etcd.io/etcd/server/v3/datadir" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/datadir" ) var ( @@ -38,6 +38,7 @@ func NewDefragCommand() *cobra.Command { } cmd.Flags().StringVar(&defragDataDir, "data-dir", "", "Required. Defragments a data directory not in use by etcd.") cmd.MarkFlagRequired("data-dir") + cmd.MarkFlagDirname("data-dir") return cmd } @@ -65,7 +66,7 @@ func DefragData(dataDir string) error { case <-bch: case <-time.After(time.Second): fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q. "+ - "To defrag a running etcd instance, omit --data-dir.\n", dbDir) + "To defrag a running etcd instance, use `etcdctl defrag` instead.\n", dbDir) <-bch } return be.Defrag() diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/migrate_command.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/migrate_command.go new file mode 100644 index 0000000000..e9bac4f37a --- /dev/null +++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/migrate_command.go @@ -0,0 +1,156 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdutl + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" + "github.com/spf13/cobra" + "go.uber.org/zap" + + "go.etcd.io/etcd/pkg/v3/cobrautl" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/datadir" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/etcd/server/v3/storage/wal" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" +) + +// NewMigrateCommand prints out the version of etcd. 
+func NewMigrateCommand() *cobra.Command { + o := newMigrateOptions() + cmd := &cobra.Command{ + Use: "migrate", + Short: "Migrates schema of etcd data dir files to make them compatible with different etcd version", + Run: func(cmd *cobra.Command, args []string) { + cfg, err := o.Config() + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitBadArgs, err) + } + err = migrateCommandFunc(cfg) + if err != nil { + cobrautl.ExitWithError(cobrautl.ExitError, err) + } + }, + } + o.AddFlags(cmd) + return cmd +} + +type migrateOptions struct { + dataDir string + targetVersion string + force bool +} + +func newMigrateOptions() *migrateOptions { + return &migrateOptions{} +} + +func (o *migrateOptions) AddFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.dataDir, "data-dir", o.dataDir, "Path to the etcd data dir") + cmd.MarkFlagRequired("data-dir") + cmd.MarkFlagDirname("data-dir") + + cmd.Flags().StringVar(&o.targetVersion, "target-version", o.targetVersion, `Target etcd version to migrate contents of data dir. Minimal value 3.5. Format "X.Y" for example 3.6.`) + cmd.MarkFlagRequired("target-version") + + cmd.Flags().BoolVar(&o.force, "force", o.force, "Ignore migration failure and forcefully override storage version. Not recommended.") +} + +func (o *migrateOptions) Config() (*migrateConfig, error) { + c := &migrateConfig{ + force: o.force, + } + var err error + dotCount := strings.Count(o.targetVersion, ".") + if dotCount != 1 { + return nil, fmt.Errorf(`wrong target version format, expected "X.Y", got %q`, o.targetVersion) + } + c.targetVersion, err = semver.NewVersion(o.targetVersion + ".0") + if err != nil { + return nil, fmt.Errorf("failed to parse target version: %w", err) + } + if c.targetVersion.LessThan(schema.V3_5) { + return nil, fmt.Errorf(`target version %q not supported. Minimal "3.5"`, storageVersionToString(c.targetVersion)) + } + + dbPath := datadir.ToBackendFileName(o.dataDir) + c.be = backend.NewDefaultBackend(dbPath) + + walPath := datadir.ToWalDir(o.dataDir) + w, err := wal.OpenForRead(GetLogger(), walPath, walpb.Snapshot{}) + if err != nil { + return nil, fmt.Errorf(`failed to open wal: %v`, err) + } + defer w.Close() + c.walVersion, err = wal.ReadWALVersion(w) + if err != nil { + return nil, fmt.Errorf(`failed to read wal: %v`, err) + } + + return c, nil +} + +type migrateConfig struct { + be backend.Backend + targetVersion *semver.Version + walVersion schema.WALVersion + force bool +} + +func migrateCommandFunc(c *migrateConfig) error { + defer c.be.Close() + lg := GetLogger() + tx := c.be.BatchTx() + current, err := schema.DetectSchemaVersion(lg, tx) + if err != nil { + lg.Error("failed to detect storage version. 
Please make sure you are using data dir from etcd v3.5 and older")
+		return err
+	}
+	if current == *c.targetVersion {
+		lg.Info("storage version up-to-date", zap.String("storage-version", storageVersionToString(&current)))
+		return nil
+	}
+	err = schema.Migrate(lg, tx, c.walVersion, *c.targetVersion)
+	if err != nil {
+		if !c.force {
+			return err
+		}
+		lg.Info("normal migrate failed, trying with force", zap.Error(err))
+		migrateForce(lg, tx, c.targetVersion)
+	}
+	c.be.ForceCommit()
+	return nil
+}
+
+func migrateForce(lg *zap.Logger, tx backend.BatchTx, target *semver.Version) {
+	tx.Lock()
+	defer tx.Unlock()
+	// Storage version is only supported since v3.6
+	if target.LessThan(schema.V3_6) {
+		schema.UnsafeClearStorageVersion(tx)
+		lg.Warn("forcefully cleared storage version")
+	} else {
+		schema.UnsafeSetStorageVersion(tx, target)
+		lg.Warn("forcefully set storage version", zap.String("storage-version", storageVersionToString(target)))
+	}
+}
+
+func storageVersionToString(ver *semver.Version) string {
+	return fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer.go
index ad4e60246e..1af5a875f4 100644
--- a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer.go
+++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer.go
@@ -66,12 +66,13 @@ func newPrinterUnsupported(n string) printer {
 func (p *printerUnsupported) DBStatus(snapshot.Status) { p.p(nil) }
 
 func makeDBStatusTable(ds snapshot.Status) (hdr []string, rows [][]string) {
-	hdr = []string{"hash", "revision", "total keys", "total size"}
+	hdr = []string{"hash", "revision", "total keys", "total size", "version"}
 	rows = append(rows, []string{
 		fmt.Sprintf("%x", ds.Hash),
 		fmt.Sprint(ds.Revision),
 		fmt.Sprint(ds.TotalKey),
 		humanize.Bytes(uint64(ds.TotalSize)),
+		ds.Version,
 	})
 	return hdr, rows
 }
diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer_fields.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer_fields.go
index 374312cf5d..d534e396ff 100644
--- a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer_fields.go
+++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/printer_fields.go
@@ -27,4 +27,5 @@ func (p *fieldsPrinter) DBStatus(r snapshot.Status) {
 	fmt.Println(`"Revision" :`, r.Revision)
 	fmt.Println(`"Keys" :`, r.TotalKey)
 	fmt.Println(`"Size" :`, r.TotalSize)
+	fmt.Println(`"Version" :`, r.Version)
 }
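Before the snapshot command changes below, a quick illustration of the new migrate entry point: NewMigrateCommand is exported by the vendored etcdutl package, so it can be mounted on any cobra root. A minimal, hypothetical wiring sketch follows; the root command, data-dir path, and target version here are illustrative assumptions, not part of this diff:

package main

import (
	"github.com/spf13/cobra"

	"go.etcd.io/etcd/etcdutl/v3/etcdutl"
)

func main() {
	// Mount the vendored migrate command on an illustrative root command.
	root := &cobra.Command{Use: "etcdutl"}
	root.AddCommand(etcdutl.NewMigrateCommand())
	// Downgrade a hypothetical data dir to the v3.5 schema. On failure the
	// command exits via cobrautl.ExitWithError rather than returning an error.
	root.SetArgs([]string{"migrate", "--data-dir", "/var/lib/etcd", "--target-version", "3.5"})
	if err := root.Execute(); err != nil {
		panic(err)
	}
}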
diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/snapshot_command.go b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/snapshot_command.go
index 94ab2a5ac9..1b3f5ef6b9 100644
--- a/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/snapshot_command.go
+++ b/vendor/go.etcd.io/etcd/etcdutl/v3/etcdutl/snapshot_command.go
@@ -20,7 +20,7 @@ import (
 
 	"go.etcd.io/etcd/etcdutl/v3/snapshot"
 	"go.etcd.io/etcd/pkg/v3/cobrautl"
-	"go.etcd.io/etcd/server/v3/datadir"
+	"go.etcd.io/etcd/server/v3/storage/datadir"
 
 	"github.com/spf13/cobra"
 )
@@ -92,7 +92,8 @@ func NewSnapshotRestoreCommand() *cobra.Command {
 	cmd.Flags().StringVar(&restoreName, "name", defaultName, "Human-readable name for this member")
 	cmd.Flags().BoolVar(&skipHashCheck, "skip-hash-check", false, "Ignore snapshot integrity hash value (required if copied from data directory)")
 
-	cmd.MarkFlagRequired("data-dir")
+	cmd.MarkFlagDirname("data-dir")
+	cmd.MarkFlagDirname("wal-dir")
 
 	return cmd
 }
diff --git a/vendor/go.etcd.io/etcd/etcdutl/v3/snapshot/v3_snapshot.go b/vendor/go.etcd.io/etcd/etcdutl/v3/snapshot/v3_snapshot.go
index 9272a8f0b1..cf7b4a6ebf 100644
--- a/vendor/go.etcd.io/etcd/etcdutl/v3/snapshot/v3_snapshot.go
+++ b/vendor/go.etcd.io/etcd/etcdutl/v3/snapshot/v3_snapshot.go
@@ -40,23 +40,24 @@ import (
 	"go.etcd.io/etcd/server/v3/etcdserver/api/snap"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
 	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/schema"
+	"go.etcd.io/etcd/server/v3/storage/wal"
+	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
 	"go.etcd.io/etcd/server/v3/verify"
-	"go.etcd.io/etcd/server/v3/wal"
-	"go.etcd.io/etcd/server/v3/wal/walpb"
 	"go.uber.org/zap"
 )
 
 // Manager defines snapshot methods.
 type Manager interface {
-	// Save fetches snapshot from remote etcd server and saves data
-	// to target path. If the context "ctx" is canceled or timed out,
+	// Save fetches snapshot from remote etcd server, saves data
+	// to target path and returns server version. If the context "ctx" is canceled or timed out,
 	// snapshot save stream will error out (e.g. context.Canceled,
 	// context.DeadlineExceeded). Make sure to specify only one endpoint
 	// in client configuration. Snapshot API must be requested to a
 	// selected node, and saved snapshot is the point-in-time state of
 	// the selected node.
-	Save(ctx context.Context, cfg clientv3.Config, dbPath string) error
+	Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error)
 
 	// Status returns the snapshot file information.
 	Status(dbPath string) (Status, error)
@@ -96,8 +97,8 @@ func hasChecksum(n int64) bool {
 }
 
 // Save fetches snapshot from remote etcd server and saves data to target path.
-func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) error {
-	return snapshot.Save(ctx, s.lg, cfg, dbPath)
+func (s *v3Manager) Save(ctx context.Context, cfg clientv3.Config, dbPath string) (version string, err error) {
+	return snapshot.SaveWithVersion(ctx, s.lg, cfg, dbPath)
 }
 
 // Status is the snapshot file status.
@@ -106,6 +107,9 @@ type Status struct {
 	Revision  int64 `json:"revision"`
 	TotalKey  int   `json:"totalKey"`
 	TotalSize int64 `json:"totalSize"`
+	// Version is equal to storageVersion of the snapshot.
+	// Empty if server does not support versioned snapshots (<3.6).
+	Version string `json:"version"`
 }
+func (ss *StubServer) Cleanup() {
+	for i := len(ss.cleanups) - 1; i >= 0; i-- {
+		ss.cleanups[i]()
+	}
+}
+
+// Addr gets the address the server is listening on.
+func (ss *StubServer) Addr() string {
+	return ss.Address
+}
+
+type dummyStubServer struct {
+	testpb.UnimplementedTestServiceServer
+	body []byte
+}
+
+func (d dummyStubServer) UnaryCall(context.Context, *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
+	return &testpb.SimpleResponse{
+		Payload: &testpb.Payload{
+			Type: testpb.PayloadType_COMPRESSABLE,
+			Body: d.body,
+		},
+	}, nil
+}
+
+// NewDummyStubServer creates a simple test server that serves Unary calls with
+// responses with the given payload.
+func NewDummyStubServer(body []byte) *StubServer {
+	return New(dummyStubServer{body: body})
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go
index 3bf58a3a1d..41758138a4 100644
--- a/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go
+++ b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go
@@ -21,7 +21,6 @@ package httputil
 
 import (
 	"io"
-	"io/ioutil"
 	"net"
 	"net/http"
 )
@@ -31,7 +30,7 @@ import (
 // therefore available for reuse.
 // Borrowed from golang/net/context/ctxhttp/cancelreq.go.
func GracefulClose(resp *http.Response) { - io.Copy(ioutil.Discard, resp.Body) + io.Copy(io.Discard, resp.Body) resp.Body.Close() } diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go index bf737a4d94..43c93ba3cc 100644 --- a/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go +++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go @@ -148,20 +148,31 @@ func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (b if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b)) } + + sort.Sort(types.URLs(a)) + sort.Sort(types.URLs(b)) + var needResolve bool + for i := range a { + if !reflect.DeepEqual(a[i], b[i]) { + needResolve = true + break + } + } + if !needResolve { + return true, nil + } + + // If URLs are not equal, try to resolve it and compare again. urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b}) if err != nil { return false, err } - preva, prevb := a, b a, b = urls[0], urls[1] sort.Sort(types.URLs(a)) sort.Sort(types.URLs(b)) for i := range a { if !reflect.DeepEqual(a[i], b[i]) { - return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)", - a[i].String(), preva[i].String(), - b[i].String(), prevb[i].String(), - ) + return false, fmt.Errorf("resolved urls: %q != %q", a[i].String(), b[i].String()) } } return true, nil @@ -174,21 +185,13 @@ func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string if len(a) != len(b) { return false, fmt.Errorf("len(%q) != len(%q)", a, b) } - urlsA := make([]url.URL, 0) - for _, str := range a { - u, err := url.Parse(str) - if err != nil { - return false, fmt.Errorf("failed to parse %q", str) - } - urlsA = append(urlsA, *u) + urlsA, err := stringsToURLs(a) + if err != nil { + return false, err } - urlsB := make([]url.URL, 0) - for _, str := range b { - u, err := url.Parse(str) - if err != nil { - return false, fmt.Errorf("failed to parse %q", str) - } - urlsB = append(urlsB, *u) + urlsB, err := stringsToURLs(b) + if err != nil { + return false, err } if lg == nil { lg, _ = zap.NewProduction() @@ -207,6 +210,18 @@ func urlsToStrings(us []url.URL) []string { return rs } +func stringsToURLs(us []string) ([]url.URL, error) { + urls := make([]url.URL, 0, len(us)) + for _, str := range us { + u, err := url.Parse(str) + if err != nil { + return nil, fmt.Errorf("failed to parse string to URL: %q", str) + } + urls = append(urls, *u) + } + return urls, nil +} + func IsNetworkTimeoutError(err error) bool { nerr, ok := err.(net.Error) return ok && nerr.Timeout() diff --git a/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go new file mode 100644 index 0000000000..8925a1ea21 --- /dev/null +++ b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go @@ -0,0 +1,52 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package notify
+
+import (
+	"sync"
+)
+
+// Notifier is a thread safe struct that can be used to send notification about
+// some event to multiple consumers.
+type Notifier struct {
+	mu      sync.RWMutex
+	channel chan struct{}
+}
+
+// NewNotifier returns a new notifier.
+func NewNotifier() *Notifier {
+	return &Notifier{
+		channel: make(chan struct{}),
+	}
+}
+
+// Receive returns channel that can be used to wait for notification.
+// Consumers will be informed by closing the channel.
+func (n *Notifier) Receive() <-chan struct{} {
+	n.mu.RLock()
+	defer n.mu.RUnlock()
+	return n.channel
+}
+
+// Notify closes the channel passed to consumers and creates a new channel to
+// be used for the next notification.
+func (n *Notifier) Notify() {
+	newChannel := make(chan struct{})
+	n.mu.Lock()
+	channelToClose := n.channel
+	n.channel = newChannel
+	n.mu.Unlock()
+	close(channelToClose)
+}
diff --git a/vendor/go.etcd.io/etcd/raft/v3/log.go b/vendor/go.etcd.io/etcd/raft/v3/log.go
index c94c41f778..82cf54aa27 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/log.go
+++ b/vendor/go.etcd.io/etcd/raft/v3/log.go
@@ -95,6 +95,9 @@ func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry
 			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
 		default:
 			offset := index + 1
+			if ci-offset > uint64(len(ents)) {
+				l.logger.Panicf("index, %d, is out of range [%d]", ci-offset, len(ents))
+			}
 			l.append(ents[ci-offset:]...)
 		}
 		l.commitTo(min(committed, lastnewi))
diff --git a/vendor/go.etcd.io/etcd/raft/v3/logger.go b/vendor/go.etcd.io/etcd/raft/v3/logger.go
index dc73b1f210..e3cb00cc9d 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/logger.go
+++ b/vendor/go.etcd.io/etcd/raft/v3/logger.go
@@ -16,7 +16,7 @@ package raft
 
 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"log"
 	"os"
 	"sync"
@@ -60,7 +60,7 @@ func getLogger() Logger {
 
 var (
 	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
-	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+	discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
 	raftLoggerMu  sync.Mutex
 	raftLogger    = Logger(defaultLogger)
 )
diff --git a/vendor/go.etcd.io/etcd/raft/v3/node.go b/vendor/go.etcd.io/etcd/raft/v3/node.go
index dca5954f7a..d374b6c0c2 100644
--- a/vendor/go.etcd.io/etcd/raft/v3/node.go
+++ b/vendor/go.etcd.io/etcd/raft/v3/node.go
@@ -223,7 +223,10 @@ func StartNode(c *Config, peers []Peer) Node {
 	if err != nil {
 		panic(err)
 	}
-	rn.Bootstrap(peers)
+	err = rn.Bootstrap(peers)
+	if err != nil {
+		c.Logger.Warningf("error occurred during starting a new node: %v", err)
+	}
 
 	n := newNode(rn)
 
@@ -369,14 +372,16 @@ func (n *node) run() {
 			// very sound and likely has bugs.
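			// (editor's note, not in the vendored source) The scan below walks
			// cs.Voters and cs.VotersOutgoing looking for the local id; if this
			// node is no longer a voter, propc is nil'ed so proposals from a
			// removed member are dropped.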
if _, okAfter := r.prs.Progress[r.id]; okBefore && !okAfter { var found bool - outer: for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} { for _, id := range sl { if id == r.id { found = true - break outer + break } } + if found { + break + } } if !found { propc = nil diff --git a/vendor/go.etcd.io/etcd/raft/v3/raft.go b/vendor/go.etcd.io/etcd/raft/v3/raft.go index c80262ebaf..5e3026ac88 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raft.go +++ b/vendor/go.etcd.io/etcd/raft/v3/raft.go @@ -647,7 +647,9 @@ func (r *raft) tickElection() { if r.promotable() && r.pastElectionTimeout() { r.electionElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) + if err := r.Step(pb.Message{From: r.id, Type: pb.MsgHup}); err != nil { + r.logger.Debugf("error occurred during election: %v", err) + } } } @@ -659,7 +661,9 @@ func (r *raft) tickHeartbeat() { if r.electionElapsed >= r.electionTimeout { r.electionElapsed = 0 if r.checkQuorum { - r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) + if err := r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}); err != nil { + r.logger.Debugf("error occurred during checking sending heartbeat: %v", err) + } } // If current leader cannot transfer leadership in electionTimeout, it becomes leader again. if r.state == StateLeader && r.leadTransferee != None { @@ -673,7 +677,9 @@ func (r *raft) tickHeartbeat() { if r.heartbeatElapsed >= r.heartbeatTimeout { r.heartbeatElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) + if err := r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}); err != nil { + r.logger.Debugf("error occurred during checking sending heartbeat: %v", err) + } } } diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go b/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go index 1ee77a9a45..d2eaa5d944 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go +++ b/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.pb.go @@ -11,6 +11,7 @@ import ( _ "github.com/gogo/protobuf/gogoproto" proto "github.com/golang/protobuf/proto" + _ "go.etcd.io/etcd/api/v3/versionpb" ) // Reference imports to suppress errors if they are not otherwise used. 
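Stepping back from the generated protobuf code that follows: the Notifier added in pkg/v3/notify above implements a one-shot broadcast, since Notify closes the channel that Receive handed out (waking every waiter) and then installs a fresh channel for the next round. A small, self-contained usage sketch; the waiter setup is illustrative, not from the diff:

package main

import (
	"fmt"
	"sync"

	"go.etcd.io/etcd/pkg/v3/notify"
)

func main() {
	n := notify.NewNotifier()
	ch := n.Receive() // capture the current round's channel

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-ch // blocks until Notify closes this channel
			fmt.Println("waiter", id, "woken")
		}(i)
	}

	n.Notify() // closes ch, waking all waiters at once
	wg.Wait()
}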
@@ -695,72 +696,75 @@ func init() { func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } var fileDescriptor_b042552c306ae59b = []byte{ - // 1026 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xdb, 0x46, - 0x17, 0x25, 0x29, 0x5a, 0x3f, 0x57, 0xb2, 0x3c, 0xbe, 0xf1, 0x17, 0x10, 0x86, 0xc1, 0xe8, 0x53, - 0x52, 0x44, 0x70, 0x11, 0xb7, 0xd0, 0xa2, 0x28, 0xba, 0xf3, 0x4f, 0x00, 0xab, 0xb0, 0xdc, 0x54, - 0x76, 0xbc, 0x28, 0x50, 0x08, 0x63, 0x71, 0x44, 0xb3, 0x15, 0x39, 0x04, 0x39, 0x72, 0xed, 0x4d, - 0x51, 0xf4, 0x09, 0xba, 0xec, 0x26, 0xdb, 0x3e, 0x40, 0x9f, 0xc2, 0x4b, 0x03, 0xdd, 0x74, 0x15, - 0x34, 0xf6, 0x8b, 0x14, 0x33, 0x1c, 0x4a, 0x94, 0x6c, 0x64, 0xd1, 0xdd, 0xcc, 0xb9, 0x67, 0xee, - 0x9c, 0x73, 0xef, 0xe5, 0x10, 0x20, 0xa1, 0x63, 0xb1, 0x13, 0x27, 0x5c, 0x70, 0x2c, 0xcb, 0x75, - 0x7c, 0xbe, 0xb9, 0xe1, 0x73, 0x9f, 0x2b, 0xe8, 0x33, 0xb9, 0xca, 0xa2, 0xed, 0x9f, 0x61, 0xe5, - 0x75, 0x24, 0x92, 0x6b, 0x74, 0xc0, 0x3e, 0x65, 0x49, 0xe8, 0x58, 0x2d, 0xb3, 0x63, 0xef, 0xd9, - 0x37, 0xef, 0x9f, 0x19, 0x03, 0x85, 0xe0, 0x26, 0xac, 0xf4, 0x22, 0x8f, 0x5d, 0x39, 0xa5, 0x42, - 0x28, 0x83, 0xf0, 0x53, 0xb0, 0x4f, 0xaf, 0x63, 0xe6, 0x98, 0x2d, 0xb3, 0xd3, 0xec, 0xae, 0xef, - 0x64, 0x77, 0xed, 0xa8, 0x94, 0x32, 0x30, 0x4b, 0x74, 0x1d, 0x33, 0x44, 0xb0, 0x0f, 0xa8, 0xa0, - 0x8e, 0xdd, 0x32, 0x3b, 0x8d, 0x81, 0x5a, 0xb7, 0x7f, 0x31, 0x81, 0x9c, 0x44, 0x34, 0x4e, 0x2f, - 0xb8, 0xe8, 0x33, 0x41, 0x3d, 0x2a, 0x28, 0x7e, 0x01, 0x30, 0xe2, 0xd1, 0x78, 0x98, 0x0a, 0x2a, - 0xb2, 0xdc, 0xf5, 0x79, 0xee, 0x7d, 0x1e, 0x8d, 0x4f, 0x64, 0x40, 0xe7, 0xae, 0x8d, 0x72, 0x40, - 0x2a, 0x0d, 0x94, 0xd2, 0xa2, 0x89, 0x0c, 0x92, 0xfe, 0x84, 0xf4, 0x57, 0x34, 0xa1, 0x90, 0xf6, - 0x77, 0x50, 0xcd, 0x15, 0x48, 0x89, 0x52, 0x81, 0xba, 0xb3, 0x31, 0x50, 0x6b, 0xfc, 0x0a, 0xaa, - 0xa1, 0x56, 0xa6, 0x12, 0xd7, 0xbb, 0x4e, 0xae, 0x65, 0x59, 0xb9, 0xce, 0x3b, 0xe3, 0xb7, 0xdf, - 0x95, 0xa0, 0xd2, 0x67, 0x69, 0x4a, 0x7d, 0x86, 0xaf, 0xc0, 0x16, 0xf3, 0x5a, 0x3d, 0xc9, 0x73, - 0xe8, 0x70, 0xb1, 0x5a, 0x92, 0x86, 0x1b, 0x60, 0x09, 0xbe, 0xe0, 0xc4, 0x12, 0x5c, 0xda, 0x18, - 0x27, 0x7c, 0xc9, 0x86, 0x44, 0x66, 0x06, 0xed, 0x65, 0x83, 0xe8, 0x42, 0x65, 0xc2, 0x7d, 0xd5, - 0xdd, 0x95, 0x42, 0x30, 0x07, 0xe7, 0x65, 0x2b, 0x3f, 0x2c, 0xdb, 0x2b, 0xa8, 0xb0, 0x48, 0x24, - 0x01, 0x4b, 0x9d, 0x4a, 0xab, 0xd4, 0xa9, 0x77, 0x57, 0x17, 0x7a, 0x9c, 0xa7, 0xd2, 0x1c, 0xdc, - 0x82, 0xf2, 0x88, 0x87, 0x61, 0x20, 0x9c, 0x6a, 0x21, 0x97, 0xc6, 0xb0, 0x0b, 0xd5, 0x54, 0x57, - 0xcc, 0xa9, 0xa9, 0x4a, 0x92, 0xe5, 0x4a, 0xe6, 0x15, 0xcc, 0x79, 0x32, 0x63, 0xc2, 0x7e, 0x60, - 0x23, 0xe1, 0x40, 0xcb, 0xec, 0x54, 0xf3, 0x8c, 0x19, 0x86, 0x2f, 0x00, 0xb2, 0xd5, 0x61, 0x10, - 0x09, 0xa7, 0x5e, 0xb8, 0xb3, 0x80, 0xa3, 0x03, 0x95, 0x11, 0x8f, 0x04, 0xbb, 0x12, 0x4e, 0x43, - 0x35, 0x36, 0xdf, 0xb6, 0xbf, 0x87, 0xda, 0x21, 0x4d, 0xbc, 0x6c, 0x7c, 0xf2, 0x0a, 0x9a, 0x0f, - 0x2a, 0xe8, 0x80, 0x7d, 0xc9, 0x05, 0x5b, 0xfc, 0x38, 0x24, 0x52, 0x30, 0x5c, 0x7a, 0x68, 0xb8, - 0xfd, 0xa7, 0x09, 0xb5, 0xd9, 0xbc, 0xe2, 0x53, 0x28, 0xcb, 0x33, 0x49, 0xea, 0x98, 0xad, 0x52, - 0xc7, 0x1e, 0xe8, 0x1d, 0x6e, 0x42, 0x75, 0xc2, 0x68, 0x12, 0xc9, 0x88, 0xa5, 0x22, 0xb3, 0x3d, - 0xbe, 0x84, 0xb5, 0x8c, 0x35, 0xe4, 0x53, 0xe1, 0xf3, 0x20, 0xf2, 0x9d, 0x92, 0xa2, 0x34, 0x33, - 0xf8, 0x1b, 0x8d, 0xe2, 0x73, 0x58, 0xcd, 0x0f, 0x0d, 0x23, 0xe9, 0xd4, 0x56, 0xb4, 0x46, 0x0e, - 0x1e, 0xb3, 0x2b, 0x81, 0xcf, 0x01, 0xe8, 0x54, 0xf0, 0xe1, 0x84, 0xd1, 0x4b, 0xa6, 0x86, 0x21, - 0x2f, 
0x68, 0x4d, 0xe2, 0x47, 0x12, 0x6e, 0xbf, 0x33, 0x01, 0xa4, 0xe8, 0xfd, 0x0b, 0x1a, 0xf9, - 0x0c, 0x3f, 0xd7, 0x63, 0x6b, 0xa9, 0xb1, 0x7d, 0x5a, 0xfc, 0x0c, 0x33, 0xc6, 0x83, 0xc9, 0x7d, - 0x09, 0x95, 0x88, 0x7b, 0x6c, 0x18, 0x78, 0xba, 0x28, 0x4d, 0x19, 0xbc, 0x7b, 0xff, 0xac, 0x7c, - 0xcc, 0x3d, 0xd6, 0x3b, 0x18, 0x94, 0x65, 0xb8, 0xe7, 0x15, 0xfb, 0x62, 0x2f, 0xf4, 0x05, 0x37, - 0xc1, 0x0a, 0x3c, 0xdd, 0x08, 0xd0, 0xa7, 0xad, 0xde, 0xc1, 0xc0, 0x0a, 0xbc, 0x76, 0x08, 0x64, - 0x7e, 0xf9, 0x49, 0x10, 0xf9, 0x93, 0xb9, 0x48, 0xf3, 0xbf, 0x88, 0xb4, 0x3e, 0x26, 0xb2, 0xfd, - 0x87, 0x09, 0x8d, 0x79, 0x9e, 0xb3, 0x2e, 0xee, 0x01, 0x88, 0x84, 0x46, 0x69, 0x20, 0x02, 0x1e, - 0xe9, 0x1b, 0xb7, 0x1e, 0xb9, 0x71, 0xc6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x84, 0xca, 0x48, - 0xb1, 0xb2, 0x8e, 0x17, 0x9e, 0x94, 0x65, 0x6b, 0xf9, 0x17, 0xa6, 0xe9, 0xc5, 0x9a, 0x95, 0x16, - 0x6a, 0xb6, 0x7d, 0x08, 0xb5, 0xd9, 0xbb, 0x8b, 0x6b, 0x50, 0x57, 0x9b, 0x63, 0x9e, 0x84, 0x74, - 0x42, 0x0c, 0x7c, 0x02, 0x6b, 0x0a, 0x98, 0xe7, 0x27, 0x26, 0xfe, 0x0f, 0xd6, 0x97, 0xc0, 0xb3, - 0x2e, 0xb1, 0xb6, 0xff, 0xb2, 0xa0, 0x5e, 0x78, 0x96, 0x10, 0xa0, 0xdc, 0x4f, 0xfd, 0xc3, 0x69, - 0x4c, 0x0c, 0xac, 0x43, 0xa5, 0x9f, 0xfa, 0x7b, 0x8c, 0x0a, 0x62, 0xea, 0xcd, 0x9b, 0x84, 0xc7, - 0xc4, 0xd2, 0xac, 0xdd, 0x38, 0x26, 0x25, 0x6c, 0x02, 0x64, 0xeb, 0x01, 0x4b, 0x63, 0x62, 0x6b, - 0xe2, 0x19, 0x17, 0x8c, 0xac, 0x48, 0x6d, 0x7a, 0xa3, 0xa2, 0x65, 0x1d, 0x95, 0x4f, 0x00, 0xa9, - 0x20, 0x81, 0x86, 0xbc, 0x8c, 0xd1, 0x44, 0x9c, 0xcb, 0x5b, 0xaa, 0xb8, 0x01, 0xa4, 0x88, 0xa8, - 0x43, 0x35, 0x44, 0x68, 0xf6, 0x53, 0xff, 0x6d, 0x94, 0x30, 0x3a, 0xba, 0xa0, 0xe7, 0x13, 0x46, - 0x00, 0xd7, 0x61, 0x55, 0x27, 0x92, 0x5f, 0xdc, 0x34, 0x25, 0x75, 0x4d, 0xdb, 0xbf, 0x60, 0xa3, - 0x1f, 0xbf, 0x9d, 0xf2, 0x64, 0x1a, 0x92, 0x86, 0xb4, 0xdd, 0x4f, 0x7d, 0xd5, 0xa0, 0x31, 0x4b, - 0x8e, 0x18, 0xf5, 0x58, 0x42, 0x56, 0xf5, 0xe9, 0xd3, 0x20, 0x64, 0x7c, 0x2a, 0x8e, 0xf9, 0x4f, - 0xa4, 0xa9, 0xc5, 0x0c, 0x18, 0xf5, 0xd4, 0xff, 0x8e, 0xac, 0x69, 0x31, 0x33, 0x44, 0x89, 0x21, - 0xda, 0xef, 0x9b, 0x84, 0x29, 0x8b, 0xeb, 0xfa, 0x56, 0xbd, 0x57, 0x1c, 0xdc, 0xfe, 0xd5, 0x84, - 0x8d, 0xc7, 0xc6, 0x03, 0xb7, 0xc0, 0x79, 0x0c, 0xdf, 0x9d, 0x0a, 0x4e, 0x0c, 0xfc, 0x04, 0xfe, - 0xff, 0x58, 0xf4, 0x6b, 0x1e, 0x44, 0xa2, 0x17, 0xc6, 0x93, 0x60, 0x14, 0xc8, 0x56, 0x7c, 0x8c, - 0xf6, 0xfa, 0x4a, 0xd3, 0xac, 0xed, 0x6b, 0x68, 0x2e, 0x7e, 0x14, 0xb2, 0x18, 0x73, 0x64, 0xd7, - 0xf3, 0xe4, 0xf8, 0x13, 0x03, 0x9d, 0xa2, 0xd8, 0x01, 0x0b, 0xf9, 0x25, 0x53, 0x11, 0x73, 0x31, - 0xf2, 0x36, 0xf6, 0xa8, 0xc8, 0x22, 0xd6, 0xa2, 0x91, 0x5d, 0xcf, 0x3b, 0xca, 0xde, 0x1e, 0x15, - 0x2d, 0xed, 0xbd, 0xb8, 0xf9, 0xe0, 0x1a, 0xb7, 0x1f, 0x5c, 0xe3, 0xe6, 0xce, 0x35, 0x6f, 0xef, - 0x5c, 0xf3, 0x9f, 0x3b, 0xd7, 0xfc, 0xed, 0xde, 0x35, 0x7e, 0xbf, 0x77, 0x8d, 0xdb, 0x7b, 0xd7, - 0xf8, 0xfb, 0xde, 0x35, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xee, 0xe3, 0x39, 0x8b, 0xbb, 0x08, - 0x00, 0x00, + // 1079 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6b, 0xe3, 0x46, + 0x14, 0x96, 0x64, 0xc5, 0xb2, 0x9f, 0x1d, 0x67, 0x32, 0x9b, 0x2e, 0xc2, 0x2c, 0x5a, 0xd7, 0xbb, + 0x65, 0x4d, 0xca, 0x26, 0x8b, 0xbb, 0x94, 0xb2, 0xb7, 0xfc, 0x58, 0x48, 0x4a, 0x9c, 0x6e, 0x9d, + 0x6c, 0x0e, 0x0b, 0x25, 0x4c, 0xac, 0xb1, 0xa2, 0xd6, 0xd6, 0x88, 0xd1, 0x38, 0x4d, 0x6e, 0xa5, + 0x97, 0x1e, 0x7a, 0x29, 0x3d, 0x95, 0x42, 0xaf, 0xbd, 0x16, 0x0a, 0xfd, 0x1f, 0x72, 0x0c, 0xf4, + 0xd2, 0xd3, 0xd2, 0x4d, 0xfe, 0x91, 0x32, 0xa3, 0x91, 0x25, 0x3b, 
0x61, 0x0f, 0xbd, 0xcd, 0x7c, + 0xef, 0xd3, 0x7b, 0xdf, 0xfb, 0xde, 0xcc, 0x08, 0x80, 0x93, 0xa1, 0x58, 0x8b, 0x39, 0x13, 0x0c, + 0x97, 0xe5, 0x3a, 0x3e, 0x69, 0xae, 0x04, 0x2c, 0x60, 0x0a, 0x5a, 0x97, 0xab, 0x34, 0xda, 0x6c, + 0x51, 0x31, 0xf0, 0xd7, 0x49, 0x1c, 0xae, 0x9f, 0x51, 0x9e, 0x84, 0x2c, 0x8a, 0x4f, 0xb2, 0x55, + 0xca, 0x68, 0xff, 0x60, 0xc2, 0xc2, 0xcb, 0x48, 0xf0, 0x0b, 0xec, 0x82, 0x7d, 0x48, 0xf9, 0xd8, + 0xb5, 0x5a, 0x66, 0xc7, 0xde, 0xb4, 0x2f, 0xdf, 0x3e, 0x34, 0xfa, 0x0a, 0xc1, 0x4d, 0x58, 0xd8, + 0x8d, 0x7c, 0x7a, 0xee, 0x96, 0x0a, 0xa1, 0x14, 0xc2, 0x1f, 0x83, 0x7d, 0x78, 0x11, 0x53, 0xd7, + 0x6c, 0x99, 0x9d, 0x46, 0x77, 0x79, 0x2d, 0x95, 0xb3, 0xa6, 0x52, 0xca, 0xc0, 0x34, 0xd1, 0x45, + 0x4c, 0x31, 0x06, 0x7b, 0x9b, 0x08, 0xe2, 0xda, 0x2d, 0xb3, 0x53, 0xef, 0xab, 0xf5, 0x0b, 0xe7, + 0xfb, 0xbf, 0xdc, 0xd2, 0x27, 0x6b, 0xcf, 0xda, 0xdf, 0x99, 0x80, 0x0e, 0x22, 0x12, 0x27, 0xa7, + 0x4c, 0xf4, 0xa8, 0x20, 0x3e, 0x11, 0x04, 0x7f, 0x0a, 0x30, 0x60, 0xd1, 0xf0, 0x38, 0x11, 0x44, + 0xa4, 0x45, 0x6a, 0x79, 0x91, 0x2d, 0x16, 0x0d, 0x0f, 0x64, 0x40, 0x17, 0xa9, 0x0e, 0x32, 0x40, + 0x4a, 0x0e, 0x95, 0xe4, 0x62, 0x37, 0x29, 0x24, 0x1b, 0x15, 0xb2, 0xd1, 0x62, 0x37, 0x0a, 0x69, + 0xbf, 0x81, 0x4a, 0xa6, 0x40, 0x6a, 0x95, 0x0a, 0x54, 0xcd, 0x7a, 0x5f, 0xad, 0xf1, 0x0b, 0xa8, + 0x8c, 0xb5, 0x32, 0x95, 0xb8, 0xd6, 0x75, 0x33, 0x2d, 0xf3, 0xca, 0x75, 0xde, 0x29, 0xbf, 0xfd, + 0x5b, 0x09, 0x9c, 0x1e, 0x4d, 0x12, 0x12, 0x50, 0xfc, 0x14, 0x6c, 0x91, 0x9b, 0x76, 0x2f, 0xcb, + 0xa1, 0xc3, 0x45, 0xdb, 0x24, 0x0d, 0xaf, 0x80, 0x25, 0xd8, 0x4c, 0x27, 0x96, 0x60, 0xb2, 0x8d, + 0x21, 0x67, 0x73, 0x6d, 0x48, 0x64, 0xda, 0xa0, 0x3d, 0xdf, 0x20, 0xf6, 0xc0, 0x19, 0xb1, 0x40, + 0x8d, 0x79, 0xa1, 0x10, 0xcc, 0xc0, 0xdc, 0xb6, 0xf2, 0x6d, 0xdb, 0x9e, 0x82, 0x43, 0x23, 0xc1, + 0x43, 0x9a, 0xb8, 0x4e, 0xab, 0xd4, 0xa9, 0x75, 0x17, 0x67, 0x86, 0x9d, 0xa5, 0xd2, 0x1c, 0xfc, + 0x00, 0xca, 0x03, 0x36, 0x1e, 0x87, 0xc2, 0xad, 0x14, 0x72, 0x69, 0x0c, 0x77, 0xa1, 0x92, 0x68, + 0xc7, 0xdc, 0xaa, 0x72, 0x12, 0xcd, 0x3b, 0x99, 0x39, 0x98, 0xf1, 0x64, 0x46, 0x4e, 0xbf, 0xa6, + 0x03, 0xe1, 0x42, 0xcb, 0xec, 0x54, 0xb2, 0x8c, 0x29, 0x86, 0x1f, 0x03, 0xa4, 0xab, 0x9d, 0x30, + 0x12, 0x6e, 0xad, 0x50, 0xb3, 0x80, 0x63, 0x17, 0x9c, 0x01, 0x8b, 0x04, 0x3d, 0x17, 0x6e, 0x5d, + 0x0d, 0x36, 0xdb, 0xb6, 0xbf, 0x82, 0xea, 0x0e, 0xe1, 0x7e, 0x7a, 0x7c, 0x32, 0x07, 0xcd, 0x5b, + 0x0e, 0xba, 0x60, 0x9f, 0x31, 0x41, 0x67, 0x6f, 0x89, 0x44, 0x0a, 0x0d, 0x97, 0x6e, 0x37, 0xdc, + 0xfe, 0xd3, 0x84, 0xea, 0xf4, 0xbc, 0xe2, 0xfb, 0x50, 0x96, 0xdf, 0xf0, 0xc4, 0x35, 0x5b, 0xa5, + 0x8e, 0xdd, 0xd7, 0x3b, 0xdc, 0x84, 0xca, 0x88, 0x12, 0x1e, 0xc9, 0x88, 0xa5, 0x22, 0xd3, 0x3d, + 0x7e, 0x02, 0x4b, 0x29, 0xeb, 0x98, 0x4d, 0x44, 0xc0, 0xc2, 0x28, 0x70, 0x4b, 0x8a, 0xd2, 0x48, + 0xe1, 0x2f, 0x34, 0x8a, 0x1f, 0xc1, 0x62, 0xf6, 0xd1, 0x71, 0x24, 0x3b, 0xb5, 0x15, 0xad, 0x9e, + 0x81, 0xfb, 0xf4, 0x5c, 0xe0, 0x47, 0x00, 0x64, 0x22, 0xd8, 0xf1, 0x88, 0x92, 0x33, 0xaa, 0x0e, + 0x43, 0x66, 0x68, 0x55, 0xe2, 0x7b, 0x12, 0x6e, 0xff, 0x6e, 0x02, 0x48, 0xd1, 0x5b, 0xa7, 0x24, + 0x0a, 0x28, 0x7e, 0xa6, 0x8f, 0xad, 0xa5, 0x8e, 0xed, 0xfd, 0xe2, 0x35, 0x4c, 0x19, 0xb7, 0x4e, + 0xee, 0x13, 0x70, 0x22, 0xe6, 0xd3, 0xe3, 0xd0, 0xd7, 0xa6, 0x34, 0x64, 0xf0, 0xfa, 0xed, 0xc3, + 0xf2, 0x3e, 0xf3, 0xe9, 0xee, 0x76, 0xbf, 0x2c, 0xc3, 0xbb, 0x7e, 0x71, 0x2e, 0xf6, 0xcc, 0x5c, + 0x70, 0x13, 0xac, 0xd0, 0xd7, 0x83, 0x00, 0xfd, 0xb5, 0xb5, 0xbb, 0xdd, 0xb7, 0x42, 0x3f, 0x7f, + 0x3b, 0xc6, 0x80, 0x72, 0x15, 0x07, 0x61, 0x14, 0x8c, 0x72, 0xb5, 0xe6, 0xff, 0x51, 0x6b, 
0xbd, + 0x4f, 0x6d, 0xfb, 0x0f, 0x13, 0xea, 0x79, 0x9e, 0xa3, 0x2e, 0xde, 0x04, 0x10, 0x9c, 0x44, 0x49, + 0x28, 0x42, 0x16, 0xe9, 0x8a, 0x0f, 0xee, 0xa8, 0x38, 0xe5, 0x64, 0x47, 0x33, 0xff, 0x0a, 0x7f, + 0x06, 0xce, 0x40, 0xb1, 0xd2, 0xd1, 0x17, 0xde, 0x96, 0xf9, 0xd6, 0xb2, 0xab, 0xa6, 0xe9, 0x45, + 0xf3, 0x4a, 0x33, 0xe6, 0x65, 0x06, 0x3d, 0x5f, 0x7d, 0x03, 0xd5, 0xe9, 0x93, 0x8c, 0x97, 0xa0, + 0xa6, 0x36, 0xfb, 0x8c, 0x8f, 0xc9, 0x08, 0x19, 0xf8, 0x1e, 0x2c, 0x29, 0x20, 0x2f, 0x84, 0x4c, + 0xec, 0xc1, 0xf2, 0x1c, 0x78, 0xd4, 0x45, 0x56, 0xd3, 0xf9, 0x35, 0x4d, 0xd9, 0x74, 0x7e, 0x4e, + 0xcd, 0x5f, 0xfd, 0xdb, 0x82, 0x5a, 0xe1, 0xe9, 0xc2, 0x00, 0xe5, 0x5e, 0x12, 0xec, 0x4c, 0x62, + 0x64, 0xe0, 0x1a, 0x38, 0xbd, 0x24, 0xd8, 0xa4, 0x44, 0x20, 0x53, 0x6f, 0x5e, 0x71, 0x16, 0x23, + 0x4b, 0xb3, 0x36, 0xe2, 0x18, 0x95, 0x70, 0x03, 0x20, 0x5d, 0xf7, 0x69, 0x12, 0x23, 0x5b, 0x13, + 0x8f, 0x98, 0xa0, 0x68, 0x41, 0xaa, 0xd5, 0x1b, 0x15, 0x2d, 0xeb, 0xa8, 0x7c, 0x26, 0x90, 0x83, + 0x11, 0xd4, 0x65, 0x31, 0x4a, 0xb8, 0x38, 0x91, 0x55, 0x2a, 0x78, 0x05, 0x50, 0x11, 0x51, 0x1f, + 0x55, 0x31, 0x86, 0x46, 0x2f, 0x09, 0x5e, 0x47, 0x9c, 0x92, 0xc1, 0x29, 0x39, 0x19, 0x51, 0x04, + 0x78, 0x19, 0x16, 0x75, 0x22, 0x79, 0x2b, 0x27, 0x09, 0xaa, 0x69, 0xda, 0xd6, 0x29, 0x1d, 0x7c, + 0xf3, 0xe5, 0x84, 0xf1, 0xc9, 0x18, 0xd5, 0xf1, 0x07, 0xb0, 0xdc, 0x4b, 0x02, 0x35, 0xbb, 0x21, + 0xe5, 0x7b, 0x94, 0xf8, 0x94, 0xa3, 0x45, 0xfd, 0xf5, 0x61, 0x38, 0xa6, 0x6c, 0x22, 0xf6, 0xd9, + 0xb7, 0xa8, 0xa1, 0xc5, 0xf4, 0x29, 0xf1, 0xd5, 0xcf, 0x11, 0x2d, 0x69, 0x31, 0x53, 0x44, 0x89, + 0x41, 0xba, 0xdf, 0x57, 0x9c, 0xaa, 0x16, 0x97, 0x75, 0x55, 0xbd, 0x57, 0x1c, 0xbc, 0xfa, 0xa3, + 0x09, 0x2b, 0x77, 0x9d, 0x1c, 0xfc, 0x00, 0xdc, 0xbb, 0xf0, 0x8d, 0x89, 0x60, 0xc8, 0xc0, 0x1f, + 0xc1, 0x87, 0x77, 0x45, 0x3f, 0x67, 0x61, 0x24, 0x76, 0xc7, 0xf1, 0x28, 0x1c, 0x84, 0x72, 0x14, + 0xef, 0xa3, 0xbd, 0x3c, 0xd7, 0x34, 0x2b, 0x9b, 0xf1, 0xf3, 0xd5, 0x0b, 0x68, 0xcc, 0x5e, 0x1c, + 0xe9, 0x4a, 0x8e, 0x6c, 0xf8, 0xbe, 0xbc, 0x22, 0xc8, 0xc0, 0x6e, 0x51, 0x75, 0x9f, 0x8e, 0xd9, + 0x19, 0x55, 0x11, 0x73, 0x36, 0xf2, 0x3a, 0xf6, 0x89, 0x48, 0x23, 0xd6, 0x6c, 0x47, 0x1b, 0xbe, + 0xbf, 0x97, 0x3e, 0x54, 0x2a, 0x5a, 0xda, 0x7c, 0x7c, 0xf9, 0xce, 0x33, 0xae, 0xde, 0x79, 0xc6, + 0xe5, 0xb5, 0x67, 0x5e, 0x5d, 0x7b, 0xe6, 0xbf, 0xd7, 0x9e, 0xf9, 0xd3, 0x8d, 0x67, 0xfc, 0x72, + 0xe3, 0x19, 0x57, 0x37, 0x9e, 0xf1, 0xcf, 0x8d, 0x67, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0x96, + 0xae, 0xd0, 0x97, 0x14, 0x09, 0x00, 0x00, } func (m *Entry) Marshal() (dAtA []byte, err error) { diff --git a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto b/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto index f46a54c948..931fb0b975 100644 --- a/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto +++ b/vendor/go.etcd.io/etcd/raft/v3/raftpb/raft.proto @@ -2,6 +2,7 @@ syntax = "proto2"; package raftpb; import "gogoproto/gogo.proto"; +import "etcd/api/versionpb/version.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; @@ -13,12 +14,16 @@ option (gogoproto.goproto_unrecognized_all) = false; option (gogoproto.goproto_sizecache_all) = false; enum EntryType { + option (versionpb.etcd_version_enum) = "3.0"; + EntryNormal = 0; EntryConfChange = 1; // corresponds to pb.ConfChange - EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 + EntryConfChangeV2 = 2 [(versionpb.etcd_version_enum_value)="3.4"]; // corresponds to pb.ConfChangeV2 } message Entry { + option (versionpb.etcd_version_msg) = "3.0"; + optional uint64 Term = 2 [(gogoproto.nullable) 
= false]; // must be 64-bit aligned for atomic operations optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations optional EntryType Type = 1 [(gogoproto.nullable) = false]; @@ -89,6 +94,7 @@ message HardState { // ConfChangeTransition specifies the behavior of a configuration change with // respect to joint consensus. enum ConfChangeTransition { + option (versionpb.etcd_version_enum) = "3.4"; // Automatically use the simple protocol if possible, otherwise fall back // to ConfChangeJointImplicit. Most applications will want to use this. ConfChangeTransitionAuto = 0; @@ -131,14 +137,16 @@ enum ConfChangeType { } message ConfChange { + option (versionpb.etcd_version_msg) = "3.0"; + optional ConfChangeType type = 2 [(gogoproto.nullable) = false]; - optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ]; + optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"]; optional bytes context = 4; // NB: this is used only by etcd to thread through a unique identifier. // Ideally it should really use the Context instead. No counterpart to // this field exists in ConfChangeV2. - optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ]; + optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; } // ConfChangeSingle is an individual configuration change operation. Multiple @@ -181,6 +189,8 @@ message ConfChangeSingle { // // [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf message ConfChangeV2 { + option (versionpb.etcd_version_msg) = "3.4"; + optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false]; repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false]; optional bytes context = 3; diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go index d286f92c2e..dce741f7d0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go @@ -21,7 +21,7 @@ import ( "errors" "time" - jwt "github.com/form3tech-oss/jwt-go" + jwt "github.com/golang-jwt/jwt" "go.uber.org/zap" ) diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/options.go b/vendor/go.etcd.io/etcd/server/v3/auth/options.go index c0b039f759..970171da92 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/options.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/options.go @@ -18,10 +18,10 @@ import ( "crypto/ecdsa" "crypto/rsa" "fmt" - "io/ioutil" + "os" "time" - jwt "github.com/form3tech-oss/jwt-go" + jwt "github.com/golang-jwt/jwt" ) const ( @@ -70,14 +70,14 @@ func (opts *jwtOptions) Parse(optMap map[string]string) error { } if file := optMap[optPublicKey]; file != "" { - opts.PublicKey, err = ioutil.ReadFile(file) + opts.PublicKey, err = os.ReadFile(file) if err != nil { return err } } if file := optMap[optPrivateKey]; file != "" { - opts.PrivateKey, err = ioutil.ReadFile(file) + opts.PrivateKey, err = os.ReadFile(file) if err != nil { return err } @@ -145,7 +145,7 @@ func (opts *jwtOptions) rsaKey() (interface{}, error) { } // both keys provided, make sure they match - if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 { + if pub != nil && !pub.Equal(priv.Public()) { return nil, ErrKeyMismatch } @@ -183,8 +183,7 @@ func (opts *jwtOptions) ecKey() (interface{}, error) { } // both keys provided, make sure they match - if pub != nil && pub.Curve != priv.Curve && - pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 { + 
if pub != nil && !pub.Equal(priv.Public()) { return nil, ErrKeyMismatch } diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go index 7d77b16ea1..bae07ef524 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go @@ -17,13 +17,11 @@ package auth import ( "go.etcd.io/etcd/api/v3/authpb" "go.etcd.io/etcd/pkg/v3/adt" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.uber.org/zap" ) -func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions { - user := getUser(lg, tx, userName) +func getMergedPerms(tx AuthBatchTx, userName string) *unifiedRangePermissions { + user := tx.UnsafeGetUser(userName) if user == nil { return nil } @@ -32,7 +30,7 @@ func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifie writePerms := adt.NewIntervalTree() for _, roleName := range user.Roles { - role := getRole(lg, tx, roleName) + role := tx.UnsafeGetRole(roleName) if role == nil { continue } @@ -105,11 +103,11 @@ func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []b return false } -func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { +func (as *authStore) isRangeOpPermitted(tx AuthBatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { // assumption: tx is Lock()ed _, ok := as.rangePermCache[userName] if !ok { - perms := getMergedPerms(as.lg, tx, userName) + perms := getMergedPerms(tx, userName) if perms == nil { as.lg.Error( "failed to create a merged permission", diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go index 7b1b094ae1..5f66b56d16 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go @@ -20,6 +20,7 @@ package auth import ( "context" "crypto/rand" + "errors" "fmt" "math/big" "strconv" @@ -156,6 +157,11 @@ func (t *tokenSimple) invalidateUser(username string) { } func (t *tokenSimple) enable() { + t.simpleTokensMu.Lock() + defer t.simpleTokensMu.Unlock() + if t.simpleTokenKeeper != nil { // already enabled + return + } if t.simpleTokenTTL <= 0 { t.simpleTokenTTL = simpleTokenTTLDefault } @@ -207,7 +213,11 @@ func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) ( func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { // rev isn't used in simple token, it is only used in JWT - index := ctx.Value(AuthenticateParamIndex{}).(uint64) + var index uint64 + var ok bool + if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok { + return "", errors.New("failed to assign") + } simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string) token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index) t.assignSimpleTokenToUser(username, token) diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/store.go b/vendor/go.etcd.io/etcd/server/v3/auth/store.go index 44c1d35fda..408b235bab 100644 --- a/vendor/go.etcd.io/etcd/server/v3/auth/store.go +++ b/vendor/go.etcd.io/etcd/server/v3/auth/store.go @@ -18,7 +18,6 @@ import ( "bytes" "context" "encoding/base64" - "encoding/binary" "errors" "sort" "strings" @@ -29,8 +28,6 @@ import ( "go.etcd.io/etcd/api/v3/authpb" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" - 
"go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" "go.uber.org/zap" "golang.org/x/crypto/bcrypt" @@ -40,11 +37,10 @@ import ( ) var ( - enableFlagKey = []byte("authEnabled") - authEnabled = []byte{1} - authDisabled = []byte{0} + authEnabled = []byte{1} + authDisabled = []byte{0} - revisionKey = []byte("authRevision") + rootPerm = authpb.Permission{PermType: authpb.READWRITE, Key: []byte{}, RangeEnd: []byte{0}} ErrRootUserNotExist = errors.New("auth: root user does not exist") ErrRootRoleNotExist = errors.New("auth: root user does not have root role") @@ -77,8 +73,6 @@ const ( tokenTypeSimple = "simple" tokenTypeJWT = "jwt" - - revBytesLen = 8 ) type AuthInfo struct { @@ -107,7 +101,7 @@ type AuthStore interface { Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) // Recover recovers the state of auth store from the given backend - Recover(b backend.Backend) + Recover(be AuthBackend) // UserAdd adds a new user UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) @@ -199,12 +193,44 @@ type TokenProvider interface { genTokenPrefix() (string, error) } +type AuthBackend interface { + CreateAuthBuckets() + ForceCommit() + BatchTx() AuthBatchTx + + GetUser(string) *authpb.User + GetAllUsers() []*authpb.User + GetRole(string) *authpb.Role + GetAllRoles() []*authpb.Role +} + +type AuthBatchTx interface { + AuthReadTx + UnsafeSaveAuthEnabled(enabled bool) + UnsafeSaveAuthRevision(rev uint64) + UnsafePutUser(*authpb.User) + UnsafeDeleteUser(string) + UnsafePutRole(*authpb.Role) + UnsafeDeleteRole(string) +} + +type AuthReadTx interface { + UnsafeReadAuthEnabled() bool + UnsafeReadAuthRevision() uint64 + UnsafeGetUser(string) *authpb.User + UnsafeGetRole(string) *authpb.Role + UnsafeGetAllUsers() []*authpb.User + UnsafeGetAllRoles() []*authpb.Role + Lock() + Unlock() +} + type authStore struct { // atomic operations; need 64-bit align, or 32-bit tests will crash revision uint64 lg *zap.Logger - be backend.Backend + be AuthBackend enabled bool enabledMu sync.RWMutex @@ -221,15 +247,14 @@ func (as *authStore) AuthEnable() error { as.lg.Info("authentication is already enabled; ignored auth enable request") return nil } - b := as.be - tx := b.BatchTx() + tx := as.be.BatchTx() tx.Lock() defer func() { tx.Unlock() - b.ForceCommit() + as.be.ForceCommit() }() - u := getUser(as.lg, tx, rootUser) + u := tx.UnsafeGetUser(rootUser) if u == nil { return ErrRootUserNotExist } @@ -238,14 +263,13 @@ func (as *authStore) AuthEnable() error { return ErrRootRoleNotExist } - tx.UnsafePut(buckets.Auth, enableFlagKey, authEnabled) - + tx.UnsafeSaveAuthEnabled(true) as.enabled = true as.tokenProvider.enable() as.rangePermCache = make(map[string]*unifiedRangePermissions) - as.setRevision(getRevision(tx)) + as.setRevision(tx.UnsafeReadAuthRevision()) as.lg.Info("enabled authentication") return nil @@ -258,11 +282,13 @@ func (as *authStore) AuthDisable() { return } b := as.be + tx := b.BatchTx() tx.Lock() - tx.UnsafePut(buckets.Auth, enableFlagKey, authDisabled) + tx.UnsafeSaveAuthEnabled(false) as.commitRevision(tx) tx.Unlock() + b.ForceCommit() as.enabled = false @@ -285,12 +311,7 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string if !as.IsAuthEnabled() { return nil, ErrAuthNotEnabled } - - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - - user := getUser(as.lg, tx, username) + user := as.be.GetUser(username) if user == nil { return nil, ErrAuthFailed } @@ -328,7 +349,7 @@ func (as *authStore) 
CheckPassword(username, password string) (uint64, error) { tx.Lock() defer tx.Unlock() - user = getUser(as.lg, tx, username) + user = tx.UnsafeGetUser(username) if user == nil { return 0, ErrAuthFailed } @@ -337,7 +358,7 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { return 0, ErrNoPasswordUser } - return getRevision(tx), nil + return tx.UnsafeReadAuthRevision(), nil }() if err != nil { return 0, err @@ -350,24 +371,21 @@ func (as *authStore) CheckPassword(username, password string) (uint64, error) { return revision, nil } -func (as *authStore) Recover(be backend.Backend) { - enabled := false +func (as *authStore) Recover(be AuthBackend) { as.be = be tx := be.BatchTx() tx.Lock() - _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - as.setRevision(getRevision(tx)) + enabled := tx.UnsafeReadAuthEnabled() + as.setRevision(tx.UnsafeReadAuthRevision()) tx.Unlock() as.enabledMu.Lock() as.enabled = enabled + if enabled { + as.tokenProvider.enable() + } as.enabledMu.Unlock() } @@ -388,7 +406,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user != nil { return nil, ErrUserAlreadyExist } @@ -415,8 +433,7 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, Password: password, Options: options, } - - putUser(as.lg, tx, newUser) + tx.UnsafePutUser(newUser) as.commitRevision(tx) @@ -434,12 +451,11 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } - - delUser(tx, r.Name) + tx.UnsafeDeleteUser(r.Name) as.commitRevision(tx) @@ -459,7 +475,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } @@ -480,8 +496,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p Password: password, Options: user.Options, } - - putUser(as.lg, tx, updatedUser) + tx.UnsafePutUser(updatedUser) as.commitRevision(tx) @@ -501,13 +516,13 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.User) + user := tx.UnsafeGetUser(r.User) if user == nil { return nil, ErrUserNotFound } if r.Role != rootRole { - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } @@ -527,7 +542,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser user.Roles = append(user.Roles, r.Role) sort.Strings(user.Roles) - putUser(as.lg, tx, user) + tx.UnsafePutUser(user) as.invalidateCachedPerm(r.User) @@ -543,10 +558,7 @@ func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUser } func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - user := getUser(as.lg, tx, r.Name) - tx.Unlock() + user := as.be.GetUser(r.Name) if user == nil { return nil, ErrUserNotFound @@ -558,10 +570,7 @@ func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, } func (as *authStore) UserList(r 
*pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - users := getAllUsers(as.lg, tx) - tx.Unlock() + users := as.be.GetAllUsers() resp := &pb.AuthUserListResponse{Users: make([]string, len(users))} for i := range users { @@ -584,7 +593,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, r.Name) + user := tx.UnsafeGetUser(r.Name) if user == nil { return nil, ErrUserNotFound } @@ -605,7 +614,7 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs return nil, ErrRoleNotGranted } - putUser(as.lg, tx, updatedUser) + tx.UnsafePutUser(updatedUser) as.invalidateCachedPerm(r.Name) @@ -622,25 +631,22 @@ func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUs } func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - defer tx.Unlock() - var resp pb.AuthRoleGetResponse - role := getRole(as.lg, tx, r.Role) + role := as.be.GetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } - resp.Perm = append(resp.Perm, role.KeyPermission...) + if rootRole == string(role.Name) { + resp.Perm = append(resp.Perm, &rootPerm) + } else { + resp.Perm = append(resp.Perm, role.KeyPermission...) + } return &resp, nil } func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - tx := as.be.BatchTx() - tx.Lock() - roles := getAllRoles(as.lg, tx) - tx.Unlock() + roles := as.be.GetAllRoles() resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))} for i := range roles { @@ -654,7 +660,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } @@ -673,7 +679,7 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) return nil, ErrPermissionNotGranted } - putRole(as.lg, tx, updatedRole) + tx.UnsafePutRole(updatedRole) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. 
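The hunks above complete the move of authStore from raw bucket access onto the AuthBackend/AuthBatchTx interfaces declared earlier in this file. To make that contract concrete, here is a minimal in-memory AuthBatchTx sketch of the kind a unit test might use; every name below is hypothetical, and etcd's real implementation lives under server/v3/storage/schema:

package authtest

import (
	"sync"

	"go.etcd.io/etcd/api/v3/authpb"
)

// memAuthTx satisfies the AuthBatchTx contract with plain maps in place of
// bolt buckets.
type memAuthTx struct {
	mu       sync.Mutex
	enabled  bool
	revision uint64
	users    map[string]*authpb.User
	roles    map[string]*authpb.Role
}

func newMemAuthTx() *memAuthTx {
	return &memAuthTx{
		users: make(map[string]*authpb.User),
		roles: make(map[string]*authpb.Role),
	}
}

func (t *memAuthTx) Lock()   { t.mu.Lock() }
func (t *memAuthTx) Unlock() { t.mu.Unlock() }

func (t *memAuthTx) UnsafeSaveAuthEnabled(enabled bool) { t.enabled = enabled }
func (t *memAuthTx) UnsafeReadAuthEnabled() bool        { return t.enabled }
func (t *memAuthTx) UnsafeSaveAuthRevision(rev uint64)  { t.revision = rev }
func (t *memAuthTx) UnsafeReadAuthRevision() uint64     { return t.revision }

func (t *memAuthTx) UnsafePutUser(u *authpb.User)           { t.users[string(u.Name)] = u }
func (t *memAuthTx) UnsafeDeleteUser(name string)           { delete(t.users, name) }
func (t *memAuthTx) UnsafeGetUser(name string) *authpb.User { return t.users[name] }

func (t *memAuthTx) UnsafePutRole(r *authpb.Role)           { t.roles[string(r.Name)] = r }
func (t *memAuthTx) UnsafeDeleteRole(name string)           { delete(t.roles, name) }
func (t *memAuthTx) UnsafeGetRole(name string) *authpb.Role { return t.roles[name] }

func (t *memAuthTx) UnsafeGetAllUsers() []*authpb.User {
	us := make([]*authpb.User, 0, len(t.users))
	for _, u := range t.users {
		us = append(us, u)
	}
	return us
}

func (t *memAuthTx) UnsafeGetAllRoles() []*authpb.Role {
	rs := make([]*authpb.Role, 0, len(t.roles))
	for _, r := range t.roles {
		rs = append(rs, r)
	}
	return rs
}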
@@ -700,14 +706,14 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Role) + role := tx.UnsafeGetRole(r.Role) if role == nil { return nil, ErrRoleNotFound } - delRole(tx, r.Role) + tx.UnsafeDeleteRole(r.Role) - users := getAllUsers(as.lg, tx) + users := tx.UnsafeGetAllUsers() for _, user := range users { updatedUser := &authpb.User{ Name: user.Name, @@ -725,7 +731,7 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete continue } - putUser(as.lg, tx, updatedUser) + tx.UnsafePutUser(updatedUser) as.invalidateCachedPerm(string(user.Name)) } @@ -745,7 +751,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Name) + role := tx.UnsafeGetRole(r.Name) if role != nil { return nil, ErrRoleAlreadyExist } @@ -754,7 +760,7 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, Name: []byte(r.Name), } - putRole(as.lg, tx, newRole) + tx.UnsafePutRole(newRole) as.commitRevision(tx) @@ -789,7 +795,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( tx.Lock() defer tx.Unlock() - role := getRole(as.lg, tx, r.Name) + role := tx.UnsafeGetRole(r.Name) if role == nil { return nil, ErrRoleNotFound } @@ -813,7 +819,7 @@ func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) ( sort.Sort(permSlice(role.KeyPermission)) } - putRole(as.lg, tx, role) + tx.UnsafePutRole(role) // TODO(mitake): currently single role update invalidates every cache // It should be optimized. @@ -853,7 +859,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE tx.Lock() defer tx.Unlock() - user := getUser(as.lg, tx, userName) + user := tx.UnsafeGetUser(userName) if user == nil { as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName)) return ErrPermissionDenied @@ -891,10 +897,7 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { return ErrUserEmpty } - tx := as.be.BatchTx() - tx.Lock() - u := getUser(as.lg, tx, authInfo.Username) - tx.Unlock() + u := as.be.GetUser(authInfo.Username) if u == nil { return ErrUserNotFound @@ -907,103 +910,6 @@ func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error { return nil } -func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User { - _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte(username), nil, 0) - if len(vs) == 0 { - return nil - } - - user := &authpb.User{} - err := user.Unmarshal(vs[0]) - if err != nil { - lg.Panic( - "failed to unmarshal 'authpb.User'", - zap.String("user-name", username), - zap.Error(err), - ) - } - return user -} - -func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User { - _, vs := tx.UnsafeRange(buckets.AuthUsers, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - users := make([]*authpb.User, len(vs)) - for i := range vs { - user := &authpb.User{} - err := user.Unmarshal(vs[i]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - users[i] = user - } - return users -} - -func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) { - b, err := user.Marshal() - if err != nil { - lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err)) - } - tx.UnsafePut(buckets.AuthUsers, user.Name, b) -} - -func delUser(tx backend.BatchTx, username string) { - tx.UnsafeDelete(buckets.AuthUsers, 
[]byte(username)) -} - -func getRole(lg *zap.Logger, tx backend.BatchTx, rolename string) *authpb.Role { - _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte(rolename), nil, 0) - if len(vs) == 0 { - return nil - } - - role := &authpb.Role{} - err := role.Unmarshal(vs[0]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - return role -} - -func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role { - _, vs := tx.UnsafeRange(buckets.AuthRoles, []byte{0}, []byte{0xff}, -1) - if len(vs) == 0 { - return nil - } - - roles := make([]*authpb.Role, len(vs)) - for i := range vs { - role := &authpb.Role{} - err := role.Unmarshal(vs[i]) - if err != nil { - lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) - } - roles[i] = role - } - return roles -} - -func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) { - b, err := role.Marshal() - if err != nil { - lg.Panic( - "failed to marshal 'authpb.Role'", - zap.String("role-name", string(role.Name)), - zap.Error(err), - ) - } - - tx.UnsafePut(buckets.AuthRoles, role.Name, b) -} - -func delRole(tx backend.BatchTx, rolename string) { - tx.UnsafeDelete(buckets.AuthRoles, []byte(rolename)) -} - func (as *authStore) IsAuthEnabled() bool { as.enabledMu.RLock() defer as.enabledMu.RUnlock() @@ -1011,7 +917,7 @@ func (as *authStore) IsAuthEnabled() bool { } // NewAuthStore creates a new AuthStore. -func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore { +func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) *authStore { if lg == nil { lg = zap.NewNop() } @@ -1027,23 +933,12 @@ func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCo bcryptCost = bcrypt.DefaultCost } + be.CreateAuthBuckets() tx := be.BatchTx() tx.Lock() - - tx.UnsafeCreateBucket(buckets.Auth) - tx.UnsafeCreateBucket(buckets.AuthUsers) - tx.UnsafeCreateBucket(buckets.AuthRoles) - - enabled := false - _, vs := tx.UnsafeRange(buckets.Auth, enableFlagKey, nil, 0) - if len(vs) == 1 { - if bytes.Equal(vs[0], authEnabled) { - enabled = true - } - } - + enabled := tx.UnsafeReadAuthEnabled() as := &authStore{ - revision: getRevision(tx), + revision: tx.UnsafeReadAuthRevision(), lg: lg, be: be, enabled: enabled, @@ -1074,20 +969,9 @@ func hasRootRole(u *authpb.User) bool { return idx != len(u.Roles) && u.Roles[idx] == rootRole } -func (as *authStore) commitRevision(tx backend.BatchTx) { +func (as *authStore) commitRevision(tx AuthBatchTx) { atomic.AddUint64(&as.revision, 1) - revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.Revision()) - tx.UnsafePut(buckets.Auth, revisionKey, revBytes) -} - -func getRevision(tx backend.BatchTx) uint64 { - _, vs := tx.UnsafeRange(buckets.Auth, revisionKey, nil, 0) - if len(vs) != 1 { - // this can happen in the initialization phase - return 0 - } - return binary.BigEndian.Uint64(vs[0]) + tx.UnsafeSaveAuthRevision(as.Revision()) } func (as *authStore) setRevision(rev uint64) { @@ -1142,6 +1026,10 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) { } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { + if !as.IsAuthEnabled() { + return nil, nil + } + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil @@ -1282,7 +1170,7 @@ func (as *authStore) WithRoot(ctx context.Context) context.Context { func (as *authStore) HasRole(user, role string) bool { tx := as.be.BatchTx() tx.Lock() - u := getUser(as.lg, tx, user) + u 
:= tx.UnsafeGetUser(user) tx.Unlock() if u == nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/config/config.go b/vendor/go.etcd.io/etcd/server/v3/config/config.go index b6e2109c22..75d7df6c4a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/config/config.go +++ b/vendor/go.etcd.io/etcd/server/v3/config/config.go @@ -25,7 +25,8 @@ import ( "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/netutil" - "go.etcd.io/etcd/server/v3/datadir" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" + "go.etcd.io/etcd/server/v3/storage/datadir" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" bolt "go.etcd.io/bbolt" @@ -34,12 +35,15 @@ import ( // ServerConfig holds the configuration of etcd as taken from the command line or discovery. type ServerConfig struct { - Name string + Name string + DiscoveryURL string DiscoveryProxy string - ClientURLs types.URLs - PeerURLs types.URLs - DataDir string + DiscoveryCfg v3discovery.DiscoveryConfig + + ClientURLs types.URLs + PeerURLs types.URLs + DataDir string // DedicatedWALDir config will make the etcd to write the WAL to the WALDir // rather than the dataDir/member/wal. DedicatedWALDir string @@ -80,6 +84,10 @@ type ServerConfig struct { TickMs uint ElectionTicks int + // WaitClusterReadyTimeout is the maximum time to wait for the + // cluster to be ready on startup before serving client requests. + WaitClusterReadyTimeout time.Duration + // InitialElectionTickAdvance is true, then local member fast-forwards // election ticks to speed up "initial" leader election trigger. This // benefits the case of larger election ticks. For instance, cross @@ -114,13 +122,15 @@ type ServerConfig struct { AutoCompactionRetention time.Duration AutoCompactionMode string CompactionBatchLimit int + CompactionSleepInterval time.Duration QuotaBackendBytes int64 MaxTxnOps uint // MaxRequestBytes is the maximum request size to send over raft. MaxRequestBytes uint - WarningApplyDuration time.Duration + WarningApplyDuration time.Duration + WarningUnaryRequestDuration time.Duration StrictReconfigCheck bool @@ -147,10 +157,12 @@ type ServerConfig struct { ForceNewCluster bool - // EnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. + // EnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. EnableLeaseCheckpoint bool // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints. LeaseCheckpointInterval time.Duration + // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. + LeaseCheckpointPersist bool EnableGRPCGateway bool @@ -183,6 +195,9 @@ type ServerConfig struct { // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. + ExperimentalMaxLearners int `json:"experimental-max-learners"` + // V2Deprecation defines a phase of v2store deprecation process. 
V2Deprecation V2DeprecationEnum `json:"v2-deprecation"` } @@ -293,7 +308,9 @@ func (c *ServerConfig) WALDir() string { func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") } -func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" } +func (c *ServerConfig) ShouldDiscover() bool { + return c.DiscoveryURL != "" || len(c.DiscoveryCfg.Endpoints) > 0 +} // ReqTimeout returns timeout for request to finish. func (c *ServerConfig) ReqTimeout() time.Duration { diff --git a/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go index 828bd9a8f4..862c3bb934 100644 --- a/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go +++ b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go @@ -17,7 +17,7 @@ package config type V2DeprecationEnum string const ( - // Default in v3.5. Issues a warning if v2store have meaningful content. + // No longer supported in v3.6 V2_DEPR_0_NOT_YET = V2DeprecationEnum("not-yet") // Default in v3.6. Meaningful v2 state is not allowed. // The V2 files are maintained for v3.5 rollback. @@ -28,7 +28,7 @@ const ( // ability to rollback to etcd v3.5. V2_DEPR_2_GONE = V2DeprecationEnum("gone") - V2_DEPR_DEFAULT = V2_DEPR_0_NOT_YET + V2_DEPR_DEFAULT = V2_DEPR_1_WRITE_ONLY ) func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool { diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config.go b/vendor/go.etcd.io/etcd/server/v3/embed/config.go index 380c0c3aaa..e6f9e3a323 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config.go @@ -15,8 +15,8 @@ package embed import ( + "errors" "fmt" - "io/ioutil" "net" "net/http" "net/url" @@ -35,7 +35,9 @@ import ( "go.etcd.io/etcd/pkg/v3/netutil" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" bolt "go.etcd.io/bbolt" "go.uber.org/multierr" @@ -49,16 +51,23 @@ const ( ClusterStateFlagNew = "new" ClusterStateFlagExisting = "existing" - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 - DefaultMaxTxnOps = uint(128) - DefaultWarningApplyDuration = 100 * time.Millisecond - DefaultMaxRequestBytes = 1.5 * 1024 * 1024 - DefaultGRPCKeepAliveMinTime = 5 * time.Second - DefaultGRPCKeepAliveInterval = 2 * time.Hour - DefaultGRPCKeepAliveTimeout = 20 * time.Second - DefaultDowngradeCheckTime = 5 * time.Second + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxTxnOps = uint(128) + DefaultWarningApplyDuration = 100 * time.Millisecond + DefaultWarningUnaryRequestDuration = 300 * time.Millisecond + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second + DefaultDowngradeCheckTime = 5 * time.Second + DefaultWaitClusterReadyTimeout = 5 * time.Second + + DefaultDiscoveryDialTimeout = 2 * time.Second + DefaultDiscoveryRequestTimeOut = 5 * time.Second + DefaultDiscoveryKeepAliveTime = 2 * time.Second + DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second DefaultListenPeerURLs = "http://localhost:2380" DefaultListenClientURLs = "http://localhost:2379" @@ -85,9 +94,6 @@ const ( // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag. // It's enabled by default. 
DefaultStrictReconfigCheck = true - // DefaultEnableV2 is the default value for "--enable-v2" flag. - // v2 API is disabled by default. - DefaultEnableV2 = false // maxElectionMs specifies the maximum value of election timeout. // More details are listed in ../Documentation/tuning.md#time-parameters. @@ -98,7 +104,7 @@ const ( var ( ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " + - "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"") + "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"") ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly") ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined") @@ -207,7 +213,7 @@ type Config struct { // SelfSignedCertValidity specifies the validity period of the client and peer certificates // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS, // the unit is year, and the default is 1 - SelfSignedCertValidity uint + SelfSignedCertValidity uint `json:"self-signed-cert-validity"` // CipherSuites is a list of supported TLS cipher suites between // client/server and peers. If empty, Go auto-populates the list. @@ -218,15 +224,14 @@ type Config struct { DNSCluster string `json:"discovery-srv"` DNSClusterServiceName string `json:"discovery-srv-name"` Dproxy string `json:"discovery-proxy"` - Durl string `json:"discovery"` - InitialCluster string `json:"initial-cluster"` - InitialClusterToken string `json:"initial-cluster-token"` - StrictReconfigCheck bool `json:"strict-reconfig-check"` - // EnableV2 exposes the deprecated V2 API surface. - // TODO: Delete in 3.6 (https://github.com/etcd-io/etcd/issues/12913) - // Deprecated in 3.5. - EnableV2 bool `json:"enable-v2"` + Durl string `json:"discovery"` + DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"` + + InitialCluster string `json:"initial-cluster"` + InitialClusterToken string `json:"initial-cluster-token"` + StrictReconfigCheck bool `json:"strict-reconfig-check"` + ExperimentalWaitClusterReadyTimeout time.Duration `json:"wait-cluster-ready-timeout"` // AutoCompactionMode is either 'periodic' or 'revision'. AutoCompactionMode string `json:"auto-compaction-mode"` @@ -252,7 +257,7 @@ type Config struct { GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` // SocketOpts are socket options passed to listener config. - SocketOpts transport.SocketOpts + SocketOpts transport.SocketOpts `json:"socket-options"` // PreVote is true to enable Raft Pre-Vote. // If enabled, Raft runs an additional election phase @@ -310,13 +315,16 @@ type Config struct { ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"` ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"` - // ExperimentalEnableV2V3 configures URLs that expose deprecated V2 API working on V3 store. - // Deprecated in v3.5. - // TODO: Delete in v3.6 (https://github.com/etcd-io/etcd/issues/12913) - ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"` - // ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases. 
- ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` - ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` + // ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change. + ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"` + // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. + // Requires experimental-enable-lease-checkpoint to be enabled. + // Deprecated in v3.6. + // TODO: Delete in v3.7 + ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"` + ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"` + // ExperimentalCompactionSleepInterval is the sleep interval between every etcd compaction loop. + ExperimentalCompactionSleepInterval time.Duration `json:"experimental-compaction-sleep-interval"` ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"` // ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying a request // takes more time than this value. @@ -324,6 +332,11 @@ type Config struct { // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect. ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"` + // ExperimentalWarningUnaryRequestDuration is the time duration after which a warning is generated if applying + // a unary request takes more time than this value. + ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"` + // ExperimentalMaxLearners sets a limit to the number of learner members that can exist in the cluster membership. + ExperimentalMaxLearners int `json:"experimental-max-learners"` // ForceNewCluster starts a new cluster even if previously started; unsafe. ForceNewCluster bool `json:"force-new-cluster"` @@ -346,12 +359,17 @@ type Config struct { // that exist at the same time. // Can only be used if ExperimentalEnableDistributedTracing is true. ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"` + // ExperimentalDistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans. + // Defaults to 0. + ExperimentalDistributedTracingSamplingRatePerMillion int `json:"experimental-distributed-tracing-sampling-rate"` // Logger is logger options: currently only supports "zap". // "capnslog" is removed in v3.5. Logger string `json:"logger"` // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'. LogLevel string `json:"log-level"` + // LogFormat sets the log encoding. Only supports json, console. Default is 'json'.
+ LogFormat string `json:"log-format"` // LogOutputs is either: // - "default" as os.Stderr, // - "stderr" as os.Stderr, @@ -445,11 +463,16 @@ func NewConfig() *Config { MaxRequestBytes: DefaultMaxRequestBytes, ExperimentalWarningApplyDuration: DefaultWarningApplyDuration, + ExperimentalWarningUnaryRequestDuration: DefaultWarningUnaryRequestDuration, + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, - SocketOpts: transport.SocketOpts{}, + SocketOpts: transport.SocketOpts{ + ReusePort: false, + ReuseAddress: false, + }, TickMs: 100, ElectionMs: 1000, @@ -460,12 +483,12 @@ func NewConfig() *Config { APUrls: []url.URL{*apurl}, ACUrls: []url.URL{*acurl}, - ClusterState: ClusterStateFlagNew, - InitialClusterToken: "etcd-cluster", + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + ExperimentalWaitClusterReadyTimeout: DefaultWaitClusterReadyTimeout, StrictReconfigCheck: DefaultStrictReconfigCheck, Metrics: "basic", - EnableV2: DefaultEnableV2, CORS: map[string]struct{}{"*": {}}, HostWhitelist: map[string]struct{}{"*": {}}, @@ -488,8 +511,16 @@ func NewConfig() *Config { ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime, ExperimentalMemoryMlock: false, ExperimentalTxnModeWriteWithSharedBuffer: true, + ExperimentalMaxLearners: membership.DefaultMaxLearners, V2Deprecation: config.V2_DEPR_DEFAULT, + + DiscoveryCfg: v3discovery.DiscoveryConfig{ + DialTimeout: DefaultDiscoveryDialTimeout, + RequestTimeOut: DefaultDiscoveryRequestTimeOut, + KeepAliveTime: DefaultDiscoveryKeepAliveTime, + KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut, + }, } cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) return cfg @@ -504,7 +535,7 @@ func ConfigFromFile(path string) (*Config, error) { } func (cfg *configYAML) configFromFile(path string) error { - b, err := ioutil.ReadFile(path) + b, err := os.ReadFile(path) if err != nil { return err } @@ -571,8 +602,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.HostWhitelist = uv.Values } - // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName - if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { + // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName + if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster { cfg.InitialCluster = "" } if cfg.ClusterState == "" { @@ -591,7 +622,9 @@ func (cfg *configYAML) configFromFile(path string) error { copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON) cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS - + if cfg.SelfSignedCertValidity == 0 { + cfg.SelfSignedCertValidity = 1 + } return cfg.Validate() } @@ -637,7 +670,7 @@ func (cfg *Config) Validate() error { } // Check if conflicting flags are passed. nSet := 0 - for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} { + for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} { if v { nSet++ } @@ -651,6 +684,28 @@ func (cfg *Config) Validate() error { return ErrConflictBootstrapFlags } + // Check if both v2 discovery and v3 discovery flags are passed. 
+ v2discoveryFlagsExist := cfg.Dproxy != "" + v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 || + cfg.DiscoveryCfg.Token != "" || + cfg.DiscoveryCfg.CertFile != "" || + cfg.DiscoveryCfg.KeyFile != "" || + cfg.DiscoveryCfg.TrustedCAFile != "" || + cfg.DiscoveryCfg.User != "" || + cfg.DiscoveryCfg.Password != "" + + if v2discoveryFlagsExist && v3discoveryFlagsExist { + return errors.New("both v2 discovery settings (discovery, discovery-proxy) " + + "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " + + "discovery-key, discovery-cacert, discovery-user, discovery-password) are set") + } + + // If one of `discovery-token` and `discovery-endpoints` is provided, + // then the other one must be provided as well. + if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) { + return errors.New("both --discovery-token and --discovery-endpoints must be set") + } + if cfg.TickMs == 0 { return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs) } @@ -676,6 +731,21 @@ func (cfg *Config) Validate() error { return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode) } + // Validate distributed tracing configuration but only if enabled. + if cfg.ExperimentalEnableDistributedTracing { + if err := validateTracingConfig(cfg.ExperimentalDistributedTracingSamplingRatePerMillion); err != nil { + return fmt.Errorf("distributed tracing configuration is not valid: (%v)", err) + } + } + + if !cfg.ExperimentalEnableLeaseCheckpointPersist && cfg.ExperimentalEnableLeaseCheckpoint { + cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling experimental-enable-lease-checkpoint-persist") + } + + if cfg.ExperimentalEnableLeaseCheckpointPersist && !cfg.ExperimentalEnableLeaseCheckpoint { + return fmt.Errorf("setting experimental-enable-lease-checkpoint-persist requires experimental-enable-lease-checkpoint") + } + return nil } @@ -685,11 +755,18 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok switch { case cfg.Durl != "": urlsmap = types.URLsMap{} - // If using discovery, generate a temporary cluster based on + // If using v2 discovery, generate a temporary cluster based on // self's advertised peer URLs urlsmap[cfg.Name] = cfg.APUrls token = cfg.Durl + case len(cfg.DiscoveryCfg.Endpoints) > 0: + urlsmap = types.URLsMap{} + // If using v3 discovery, generate a temporary cluster based on + // self's advertised peer URLs + urlsmap[cfg.Name] = cfg.APUrls + token = cfg.DiscoveryCfg.Token + case cfg.DNSCluster != "": clusterStrs, cerr := cfg.GetDNSClusterNames() lg := cfg.logger diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go index 9cb6e57776..b019289e6d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go @@ -19,7 +19,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net/url" "os" @@ -106,6 +111 @@ func (cfg *Config) setupLogging() error { copied.ErrorOutputPaths = errOutputPaths copied = logutil.MergeOutputPaths(copied) copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) + if err != nil { + return err + } + copied.Encoding = encoding if cfg.ZapLoggerBuilder == nil { lg, err := copied.Build() if err != nil { @@ -130,10 +135,22 @@ func (cfg *Config) setupLogging() error { lvl :=
zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel)) + var encoder zapcore.Encoder + encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat) + if err != nil { + return err + } + + if encoding == logutil.ConsoleLogFormat { + encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) + } else { + encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig) + } + // WARN: do not change field names in encoder config // journald logging writer assumes field names of "level" and "caller" cr := zapcore.NewCore( - zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig), + encoder, syncer, lvl, ) @@ -213,7 +230,7 @@ func (cfg *Config) SetupGlobalLoggers() { grpc.EnableTracing = true grpclog.SetLoggerV2(zapgrpc.NewLogger(lg)) } else { - grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr)) } zap.ReplaceGlobals(lg) } diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go new file mode 100644 index 0000000000..2ee7035431 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go @@ -0,0 +1,117 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package embed + +import ( + "context" + "fmt" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.uber.org/zap" +) + +const maxSamplingRatePerMillion = 1000000 + +func validateTracingConfig(samplingRate int) error { + if samplingRate < 0 { + return fmt.Errorf("tracing sampling rate must be positive") + } + if samplingRate > maxSamplingRatePerMillion { + return fmt.Errorf("tracing sampling rate must be less than %d", maxSamplingRatePerMillion) + } + + return nil +} + +func setupTracingExporter(ctx context.Context, cfg *Config) (exporter tracesdk.SpanExporter, options []otelgrpc.Option, err error) { + exporter, err = otlptracegrpc.New(ctx, + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(cfg.ExperimentalDistributedTracingAddress), + ) + if err != nil { + return nil, nil, err + } + + res, err := resource.New(ctx, + resource.WithAttributes( + semconv.ServiceNameKey.String(cfg.ExperimentalDistributedTracingServiceName), + ), + ) + if err != nil { + return nil, nil, err + } + + if resWithIDKey := determineResourceWithIDKey(cfg.ExperimentalDistributedTracingServiceInstanceID); resWithIDKey != nil { + // Merge resources into a new + // resource in case of duplicates. 
+ res, err = resource.Merge(res, resWithIDKey) + if err != nil { + return nil, nil, err + } + } + + options = append(options, + otelgrpc.WithPropagators( + propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + ), + ), + otelgrpc.WithTracerProvider( + tracesdk.NewTracerProvider( + tracesdk.WithBatcher(exporter), + tracesdk.WithResource(res), + tracesdk.WithSampler( + tracesdk.ParentBased(determineSampler(cfg.ExperimentalDistributedTracingSamplingRatePerMillion)), + ), + ), + ), + ) + + cfg.logger.Debug( + "distributed tracing enabled", + zap.String("address", cfg.ExperimentalDistributedTracingAddress), + zap.String("service-name", cfg.ExperimentalDistributedTracingServiceName), + zap.String("service-instance-id", cfg.ExperimentalDistributedTracingServiceInstanceID), + zap.Int("sampling-rate", cfg.ExperimentalDistributedTracingSamplingRatePerMillion), + ) + + return exporter, options, err +} + +func determineSampler(samplingRate int) tracesdk.Sampler { + sampler := tracesdk.NeverSample() + if samplingRate == 0 { + return sampler + } + return tracesdk.TraceIDRatioBased(float64(samplingRate) / float64(maxSamplingRatePerMillion)) +} + +// As Tracing service Instance ID must be unique, it should +// never use the empty default string value, it's set only +// if it's a non-empty string. +func determineResourceWithIDKey(serviceInstanceID string) *resource.Resource { + if serviceInstanceID != "" { + return resource.NewSchemaless( + (semconv.ServiceInstanceIDKey.String(serviceInstanceID)), + ) + } + return nil +} diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go index 001302f991..c43adca6be 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go @@ -18,7 +18,7 @@ import ( "context" "crypto/tls" "fmt" - "io/ioutil" + "io" defaultLog "log" "net" "net/http" @@ -26,6 +26,7 @@ import ( "runtime" "sort" "strconv" + "strings" "sync" "time" @@ -38,21 +39,12 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc" + "go.etcd.io/etcd/server/v3/storage" "go.etcd.io/etcd/server/v3/verify" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/soheilhy/cmux" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" - "go.opentelemetry.io/otel/exporters/otlp" - "go.opentelemetry.io/otel/exporters/otlp/otlpgrpc" - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/sdk/resource" - tracesdk "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/semconv" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/keepalive" @@ -186,10 +178,12 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { InitialClusterToken: token, DiscoveryURL: cfg.Durl, DiscoveryProxy: cfg.Dproxy, + DiscoveryCfg: cfg.DiscoveryCfg, NewCluster: cfg.IsNewCluster(), PeerTLSInfo: cfg.PeerTLSInfo, TickMs: cfg.TickMs, ElectionTicks: cfg.ElectionTicks(), + WaitClusterReadyTimeout: cfg.ExperimentalWaitClusterReadyTimeout, InitialElectionTickAdvance: cfg.InitialElectionTickAdvance, AutoCompactionRetention: autoCompactionRetention, AutoCompactionMode: cfg.AutoCompactionMode, @@ -216,19 +210,23 @@ func StartEtcd(inCfg
*Config) (e *Etcd, err error) { ExperimentalEnableDistributedTracing: cfg.ExperimentalEnableDistributedTracing, UnsafeNoFsync: cfg.UnsafeNoFsync, EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint, + LeaseCheckpointPersist: cfg.ExperimentalEnableLeaseCheckpointPersist, CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit, + CompactionSleepInterval: cfg.ExperimentalCompactionSleepInterval, WatchProgressNotifyInterval: cfg.ExperimentalWatchProgressNotifyInterval, DowngradeCheckTime: cfg.ExperimentalDowngradeCheckTime, WarningApplyDuration: cfg.ExperimentalWarningApplyDuration, + WarningUnaryRequestDuration: cfg.ExperimentalWarningUnaryRequestDuration, ExperimentalMemoryMlock: cfg.ExperimentalMemoryMlock, ExperimentalTxnModeWriteWithSharedBuffer: cfg.ExperimentalTxnModeWriteWithSharedBuffer, ExperimentalBootstrapDefragThresholdMegabytes: cfg.ExperimentalBootstrapDefragThresholdMegabytes, - V2Deprecation: cfg.V2DeprecationEffective(), + ExperimentalMaxLearners: cfg.ExperimentalMaxLearners, + V2Deprecation: cfg.V2DeprecationEffective(), } if srvcfg.ExperimentalEnableDistributedTracing { tctx := context.Background() - tracingExporter, opts, err := e.setupTracing(tctx) + tracingExporter, opts, err := setupTracingExporter(tctx, cfg) if err != nil { return e, err } @@ -237,6 +235,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { } e.tracingExporterShutdown = func() { tracingExporter.Shutdown(tctx) } srvcfg.ExperimentalTracerOptions = opts + + e.cfg.logger.Info( + "distributed tracing setup enabled", + ) } print(e.cfg.logger, *cfg, srvcfg, memberInitialized) @@ -301,7 +303,7 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized quota := ec.QuotaBackendBytes if quota == 0 { - quota = etcdserver.DefaultQuotaBytes + quota = storage.DefaultQuotaBytes } lg.Info( @@ -322,6 +324,7 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized zap.Bool("force-new-cluster", sc.ForceNewCluster), zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)), zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)), + zap.String("wait-cluster-ready-timeout", sc.WaitClusterReadyTimeout.String()), zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance), zap.Uint64("snapshot-count", sc.SnapshotCount), zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries), @@ -344,7 +347,22 @@ func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()), zap.String("discovery-url", sc.DiscoveryURL), zap.String("discovery-proxy", sc.DiscoveryProxy), + + zap.String("discovery-token", sc.DiscoveryCfg.Token), + zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")), + zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()), + zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeOut.String()), + zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()), + zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()), + zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.InsecureTransport), + zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.InsecureSkipVerify), + zap.String("discovery-cert", sc.DiscoveryCfg.CertFile), + zap.String("discovery-key", sc.DiscoveryCfg.KeyFile), + zap.String("discovery-cacert", 
sc.DiscoveryCfg.TrustedCAFile), + zap.String("discovery-user", sc.DiscoveryCfg.User), + zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()), + zap.Int("max-learners", sc.ExperimentalMaxLearners), ) } @@ -539,13 +557,13 @@ func (e *Etcd) servePeers() (err error) { for _, p := range e.Peers { u := p.Listener.Addr().String() - gs := v3rpc.Server(e.Server, peerTLScfg) + gs := v3rpc.Server(e.Server, peerTLScfg, nil) m := cmux.New(p.Listener) go gs.Serve(m.Match(cmux.HTTP2())) srv := &http.Server{ Handler: grpcHandlerFunc(gs, ph), ReadTimeout: 5 * time.Minute, - ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error } go srv.Serve(m.Match(cmux.Any())) p.serve = func() error { @@ -692,25 +710,9 @@ func (e *Etcd) serveClients() (err error) { } // Start a client server goroutine for each listen address - var h http.Handler - if e.Config().EnableV2 { - if e.Config().V2DeprecationEffective().IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { - return fmt.Errorf("--enable-v2 and --v2-deprecation=%s are mutually exclusive", e.Config().V2DeprecationEffective()) - } - e.cfg.logger.Warn("Flag `enable-v2` is deprecated and will get removed in etcd 3.6.") - if len(e.Config().ExperimentalEnableV2V3) > 0 { - e.cfg.logger.Warn("Flag `experimental-enable-v2v3` is deprecated and will get removed in etcd 3.6.") - srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3) - h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout()) - } else { - h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout()) - } - } else { - mux := http.NewServeMux() - etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) - etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, mux, e.Server) - h = mux - } + mux := http.NewServeMux() + etcdhttp.HandleBasic(e.cfg.logger, mux, e.Server) + etcdhttp.HandleMetricsHealthForV3(e.cfg.logger, mux, e.Server) gopts := []grpc.ServerOption{} if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { @@ -730,7 +732,7 @@ func (e *Etcd) serveClients() (err error) { // start client servers in each goroutine for _, sctx := range e.sctxs { go func(s *serveCtx) { - e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...)) + e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, gopts...)) }(sctx) } return nil @@ -808,52 +810,3 @@ func parseCompactionRetention(mode, retention string) (ret time.Duration, err er } return ret, nil } - -func (e *Etcd) setupTracing(ctx context.Context) (exporter tracesdk.SpanExporter, options []otelgrpc.Option, err error) { - exporter, err = otlp.NewExporter(ctx, - otlpgrpc.NewDriver( - otlpgrpc.WithEndpoint(e.cfg.ExperimentalDistributedTracingAddress), - otlpgrpc.WithInsecure(), - )) - if err != nil { - return nil, nil, err - } - res := resource.NewWithAttributes( - semconv.ServiceNameKey.String(e.cfg.ExperimentalDistributedTracingServiceName), - ) - // As Tracing service Instance ID must be unique, it should - // never use the empty default string value, so we only set it - // if it's a non empty string. - if e.cfg.ExperimentalDistributedTracingServiceInstanceID != "" { - resWithIDKey := resource.NewWithAttributes( - (semconv.ServiceInstanceIDKey.String(e.cfg.ExperimentalDistributedTracingServiceInstanceID)), - ) - // Merge resources to combine into a new - // resource in case of duplicates. 
- res = resource.Merge(res, resWithIDKey) - } - - options = append(options, - otelgrpc.WithPropagators( - propagation.NewCompositeTextMapPropagator( - propagation.TraceContext{}, - propagation.Baggage{}, - ), - ), - otelgrpc.WithTracerProvider( - tracesdk.NewTracerProvider( - tracesdk.WithBatcher(exporter), - tracesdk.WithResource(res), - ), - ), - ) - - e.cfg.logger.Info( - "distributed tracing enabled", - zap.String("distributed-tracing-address", e.cfg.ExperimentalDistributedTracingAddress), - zap.String("distributed-tracing-service-name", e.cfg.ExperimentalDistributedTracingServiceName), - zap.String("distributed-tracing-service-instance-id", e.cfg.ExperimentalDistributedTracingServiceInstanceID), - ) - - return exporter, options, err -} diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go index 17b55384eb..bce15a339d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go @@ -17,12 +17,13 @@ package embed import ( "context" "fmt" - "io/ioutil" + "io" defaultLog "log" "math" "net" "net/http" "strings" + "time" etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw" "go.etcd.io/etcd/client/pkg/v3/transport" @@ -92,8 +93,16 @@ func (sctx *serveCtx) serve( handler http.Handler, errHandler func(error), gopts ...grpc.ServerOption) (err error) { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) - <-s.ReadyNotify() + logger := defaultLog.New(io.Discard, "etcdhttp", 0) + + // When the quorum isn't satisfied, the etcd server will be blocked + // on <-s.ReadyNotify(). Set a timeout here so that the etcd server + // can continue to serve serializable read requests. + select { + case <-time.After(s.Cfg.WaitClusterReadyTimeout): + sctx.lg.Warn("timed out waiting for the ready notification") + case <-s.ReadyNotify(): + } sctx.lg.Info("ready to serve client requests") @@ -110,7 +119,7 @@ func (sctx *serveCtx) serve( }() if sctx.insecure { - gs = v3rpc.Server(s, nil, gopts...) + gs = v3rpc.Server(s, nil, nil, gopts...) v3electionpb.RegisterElectionServer(gs, servElection) v3lockpb.RegisterLockServer(gs, servLock) if sctx.serviceRegister != nil { @@ -148,7 +157,7 @@ func (sctx *serveCtx) serve( if tlsErr != nil { return tlsErr } - gs = v3rpc.Server(s, tlscfg, gopts...) + gs = v3rpc.Server(s, tlscfg, nil, gopts...) v3electionpb.RegisterElectionServer(gs, servElection) v3lockpb.RegisterLockServer(gs, servLock) if sctx.serviceRegister != nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/util.go b/vendor/go.etcd.io/etcd/server/v3/embed/util.go index ad46153455..269fbc80b2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/embed/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/embed/util.go @@ -17,7 +17,7 @@ package embed import ( "path/filepath" - "go.etcd.io/etcd/server/v3/wal" + "go.etcd.io/etcd/server/v3/storage/wal" ) func isMemberInitialized(cfg *Config) bool { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go new file mode 100644 index 0000000000..bc4b686454 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go @@ -0,0 +1,90 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + + "github.com/coreos/go-semver/semver" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/membershippb" + "go.etcd.io/etcd/api/v3/version" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" + "go.etcd.io/etcd/server/v3/storage/schema" +) + +// serverVersionAdapter implements Server interface needed by serverversion.Monitor +type serverVersionAdapter struct { + *EtcdServer +} + +func newServerVersionAdapter(s *EtcdServer) *serverVersionAdapter { + return &serverVersionAdapter{ + EtcdServer: s, + } +} + +var _ serverversion.Server = (*serverVersionAdapter)(nil) + +func (s *serverVersionAdapter) UpdateClusterVersion(version string) { + s.GoAttach(func() { s.updateClusterVersionV3(version) }) +} + +func (s *serverVersionAdapter) LinearizableReadNotify(ctx context.Context) error { + return s.linearizableReadNotify(ctx) +} + +func (s *serverVersionAdapter) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} + _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + return err +} + +func (s *serverVersionAdapter) DowngradeCancel(ctx context.Context) error { + raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} + _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + return err +} + +func (s *serverVersionAdapter) GetClusterVersion() *semver.Version { + return s.cluster.Version() +} + +func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo { + return s.cluster.DowngradeInfo() +} + +func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions { + return getMembersVersions(s.lg, s.cluster, s.id, s.peerRt) +} + +func (s *serverVersionAdapter) GetStorageVersion() *semver.Version { + tx := s.be.BatchTx() + tx.Lock() + defer tx.Unlock() + v, err := schema.UnsafeDetectSchemaVersion(s.lg, tx) + if err != nil { + return nil + } + return &v +} + +func (s *serverVersionAdapter) UpdateStorageVersion(target semver.Version) error { + tx := s.be.BatchTx() + tx.Lock() + defer tx.Unlock() + return schema.UnsafeMigrate(s.lg, tx, s.r.storage, target) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go index ea2f0e97e4..9c243294e3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go @@ -18,7 +18,7 @@ import ( "sync" "go.etcd.io/etcd/api/v3/version" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.uber.org/zap" "github.com/coreos/go-semver/semver" @@ -40,6 +40,7 @@ var ( "3.3.0": {AuthCapability: true, V3rpcCapability: true}, "3.4.0": {AuthCapability: true, V3rpcCapability: true}, "3.5.0": {AuthCapability: true, V3rpcCapability: true}, + "3.6.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -63,7 +64,7 
@@ func UpdateCapability(lg *zap.Logger, v *semver.Version) { return } enableMapMu.Lock() - if curVersion != nil && !membership.IsValidVersionChange(v, curVersion) { + if curVersion != nil && !serverversion.IsValidVersionChange(v, curVersion) { enableMapMu.Unlock() return } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go index dcfa3f0695..06067cc444 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/base.go @@ -29,7 +29,6 @@ import ( ) const ( - configPath = "/config" varsPath = "/debug/vars" versionPath = "/version" ) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go index b14a13c9c5..fedf2a9e33 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/raft/v3" + "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" "go.uber.org/zap" ) @@ -39,14 +40,16 @@ const ( // HandleMetricsHealth registers metrics and health handlers. func HandleMetricsHealth(lg *zap.Logger, mux *http.ServeMux, srv etcdserver.ServerV2) { mux.Handle(PathMetrics, promhttp.Handler()) - mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV2Health(lg, srv, excludedAlarms) })) + mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet, serializable bool) Health { return checkV2Health(lg, srv, excludedAlarms) })) } // HandleMetricsHealthForV3 registers metrics and health handlers. it checks health by using v3 range request // and its corresponding timeout. func HandleMetricsHealthForV3(lg *zap.Logger, mux *http.ServeMux, srv *etcdserver.EtcdServer) { mux.Handle(PathMetrics, promhttp.Handler()) - mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet) Health { return checkV3Health(lg, srv, excludedAlarms) })) + mux.Handle(PathHealth, NewHealthHandler(lg, func(excludedAlarms AlarmSet, serializable bool) Health { + return checkV3Health(lg, srv, excludedAlarms, serializable) + })) } // HandlePrometheus registers prometheus handler on '/metrics'. @@ -55,7 +58,7 @@ func HandlePrometheus(mux *http.ServeMux) { } // NewHealthHandler handles '/health' requests. -func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet) Health) http.HandlerFunc { +func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet, Serializable bool) Health) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { if r.Method != http.MethodGet { w.Header().Set("Allow", http.MethodGet) @@ -64,7 +67,12 @@ func NewHealthHandler(lg *zap.Logger, hfunc func(excludedAlarms AlarmSet) Health return } excludedAlarms := getExcludedAlarms(r) - h := hfunc(excludedAlarms) + // Passing the query parameter "serializable=true" ensures that the + // health of the local etcd is checked vs the health of the cluster. + // This is useful for probes attempting to validate the liveness of + // the etcd process vs readiness of the cluster to serve requests. 
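	// Illustrative sketch (editorial, not part of the upstream patch): with the
	// query parameter above, a liveness probe can check only the local member,
	// e.g. assuming a client listener on 127.0.0.1:2379:
	//
	//	resp, err := http.Get("http://127.0.0.1:2379/health?serializable=true")
	//
	// With serializable=true the handler answers from the local member alone,
	// so the probe stays healthy during a leader election; omitting the
	// parameter keeps the stricter behavior in checkHealth below, which also
	// requires a raft leader (cluster-level readiness).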
+ serializableFlag := getSerializableFlag(r) + h := hfunc(excludedAlarms, serializableFlag) defer func() { if h.Health == "true" { healthSuccess.Inc() @@ -118,7 +126,7 @@ func getExcludedAlarms(r *http.Request) (alarms AlarmSet) { alms, found := r.URL.Query()["exclude"] if found { for _, alm := range alms { - if len(alms) == 0 { + if len(alm) == 0 { continue } alarms[alm] = struct{}{} @@ -127,9 +135,13 @@ func getExcludedAlarms(r *http.Request) (alarms AlarmSet) { return alarms } +func getSerializableFlag(r *http.Request) bool { + return r.URL.Query().Get("serializable") == "true" +} + // TODO: etcdserver.ErrNoLeader in health API -func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) Health { +func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet, serializable bool) Health { h := Health{} h.Health = "true" as := srv.Alarms() @@ -137,8 +149,7 @@ func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSe for _, v := range as { alarmName := v.Alarm.String() if _, found := excludedAlarms[alarmName]; found { - lg.Debug("/health excluded alarm", zap.String("alarm", alarmName)) - delete(excludedAlarms, alarmName) + lg.Debug("/health excluded alarm", zap.String("alarm", v.String())) continue } @@ -156,11 +167,7 @@ func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSe } } - if len(excludedAlarms) > 0 { - lg.Warn("fail exclude alarms from health check", zap.String("exclude alarms", fmt.Sprintf("%+v", excludedAlarms))) - } - - if uint64(srv.Leader()) == raft.None { + if !serializable && (uint64(srv.Leader()) == raft.None) { h.Health = "false" h.Reason = "RAFT NO LEADER" lg.Warn("serving /health false; no leader") @@ -170,7 +177,7 @@ func checkHealth(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSe } func checkV2Health(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms AlarmSet) (h Health) { - if h = checkHealth(lg, srv, excludedAlarms); h.Health != "true" { + if h = checkHealth(lg, srv, excludedAlarms, false); h.Health != "true" { return } ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -186,14 +193,14 @@ func checkV2Health(lg *zap.Logger, srv etcdserver.ServerV2, excludedAlarms Alarm return } -func checkV3Health(lg *zap.Logger, srv *etcdserver.EtcdServer, excludedAlarms AlarmSet) (h Health) { - if h = checkHealth(lg, srv, excludedAlarms); h.Health != "true" { +func checkV3Health(lg *zap.Logger, srv *etcdserver.EtcdServer, excludedAlarms AlarmSet, serializable bool) (h Health) { + if h = checkHealth(lg, srv, excludedAlarms, serializable); h.Health != "true" { return } ctx, cancel := context.WithTimeout(context.Background(), srv.Cfg.ReqTimeout()) - _, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1}) + _, err := srv.Range(ctx, &etcdserverpb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable}) cancel() - if err != nil { + if err != nil && err != auth.ErrUserEmpty && err != auth.ErrPermissionDenied { h.Health = "false" h.Reason = fmt.Sprintf("RANGE ERROR:%s", err) lg.Warn("serving /health false; Range fails", zap.Error(err)) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go index 3df9588be8..18ac1a35f5 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go @@ -21,7 +21,6 @@ import ( "encoding/binary" 
"encoding/json" "fmt" - "path" "sort" "strings" "sync" @@ -30,19 +29,17 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/netutil" + "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "github.com/coreos/go-semver/semver" "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" ) -const maxLearners = 1 - // RaftCluster is a list of Members that belong to the same raft cluster type RaftCluster struct { lg *zap.Logger @@ -51,7 +48,7 @@ type RaftCluster struct { cid types.ID v2store v2store.Store - be backend.Backend + be MembershipBackend sync.Mutex // guards the fields below version *semver.Version @@ -60,7 +57,9 @@ type RaftCluster struct { // removed id cannot be reused. removed map[types.ID]bool - downgradeInfo *DowngradeInfo + downgradeInfo *serverversion.DowngradeInfo + maxLearners int + versionChanged *notify.Notifier } // ConfigChangeContext represents a context for confChange. @@ -81,8 +80,8 @@ const ( // NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating // cluster with raft learner member. -func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) { - c := NewCluster(lg) +func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap, opts ...ClusterOption) (*RaftCluster, error) { + c := NewCluster(lg, opts...) for name, urls := range urlsmap { m := NewMember(name, urls, token, nil) if _, ok := c.members[m.ID]; ok { @@ -97,8 +96,8 @@ func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) return c, nil } -func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCluster { - c := NewCluster(lg) +func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member, opts ...ClusterOption) *RaftCluster { + c := NewCluster(lg, opts...) c.cid = id for _, m := range membs { c.members[m.ID] = m @@ -106,15 +105,18 @@ func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member) *RaftCl return c } -func NewCluster(lg *zap.Logger) *RaftCluster { +func NewCluster(lg *zap.Logger, opts ...ClusterOption) *RaftCluster { if lg == nil { lg = zap.NewNop() } + clOpts := newClusterOpts(opts...) 
+ return &RaftCluster{ lg: lg, members: make(map[types.ID]*Member), removed: make(map[types.ID]bool), - downgradeInfo: &DowngradeInfo{Enabled: false}, + downgradeInfo: &serverversion.DowngradeInfo{Enabled: false}, + maxLearners: clOpts.maxLearners, } } @@ -241,13 +243,18 @@ func (c *RaftCluster) genID() { func (c *RaftCluster) SetID(localID, cid types.ID) { c.localID = localID c.cid = cid + c.buildMembershipMetric() } func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st } -func (c *RaftCluster) SetBackend(be backend.Backend) { +func (c *RaftCluster) SetBackend(be MembershipBackend) { c.be = be - mustCreateBackendBuckets(c.be) + c.be.MustCreateBackendBuckets() +} + +func (c *RaftCluster) SetVersionChangedNotifier(n *notify.Notifier) { + c.versionChanged = n } func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { @@ -255,21 +262,26 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { defer c.Unlock() if c.be != nil { - c.version = clusterVersionFromBackend(c.lg, c.be) - c.members, c.removed = membersFromBackend(c.lg, c.be) + c.version = c.be.ClusterVersionFromBackend() + c.members, c.removed = c.be.MustReadMembersFromBackend() } else { c.version = clusterVersionFromStore(c.lg, c.v2store) c.members, c.removed = membersFromStore(c.lg, c.v2store) } + c.buildMembershipMetric() if c.be != nil { - c.downgradeInfo = downgradeInfoFromBackend(c.lg, c.be) + c.downgradeInfo = c.be.DowngradeInfoFromBackend() } - d := &DowngradeInfo{Enabled: false} - if c.downgradeInfo != nil { - d = &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + sv := semver.Must(semver.NewVersion(version.Version)) + if c.downgradeInfo != nil && c.downgradeInfo.Enabled { + c.lg.Info( + "cluster is downgrading to target version", + zap.String("target-cluster-version", c.downgradeInfo.TargetVersion), + zap.String("current-server-version", sv.String()), + ) } - mustDetectDowngrade(c.lg, c.version, d) + serverversion.MustDetectDowngrade(c.lg, sv, c.version) onSet(c.lg, c.version) for _, m := range c.members { @@ -279,6 +291,7 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { zap.String("local-member-id", c.localID.String()), zap.String("recovered-remote-peer-id", m.ID.String()), zap.Strings("recovered-remote-peer-urls", m.PeerURLs), + zap.Bool("recovered-remote-peer-is-learner", m.IsLearner), ) } if c.version != nil { @@ -293,9 +306,9 @@ func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) { // ensures that it is still valid. func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { // TODO: this must be switched to backend as well. 
- members, removed := membersFromStore(c.lg, c.v2store) + membersMap, removedMap := membersFromStore(c.lg, c.v2store) id := types.ID(cc.NodeID) - if removed[id] { + if removedMap[id] { return ErrIDRemoved } switch cc.Type { @@ -306,19 +319,21 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { } if confChangeContext.IsPromote { // promoting a learner member to voting member - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } - if !members[id].IsLearner { + if !membersMap[id].IsLearner { return ErrMemberNotLearner } } else { // adding a new member - if members[id] != nil { + if membersMap[id] != nil { return ErrIDExists } + var members []*Member urls := make(map[string]bool) - for _, m := range members { + for _, m := range membersMap { + members = append(members, m) for _, u := range m.PeerURLs { urls[u] = true } @@ -329,29 +344,24 @@ func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error { } } - if confChangeContext.Member.IsLearner { // the new member is a learner - numLearners := 0 - for _, m := range members { - if m.IsLearner { - numLearners++ - } - } - if numLearners+1 > maxLearners { - return ErrTooManyLearners + if confChangeContext.Member.RaftAttributes.IsLearner && cc.Type == raftpb.ConfChangeAddLearnerNode { // the new member is a learner + scaleUpLearners := true + if err := ValidateMaxLearnerConfig(c.maxLearners, members, scaleUpLearners); err != nil { + return err } } } case raftpb.ConfChangeRemoveNode: - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } case raftpb.ConfChangeUpdateNode: - if members[id] == nil { + if membersMap[id] == nil { return ErrIDNotFound } urls := make(map[string]bool) - for _, m := range members { + for _, m := range membersMap { if m.ID == id { continue } @@ -385,10 +395,11 @@ func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { mustSaveMemberToStore(c.lg, c.v2store, m) } if c.be != nil && shouldApplyV3 { - mustSaveMemberToBackend(c.lg, c.be, m) + c.be.MustSaveMemberToBackend(m) } c.members[m.ID] = m + c.updateMembershipMetric(m.ID, true) c.lg.Info( "added member", @@ -396,6 +407,7 @@ func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) { zap.String("local-member-id", c.localID.String()), zap.String("added-peer-id", m.ID.String()), zap.Strings("added-peer-peer-urls", m.PeerURLs), + zap.Bool("added-peer-is-learner", m.IsLearner), ) } @@ -408,12 +420,13 @@ func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) { mustDeleteMemberFromStore(c.lg, c.v2store, id) } if c.be != nil && shouldApplyV3 { - mustDeleteMemberFromBackend(c.be, id) + c.be.MustDeleteMemberFromBackend(id) } m, ok := c.members[id] delete(c.members, id) c.removed[id] = true + c.updateMembershipMetric(id, false) if ok { c.lg.Info( @@ -422,6 +435,7 @@ func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) { zap.String("local-member-id", c.localID.String()), zap.String("removed-remote-peer-id", id.String()), zap.Strings("removed-remote-peer-urls", m.PeerURLs), + zap.Bool("removed-remote-peer-is-learner", m.IsLearner), ) } else { c.lg.Warn( @@ -443,7 +457,7 @@ func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApply mustUpdateMemberAttrInStore(c.lg, c.v2store, m) } if c.be != nil && shouldApplyV3 { - mustSaveMemberToBackend(c.lg, c.be, m) + c.be.MustSaveMemberToBackend(m) } return } @@ -472,11 +486,12 @@ func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) 
{ defer c.Unlock() c.members[id].RaftAttributes.IsLearner = false + c.updateMembershipMetric(id, true) if c.v2store != nil { mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) } if c.be != nil && shouldApplyV3 { - mustSaveMemberToBackend(c.lg, c.be, c.members[id]) + c.be.MustSaveMemberToBackend(c.members[id]) } c.lg.Info( @@ -495,7 +510,7 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, mustUpdateMemberInStore(c.lg, c.v2store, c.members[id]) } if c.be != nil && shouldApplyV3 { - mustSaveMemberToBackend(c.lg, c.be, c.members[id]) + c.be.MustSaveMemberToBackend(c.members[id]) } c.lg.Info( @@ -504,6 +519,7 @@ func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, zap.String("local-member-id", c.localID.String()), zap.String("updated-remote-peer-id", id.String()), zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs), + zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner), ) } @@ -537,17 +553,21 @@ func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *s } oldVer := c.version c.version = ver - mustDetectDowngrade(c.lg, c.version, c.downgradeInfo) + sv := semver.Must(semver.NewVersion(version.Version)) + serverversion.MustDetectDowngrade(c.lg, sv, c.version) if c.v2store != nil { mustSaveClusterVersionToStore(c.lg, c.v2store, ver) } if c.be != nil && shouldApplyV3 { - mustSaveClusterVersionToBackend(c.be, ver) + c.be.MustSaveClusterVersionToBackend(ver) } if oldVer != nil { ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0) } ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1) + if c.versionChanged != nil { + c.versionChanged.Notify() + } onSet(c.lg, ver) } @@ -676,78 +696,6 @@ func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, m return members, removed } -func membersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) { - return mustReadMembersFromBackend(lg, be) -} - -func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version { - e, err := st.Get(path.Join(storePrefix, "version"), false, false) - if err != nil { - if isKeyNotFound(err) { - return nil - } - lg.Panic( - "failed to get cluster version from store", - zap.String("path", path.Join(storePrefix, "version")), - zap.Error(err), - ) - } - return semver.Must(semver.NewVersion(*e.Node.Value)) -} - -// The field is populated since etcd v3.5. -func clusterVersionFromBackend(lg *zap.Logger, be backend.Backend) *semver.Version { - ckey := backendClusterVersionKey() - tx := be.ReadTx() - tx.RLock() - defer tx.RUnlock() - keys, vals := tx.UnsafeRange(buckets.Cluster, ckey, nil, 0) - if len(keys) == 0 { - return nil - } - if len(keys) != 1 { - lg.Panic( - "unexpected number of keys when getting cluster version from backend", - zap.Int("number-of-key", len(keys)), - ) - } - return semver.Must(semver.NewVersion(string(vals[0]))) -} - -// The field is populated since etcd v3.5. 
-func downgradeInfoFromBackend(lg *zap.Logger, be backend.Backend) *DowngradeInfo { - dkey := backendDowngradeKey() - tx := be.ReadTx() - tx.Lock() - defer tx.Unlock() - keys, vals := tx.UnsafeRange(buckets.Cluster, dkey, nil, 0) - if len(keys) == 0 { - return nil - } - - if len(keys) != 1 { - lg.Panic( - "unexpected number of keys when getting cluster version from backend", - zap.Int("number-of-key", len(keys)), - ) - } - var d DowngradeInfo - if err := json.Unmarshal(vals[0], &d); err != nil { - lg.Panic("failed to unmarshal downgrade information", zap.Error(err)) - } - - // verify the downgrade info from backend - if d.Enabled { - if _, err := semver.NewVersion(d.TargetVersion); err != nil { - lg.Panic( - "unexpected version format of the downgrade target version from backend", - zap.String("target-version", d.TargetVersion), - ) - } - } - return &d -} - // ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs // with the existing cluster. If the validation succeeds, it assigns the IDs // from the existing cluster to the local cluster. @@ -778,25 +726,10 @@ func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *R for _, m := range lms { local.members[m.ID] = m } + local.buildMembershipMetric() return nil } -// IsValidVersionChange checks the two scenario when version is valid to change: -// 1. Downgrade: cluster version is 1 minor version higher than local version, -// cluster version should change. -// 2. Cluster start: when not all members version are available, cluster version -// is set to MinVersion(3.0), when all members are at higher version, cluster version -// is lower than local version, cluster version should change -func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool { - cv = &semver.Version{Major: cv.Major, Minor: cv.Minor} - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - - if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) { - return true - } - return false -} - // IsLocalMemberLearner returns if the local member is raft learner func (c *RaftCluster) IsLocalMemberLearner() bool { c.Lock() @@ -813,33 +746,25 @@ func (c *RaftCluster) IsLocalMemberLearner() bool { } // DowngradeInfo returns the downgrade status of the cluster -func (c *RaftCluster) DowngradeInfo() *DowngradeInfo { +func (c *RaftCluster) DowngradeInfo() *serverversion.DowngradeInfo { c.Lock() defer c.Unlock() if c.downgradeInfo == nil { - return &DowngradeInfo{Enabled: false} + return &serverversion.DowngradeInfo{Enabled: false} } - d := &DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} + d := &serverversion.DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion} return d } -func (c *RaftCluster) SetDowngradeInfo(d *DowngradeInfo, shouldApplyV3 ShouldApplyV3) { +func (c *RaftCluster) SetDowngradeInfo(d *serverversion.DowngradeInfo, shouldApplyV3 ShouldApplyV3) { c.Lock() defer c.Unlock() if c.be != nil && shouldApplyV3 { - mustSaveDowngradeToBackend(c.lg, c.be, d) + c.be.MustSaveDowngradeToBackend(d) } c.downgradeInfo = d - - if d.Enabled { - c.lg.Info( - "The server is ready to downgrade", - zap.String("target-version", d.TargetVersion), - zap.String("server-version", version.Version), - ) - } } // IsMemberExist returns if the member with the given id exists in cluster. @@ -868,9 +793,9 @@ func (c *RaftCluster) VotingMemberIDs() []types.ID { // members, such that they fully reflect internal RaftCluster's storage. 
 func (c *RaftCluster) PushMembershipToStorage() {
 	if c.be != nil {
-		TrimMembershipFromBackend(c.lg, c.be)
+		c.be.TrimMembershipFromBackend()
 		for _, m := range c.members {
-			mustSaveMemberToBackend(c.lg, c.be, m)
+			c.be.MustSaveMemberToBackend(m)
 		}
 	}
 	if c.v2store != nil {
@@ -880,3 +805,53 @@ func (c *RaftCluster) PushMembershipToStorage() {
 		}
 	}
 }
+
+// buildMembershipMetric sets the knownPeers metric based on the current
+// members of the cluster.
+func (c *RaftCluster) buildMembershipMetric() {
+	if c.localID == 0 {
+		// We don't know our own id yet.
+		return
+	}
+	for p := range c.members {
+		knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(1)
+	}
+	for p := range c.removed {
+		knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(0)
+	}
+}
+
+// updateMembershipMetric updates the knownPeers metric to indicate that
+// the given peer is now (un)known.
+func (c *RaftCluster) updateMembershipMetric(peer types.ID, known bool) {
+	if c.localID == 0 {
+		// We don't know our own id yet.
+		return
+	}
+	v := float64(0)
+	if known {
+		v = 1
+	}
+	knownPeers.WithLabelValues(c.localID.String(), peer.String()).Set(v)
+}
+
+// ValidateMaxLearnerConfig verifies the existing learner members in the cluster membership and an optional N+1 learner
+// scale up are not more than maxLearners.
+func ValidateMaxLearnerConfig(maxLearners int, members []*Member, scaleUpLearners bool) error {
+	numLearners := 0
+	for _, m := range members {
+		if m.IsLearner {
+			numLearners++
+		}
+	}
+	// Validate config can accommodate scale up.
+	if scaleUpLearners {
+		numLearners++
+	}
+
+	if numLearners > maxLearners {
+		return ErrTooManyLearners
+	}
+
+	return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go
new file mode 100644
index 0000000000..204fbf04d2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+const DefaultMaxLearners = 1
+
+type ClusterOptions struct {
+	maxLearners int
+}
+
+// ClusterOption are options which can be applied to the raft cluster.
+type ClusterOption func(*ClusterOptions)
+
+func newClusterOpts(opts ...ClusterOption) *ClusterOptions {
+	clOpts := &ClusterOptions{}
+	clOpts.applyOpts(opts)
+	return clOpts
+}
+
+func (co *ClusterOptions) applyOpts(opts []ClusterOption) {
+	for _, opt := range opts {
+		opt(co)
+	}
+}
+
+// WithMaxLearners sets the maximum number of learners that can exist in the cluster membership.
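+//
+// Illustrative sketch, not part of the vendored source: with the functional
+// options above, a caller that threads options through would do
+//
+//	opts := newClusterOpts(WithMaxLearners(2)) // opts.maxLearners == 2
+//
+// Passing no option leaves maxLearners at its zero value, so callers are
+// presumably expected to pass WithMaxLearners(DefaultMaxLearners) explicitly;
+// the resulting limit backs checks such as ValidateMaxLearnerConfig above.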
+func WithMaxLearners(max int) ClusterOption {
+	return func(co *ClusterOptions) {
+		co.maxLearners = max
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go
index b3212bc80c..f08763779f 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go
@@ -24,8 +24,17 @@ var (
 		Help: "Which version is running. 1 for 'cluster_version' label with current cluster version",
 	},
 		[]string{"cluster_version"})
+	knownPeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+		Namespace: "etcd",
+		Subsystem: "network",
+		Name:      "known_peers",
+		Help:      "The current number of known peers.",
+	},
+		[]string{"Local", "Remote"},
+	)
 )
 
 func init() {
 	prometheus.MustRegister(ClusterVersionMetrics)
+	prometheus.MustRegister(knownPeers)
 }
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go
index 0bab3e42ed..bee385b060 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The etcd Authors
+// Copyright 2021 The etcd Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,311 +15,37 @@ package membership
 
 import (
-	"encoding/json"
-	"fmt"
 	"path"
 
 	"go.etcd.io/etcd/client/pkg/v3/types"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/etcdserver/version"
 
 	"github.com/coreos/go-semver/semver"
 	"go.uber.org/zap"
 )
 
-const (
-	attributesSuffix     = "attributes"
-	raftAttributesSuffix = "raftAttributes"
-
-	// the prefix for storing membership related information in store provided by store pkg.
-	storePrefix = "/0"
-)
-
-var (
-	StoreMembersPrefix        = path.Join(storePrefix, "members")
-	storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
-)
-
-func mustSaveMemberToBackend(lg *zap.Logger, be backend.Backend, m *Member) {
-	mkey := backendMemberKey(m.ID)
-	mvalue, err := json.Marshal(m)
-	if err != nil {
-		lg.Panic("failed to marshal member", zap.Error(err))
-	}
-
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafePut(buckets.Members, mkey, mvalue)
+type MembershipBackend interface {
+	ClusterVersionBackend
+	MemberBackend
+	DowngradeInfoBackend
+	MustCreateBackendBuckets()
 }
 
-// TrimClusterFromBackend removes all information about cluster (versions)
-// from the v3 backend.
-func TrimClusterFromBackend(be backend.Backend) error {
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafeDeleteBucket(buckets.Cluster)
-	return nil
-}
-
-func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
-	mkey := backendMemberKey(id)
-
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafeDelete(buckets.Members, mkey)
-	tx.UnsafePut(buckets.MembersRemoved, mkey, []byte("removed"))
+type ClusterVersionBackend interface {
+	ClusterVersionFromBackend() *semver.Version
+	MustSaveClusterVersionToBackend(version *semver.Version)
 }
 
-func readMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool, error) {
-	members := make(map[types.ID]*Member)
-	removed := make(map[types.ID]bool)
-
-	tx := be.ReadTx()
-	tx.RLock()
-	defer tx.RUnlock()
-	err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error {
-		memberId := mustParseMemberIDFromBytes(lg, k)
-		m := &Member{ID: memberId}
-		if err := json.Unmarshal(v, &m); err != nil {
-			return err
-		}
-		members[memberId] = m
-		return nil
-	})
-	if err != nil {
-		return nil, nil, fmt.Errorf("couldn't read members from backend: %w", err)
-	}
-
-	err = tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error {
-		memberId := mustParseMemberIDFromBytes(lg, k)
-		removed[memberId] = true
-		return nil
-	})
-	if err != nil {
-		return nil, nil, fmt.Errorf("couldn't read members_removed from backend: %w", err)
-	}
-	return members, removed, nil
+type MemberBackend interface {
+	MustReadMembersFromBackend() (map[types.ID]*Member, map[types.ID]bool)
+	MustSaveMemberToBackend(*Member)
+	TrimMembershipFromBackend() error
+	MustDeleteMemberFromBackend(types.ID)
 }
 
-func mustReadMembersFromBackend(lg *zap.Logger, be backend.Backend) (map[types.ID]*Member, map[types.ID]bool) {
-	members, removed, err := readMembersFromBackend(lg, be)
-	if err != nil {
-		lg.Panic("couldn't read members from backend", zap.Error(err))
-	}
-	return members, removed
-}
-
-// TrimMembershipFromBackend removes all information about members &
-// removed_members from the v3 backend.
-func TrimMembershipFromBackend(lg *zap.Logger, be backend.Backend) error {
-	lg.Info("Trimming membership information from the backend...")
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	err := tx.UnsafeForEach(buckets.Members, func(k, v []byte) error {
-		tx.UnsafeDelete(buckets.Members, k)
-		lg.Debug("Removed member from the backend",
-			zap.Stringer("member", mustParseMemberIDFromBytes(lg, k)))
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-	return tx.UnsafeForEach(buckets.MembersRemoved, func(k, v []byte) error {
-		tx.UnsafeDelete(buckets.MembersRemoved, k)
-		lg.Debug("Removed removed_member from the backend",
-			zap.Stringer("member", mustParseMemberIDFromBytes(lg, k)))
-		return nil
-	})
-}
-
-// TrimMembershipFromV2Store removes all information about members &
-// removed_members from the v2 store.
-func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error {
-	members, removed := membersFromStore(lg, s)
-
-	for mID := range members {
-		_, err := s.Delete(MemberStoreKey(mID), true, true)
-		if err != nil {
-			return err
-		}
-	}
-	for mID := range removed {
-		_, err := s.Delete(RemovedMemberStoreKey(mID), true, true)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// The field is populated since etcd v3.5.
-func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
-	ckey := backendClusterVersionKey()
-
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafePut(buckets.Cluster, ckey, []byte(ver.String()))
-}
-
-// The field is populated since etcd v3.5.
-func mustSaveDowngradeToBackend(lg *zap.Logger, be backend.Backend, downgrade *DowngradeInfo) {
-	dkey := backendDowngradeKey()
-	dvalue, err := json.Marshal(downgrade)
-	if err != nil {
-		lg.Panic("failed to marshal downgrade information", zap.Error(err))
-	}
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafePut(buckets.Cluster, dkey, dvalue)
-}
-
-func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) {
-	b, err := json.Marshal(m.RaftAttributes)
-	if err != nil {
-		lg.Panic("failed to marshal raftAttributes", zap.Error(err))
-	}
-	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
-	if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
-		lg.Panic(
-			"failed to save member to store",
-			zap.String("path", p),
-			zap.Error(err),
-		)
-	}
-}
-
-func mustDeleteMemberFromStore(lg *zap.Logger, s v2store.Store, id types.ID) {
-	if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
-		lg.Panic(
-			"failed to delete member from store",
-			zap.String("path", MemberStoreKey(id)),
-			zap.Error(err),
-		)
-	}
-	if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
-		lg.Panic(
-			"failed to create removedMember",
-			zap.String("path", RemovedMemberStoreKey(id)),
-			zap.Error(err),
-		)
-	}
-}
-
-func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) {
-	b, err := json.Marshal(m.RaftAttributes)
-	if err != nil {
-		lg.Panic("failed to marshal raftAttributes", zap.Error(err))
-	}
-	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
-	if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
-		lg.Panic(
-			"failed to update raftAttributes",
-			zap.String("path", p),
-			zap.Error(err),
-		)
-	}
-}
-
-func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) {
-	b, err := json.Marshal(m.Attributes)
-	if err != nil {
-		lg.Panic("failed to marshal attributes", zap.Error(err))
-	}
-	p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
-	if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
-		lg.Panic(
-			"failed to update attributes",
-			zap.String("path", p),
-			zap.Error(err),
-		)
-	}
-}
-
-func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) {
-	if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
-		lg.Panic(
-			"failed to save cluster version to store",
-			zap.String("path", StoreClusterVersionKey()),
-			zap.Error(err),
-		)
-	}
-}
-
-// nodeToMember builds member from a key value node.
-// the child nodes of the given node MUST be sorted by key.
-func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) {
-	m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)}
-	attrs := make(map[string][]byte)
-	raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
-	attrKey := path.Join(n.Key, attributesSuffix)
-	for _, nn := range n.Nodes {
-		if nn.Key != raftAttrKey && nn.Key != attrKey {
-			return nil, fmt.Errorf("unknown key %q", nn.Key)
-		}
-		attrs[nn.Key] = []byte(*nn.Value)
-	}
-	if data := attrs[raftAttrKey]; data != nil {
-		if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
-			return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
-		}
-	} else {
-		return nil, fmt.Errorf("raftAttributes key doesn't exist")
-	}
-	if data := attrs[attrKey]; data != nil {
-		if err := json.Unmarshal(data, &m.Attributes); err != nil {
-			return m, fmt.Errorf("unmarshal attributes error: %v", err)
-		}
-	}
-	return m, nil
-}
-
-func backendMemberKey(id types.ID) []byte {
-	return []byte(id.String())
-}
-
-func backendClusterVersionKey() []byte {
-	return []byte("clusterVersion")
-}
-
-func backendDowngradeKey() []byte {
-	return []byte("downgrade")
-}
-
-func mustCreateBackendBuckets(be backend.Backend) {
-	tx := be.BatchTx()
-	tx.Lock()
-	defer tx.Unlock()
-	tx.UnsafeCreateBucket(buckets.Members)
-	tx.UnsafeCreateBucket(buckets.MembersRemoved)
-	tx.UnsafeCreateBucket(buckets.Cluster)
-}
-
-func MemberStoreKey(id types.ID) string {
-	return path.Join(StoreMembersPrefix, id.String())
-}
-
-func StoreClusterVersionKey() string {
-	return path.Join(storePrefix, "version")
-}
-
-func MemberAttributesStorePath(id types.ID) string {
-	return path.Join(MemberStoreKey(id), attributesSuffix)
-}
-
-func mustParseMemberIDFromBytes(lg *zap.Logger, key []byte) types.ID {
-	id, err := types.IDFromString(string(key))
-	if err != nil {
-		lg.Panic("failed to parse member id from key", zap.Error(err))
-	}
-	return id
+type DowngradeInfoBackend interface {
+	MustSaveDowngradeToBackend(*version.DowngradeInfo)
+	DowngradeInfoFromBackend() *version.DowngradeInfo
 }
 
 func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID {
@@ -329,7 +55,3 @@ func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID {
 	}
 	return id
 }
-
-func RemovedMemberStoreKey(id types.ID) string {
-	return path.Join(storeRemovedMembersPrefix, id.String())
-}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go
index 8505c63f36..d428cb66e2 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go
@@ -15,7 +15,29 @@ package membership
 
 import (
+	"encoding/json"
+	"fmt"
+	"path"
+
+	"go.etcd.io/etcd/client/pkg/v3/types"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+
+	"github.com/coreos/go-semver/semver"
+	"go.uber.org/zap"
+)
+
+const (
+	// the prefix for storing membership related information in store provided by store pkg.
+	storePrefix = "/0"
+
+	attributesSuffix     = "attributes"
+	raftAttributesSuffix = "raftAttributes"
+)
+
+var (
+	StoreMembersPrefix        = path.Join(storePrefix, "members")
+	storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
 )
 
 // IsMetaStoreOnly verifies if the given `store` contains only
@@ -34,3 +56,155 @@
 
 	return true, nil
 }
+
+// TrimMembershipFromV2Store removes all information about members &
+// removed_members from the v2 store.
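+//
+// Illustrative usage, not part of the vendored source, assuming a
+// v2store.Store st that still carries membership keys:
+//
+//	if err := TrimMembershipFromV2Store(lg, st); err != nil {
+//		lg.Warn("failed to trim membership from v2 store", zap.Error(err))
+//	}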
+func TrimMembershipFromV2Store(lg *zap.Logger, s v2store.Store) error {
+	members, removed := membersFromStore(lg, s)
+
+	for mID := range members {
+		_, err := s.Delete(MemberStoreKey(mID), true, true)
+		if err != nil {
+			return err
+		}
+	}
+	for mID := range removed {
+		_, err := s.Delete(RemovedMemberStoreKey(mID), true, true)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) {
+	b, err := json.Marshal(m.RaftAttributes)
+	if err != nil {
+		lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+	}
+	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+	if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+		lg.Panic(
+			"failed to save member to store",
+			zap.String("path", p),
+			zap.Error(err),
+		)
+	}
+}
+
+func mustDeleteMemberFromStore(lg *zap.Logger, s v2store.Store, id types.ID) {
+	if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
+		lg.Panic(
+			"failed to delete member from store",
+			zap.String("path", MemberStoreKey(id)),
+			zap.Error(err),
+		)
+	}
+	if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+		lg.Panic(
+			"failed to create removedMember",
+			zap.String("path", RemovedMemberStoreKey(id)),
+			zap.Error(err),
+		)
+	}
+}
+
+func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+	b, err := json.Marshal(m.RaftAttributes)
+	if err != nil {
+		lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+	}
+	p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+	if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+		lg.Panic(
+			"failed to update raftAttributes",
+			zap.String("path", p),
+			zap.Error(err),
+		)
+	}
+}
+
+func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+	b, err := json.Marshal(m.Attributes)
+	if err != nil {
+		lg.Panic("failed to marshal attributes", zap.Error(err))
+	}
+	p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
+	if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+		lg.Panic(
+			"failed to update attributes",
+			zap.String("path", p),
+			zap.Error(err),
+		)
+	}
+}
+
+func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) {
+	if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+		lg.Panic(
+			"failed to save cluster version to store",
+			zap.String("path", StoreClusterVersionKey()),
+			zap.Error(err),
+		)
+	}
+}
+
+// nodeToMember builds member from a key value node.
+// the child nodes of the given node MUST be sorted by key.
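+//
+// Editorial note, not in the vendored source: given the prefixes defined
+// above, the node for a hypothetical member 8e9e05c52164694d would carry
+// two children, roughly
+//
+//	/0/members/8e9e05c52164694d/raftAttributes -> {"peerURLs":[...],"isLearner":false}
+//	/0/members/8e9e05c52164694d/attributes     -> {"name":"node1","clientURLs":[...]}
+//
+// where raftAttributes is mandatory and attributes may be absent.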
+func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) {
+	m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)}
+	attrs := make(map[string][]byte)
+	raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
+	attrKey := path.Join(n.Key, attributesSuffix)
+	for _, nn := range n.Nodes {
+		if nn.Key != raftAttrKey && nn.Key != attrKey {
+			return nil, fmt.Errorf("unknown key %q", nn.Key)
+		}
+		attrs[nn.Key] = []byte(*nn.Value)
+	}
+	if data := attrs[raftAttrKey]; data != nil {
+		if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
+			return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
+		}
+	} else {
+		return nil, fmt.Errorf("raftAttributes key doesn't exist")
+	}
+	if data := attrs[attrKey]; data != nil {
+		if err := json.Unmarshal(data, &m.Attributes); err != nil {
+			return m, fmt.Errorf("unmarshal attributes error: %v", err)
+		}
+	}
+	return m, nil
+}
+
+func StoreClusterVersionKey() string {
+	return path.Join(storePrefix, "version")
+}
+
+func RemovedMemberStoreKey(id types.ID) string {
+	return path.Join(storeRemovedMembersPrefix, id.String())
+}
+
+func MemberStoreKey(id types.ID) string {
+	return path.Join(StoreMembersPrefix, id.String())
+}
+
+func MemberAttributesStorePath(id types.ID) string {
+	return path.Join(MemberStoreKey(id), attributesSuffix)
+}
+
+func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
+	e, err := st.Get(path.Join(storePrefix, "version"), false, false)
+	if err != nil {
+		if isKeyNotFound(err) {
+			return nil
+		}
+		lg.Panic(
+			"failed to get cluster version from store",
+			zap.String("path", path.Join(storePrefix, "version")),
+			zap.Error(err),
+		)
+	}
+	return semver.Must(semver.NewVersion(*e.Node.Value))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go
index 149c50b79c..3c784b2bbf 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go
@@ -18,7 +18,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"path"
 	"strings"
@@ -111,7 +111,7 @@ func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	// Limit the data size that could be read from the request body, which ensures that read from
 	// connection will not time out accidentally due to possible blocking in underlying implementation.
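 	// (Editorial note, not in the vendored source: connReadLimitByte caps
 	// pipeline request bodies; it is a small package constant, reportedly
 	// 64 KiB, so an oversized Raft message fails fast instead of stalling
 	// the connection.)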
 	limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
-	b, err := ioutil.ReadAll(limitedr)
+	b, err := io.ReadAll(limitedr)
 	if err != nil {
 		h.lg.Warn(
 			"failed to read Raft message",
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go
index 444d6bde94..c2f79e08a0 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go
@@ -250,30 +250,16 @@ func (p *peer) send(m raftpb.Message) {
 		if isMsgSnap(m) {
 			p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
 		}
-		if p.status.isActive() {
-			if p.lg != nil {
-				p.lg.Warn(
-					"dropped internal Raft message since sending buffer is full (overloaded network)",
-					zap.String("message-type", m.Type.String()),
-					zap.String("local-member-id", p.localID.String()),
-					zap.String("from", types.ID(m.From).String()),
-					zap.String("remote-peer-id", p.id.String()),
-					zap.String("remote-peer-name", name),
-					zap.Bool("remote-peer-active", p.status.isActive()),
-				)
-			}
-		} else {
-			if p.lg != nil {
-				p.lg.Warn(
-					"dropped internal Raft message since sending buffer is full (overloaded network)",
-					zap.String("message-type", m.Type.String()),
-					zap.String("local-member-id", p.localID.String()),
-					zap.String("from", types.ID(m.From).String()),
-					zap.String("remote-peer-id", p.id.String()),
-					zap.String("remote-peer-name", name),
-					zap.Bool("remote-peer-active", p.status.isActive()),
-				)
-			}
+		if p.lg != nil {
+			p.lg.Warn(
+				"dropped internal Raft message since sending buffer is full",
+				zap.String("message-type", m.Type.String()),
+				zap.String("local-member-id", p.localID.String()),
+				zap.String("from", types.ID(m.From).String()),
+				zap.String("remote-peer-id", p.id.String()),
+				zap.String("remote-peer-name", name),
+				zap.Bool("remote-peer-active", p.status.isActive()),
+			)
 		}
 		sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
 	}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go
index de3b459118..96b35c2544 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go
@@ -18,7 +18,7 @@ import (
 	"bytes"
 	"context"
 	"errors"
-	"io/ioutil"
+	"io"
 	"runtime"
 	"sync"
 	"time"
@@ -154,7 +154,7 @@ func (p *pipeline) post(data []byte) (err error) {
 		return err
 	}
 	defer resp.Body.Close()
-	b, err := ioutil.ReadAll(resp.Body)
+	b, err := io.ReadAll(resp.Body)
 	if err != nil {
 		p.picker.unreachable(u)
 		return err
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go
index 84eb56bd0b..9f24a565d4 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go
@@ -18,7 +18,6 @@ import (
 	"bytes"
 	"context"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"time"
@@ -169,7 +168,7 @@ func (s *snapshotSender) post(req *http.Request) (err error) {
 		// prevents from reading the body forever when the other side dies right after
 		// successfully receives the request body.
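 		// (Editorial note, not in the vendored source: httputil.GracefulClose
 		// is understood to drain any unread body bytes before closing, which
 		// keeps the underlying connection reusable.)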
 		time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
-		body, err := ioutil.ReadAll(resp.Body)
+		body, err := io.ReadAll(resp.Body)
 		result <- responseAndError{resp, body, err}
 	}()
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go
index 321fd5283d..83a5649a7c 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go
@@ -18,7 +18,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"path"
 	"strings"
@@ -59,6 +58,7 @@ var (
 		"3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
 		"3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
 		"3.5.0": {streamTypeMsgAppV2, streamTypeMessage},
+		"3.6.0": {streamTypeMsgAppV2, streamTypeMessage},
 	}
 )
 
@@ -628,7 +628,7 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
 		return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
 
 	case http.StatusPreconditionFailed:
-		b, err := ioutil.ReadAll(resp.Body)
+		b, err := io.ReadAll(resp.Body)
 		if err != nil {
 			cr.picker.unreachable(u)
 			return nil, err
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go
index 940e8473bc..fa3011cb39 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go
@@ -339,24 +339,30 @@ func (t *Transport) RemoveAllPeers() {
 // the caller of this function must have the peers mutex.
 func (t *Transport) removePeer(id types.ID) {
-	if peer, ok := t.peers[id]; ok {
+	// etcd may remove a member again on startup due to WAL files replaying.
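+	// (Editorial note, not in the vendored source: removal is therefore made
+	// idempotent below; an unknown peer id is now logged and skipped, where
+	// the previous code treated it as a fatal inconsistency and panicked.)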
+	peer, ok := t.peers[id]
+	if ok {
 		peer.stop()
-	} else {
-		if t.Logger != nil {
-			t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String()))
-		}
+		delete(t.peers, id)
+		delete(t.LeaderStats.Followers, id.String())
+		t.pipelineProber.Remove(id.String())
+		t.streamProber.Remove(id.String())
 	}
-	delete(t.peers, id)
-	delete(t.LeaderStats.Followers, id.String())
-	t.pipelineProber.Remove(id.String())
-	t.streamProber.Remove(id.String())
 
 	if t.Logger != nil {
-		t.Logger.Info(
-			"removed remote peer",
-			zap.String("local-member-id", t.ID.String()),
-			zap.String("removed-remote-peer-id", id.String()),
-		)
+		if ok {
+			t.Logger.Info(
+				"removed remote peer",
+				zap.String("local-member-id", t.ID.String()),
+				zap.String("removed-remote-peer-id", id.String()),
+			)
+		} else {
+			t.Logger.Warn(
+				"skipped removing already removed peer",
+				zap.String("local-member-id", t.ID.String()),
+				zap.String("removed-remote-peer-id", id.String()),
+			)
+		}
 	}
 }
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go
index af653e1d5f..1d42557bcc 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go
@@ -18,7 +18,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"time"
@@ -36,7 +35,7 @@ var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
 func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
 	start := time.Now()
 
-	f, err := ioutil.TempFile(s.dir, "tmp")
+	f, err := os.CreateTemp(s.dir, "tmp")
 	if err != nil {
 		return 0, err
 	}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go
index 52cc0ae267..a3735e9143 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go
@@ -18,7 +18,6 @@ import (
 	"errors"
 	"fmt"
 	"hash/crc32"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sort"
@@ -31,7 +30,7 @@ import (
 	"go.etcd.io/etcd/raft/v3"
 	"go.etcd.io/etcd/raft/v3/raftpb"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb"
-	"go.etcd.io/etcd/server/v3/wal/walpb"
+	"go.etcd.io/etcd/server/v3/storage/wal/walpb"
 
 	"go.uber.org/zap"
 )
@@ -160,7 +159,7 @@ func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) {
 
 // Read reads the snapshot named by snapname and returns the snapshot.
 func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
-	b, err := ioutil.ReadFile(snapname)
+	b, err := os.ReadFile(snapname)
 	if err != nil {
 		if lg != nil {
 			lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go
deleted file mode 100644
index e76ee8b2a1..0000000000
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth.go
+++ /dev/null
@@ -1,670 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2auth implements etcd authentication.
-package v2auth
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"path"
-	"reflect"
-	"sort"
-	"strings"
-	"time"
-
-	"go.etcd.io/etcd/api/v3/etcdserverpb"
-	"go.etcd.io/etcd/client/pkg/v3/types"
-	"go.etcd.io/etcd/server/v3/etcdserver"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
-	"go.uber.org/zap"
-	"golang.org/x/crypto/bcrypt"
-)
-
-const (
-	// StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
-	StorePermsPrefix = "/2"
-
-	// RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
-	RootRoleName = "root"
-
-	// GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
-	GuestRoleName = "guest"
-)
-
-var rootRole = Role{
-	Role: RootRoleName,
-	Permissions: Permissions{
-		KV: RWPermission{
-			Read:  []string{"/*"},
-			Write: []string{"/*"},
-		},
-	},
-}
-
-var guestRole = Role{
-	Role: GuestRoleName,
-	Permissions: Permissions{
-		KV: RWPermission{
-			Read:  []string{"/*"},
-			Write: []string{"/*"},
-		},
-	},
-}
-
-type doer interface {
-	Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
-}
-
-type Store interface {
-	AllUsers() ([]string, error)
-	GetUser(name string) (User, error)
-	CreateOrUpdateUser(user User) (out User, created bool, err error)
-	CreateUser(user User) (User, error)
-	DeleteUser(name string) error
-	UpdateUser(user User) (User, error)
-	AllRoles() ([]string, error)
-	GetRole(name string) (Role, error)
-	CreateRole(role Role) error
-	DeleteRole(name string) error
-	UpdateRole(role Role) (Role, error)
-	AuthEnabled() bool
-	EnableAuth() error
-	DisableAuth() error
-	PasswordStore
-}
-
-type PasswordStore interface {
-	CheckPassword(user User, password string) bool
-	HashPassword(password string) (string, error)
-}
-
-type store struct {
-	lg          *zap.Logger
-	server      doer
-	timeout     time.Duration
-	ensuredOnce bool
-
-	PasswordStore
-}
-
-type User struct {
-	User     string   `json:"user"`
-	Password string   `json:"password,omitempty"`
-	Roles    []string `json:"roles"`
-	Grant    []string `json:"grant,omitempty"`
-	Revoke   []string `json:"revoke,omitempty"`
-}
-
-type Role struct {
-	Role        string       `json:"role"`
-	Permissions Permissions  `json:"permissions"`
-	Grant       *Permissions `json:"grant,omitempty"`
-	Revoke      *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
-	KV RWPermission `json:"kv"`
-}
-
-func (p *Permissions) IsEmpty() bool {
-	return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
-}
-
-type RWPermission struct {
-	Read  []string `json:"read"`
-	Write []string `json:"write"`
-}
-
-type Error struct {
-	Status int
-	Errmsg string
-}
-
-func (ae Error) Error() string   { return ae.Errmsg }
-func (ae Error) HTTPStatus() int { return ae.Status }
-
-func authErr(hs int, s string, v ...interface{}) Error {
-	return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
-}
-
-func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store {
-	if lg == nil {
-		lg = zap.NewNop()
-	}
-	s := &store{
-		lg:            lg,
-		server:        server,
-		timeout:       timeout,
-		PasswordStore: passwordStore{},
-	}
-	return s
-}
-
-// passwordStore implements PasswordStore using bcrypt to hash user passwords
-type passwordStore struct{}
-
-func (passwordStore) CheckPassword(user User, password string) bool {
-	err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
-	return err == nil
-}
-
-func (passwordStore) HashPassword(password string) (string, error) {
-	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
-	return string(hash), err
-}
-
-func (s *store) AllUsers() ([]string, error) {
-	resp, err := s.requestResource("/users/", false)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return []string{}, nil
-			}
-		}
-		return nil, err
-	}
-	var nodes []string
-	for _, n := range resp.Event.Node.Nodes {
-		_, user := path.Split(n.Key)
-		nodes = append(nodes, user)
-	}
-	sort.Strings(nodes)
-	return nodes, nil
-}
-
-func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
-
-// CreateOrUpdateUser should be only used for creating the new user or when you are not
-// sure if it is a create or update. (When only password is passed in, we are not sure
-// if it is a update or create)
-func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
-	_, err = s.getUser(user.User, true)
-	if err == nil {
-		out, err = s.UpdateUser(user)
-		return out, false, err
-	}
-	u, err := s.CreateUser(user)
-	return u, true, err
-}
-
-func (s *store) CreateUser(user User) (User, error) {
-	// Attach root role to root user.
-	if user.User == "root" {
-		user = attachRootRole(user)
-	}
-	u, err := s.createUserInternal(user)
-	if err == nil {
-		s.lg.Info("created a user", zap.String("user-name", user.User))
-	}
-	return u, err
-}
-
-func (s *store) createUserInternal(user User) (User, error) {
-	if user.Password == "" {
-		return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
-	}
-	hash, err := s.HashPassword(user.Password)
-	if err != nil {
-		return user, err
-	}
-	user.Password = hash
-
-	_, err = s.createResource("/users/"+user.User, user)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeNodeExist {
-				return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
-			}
-		}
-	}
-	return user, err
-}
-
-func (s *store) DeleteUser(name string) error {
-	if s.AuthEnabled() && name == "root" {
-		return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
-	}
-	err := s.deleteResource("/users/" + name)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return authErr(http.StatusNotFound, "User %s does not exist", name)
-			}
-		}
-		return err
-	}
-	s.lg.Info("deleted a user", zap.String("user-name", name))
-	return nil
-}
-
-func (s *store) UpdateUser(user User) (User, error) {
-	old, err := s.getUser(user.User, true)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
-			}
-		}
-		return old, err
-	}
-
-	newUser, err := old.merge(s.lg, user, s.PasswordStore)
-	if err != nil {
-		return old, err
-	}
-	if reflect.DeepEqual(old, newUser) {
-		return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
-	}
-	_, err = s.updateResource("/users/"+user.User, newUser)
-	if err == nil {
-		s.lg.Info("updated a user", zap.String("user-name", user.User))
-	}
-	return newUser, err
-}
-
-func (s *store) AllRoles() ([]string, error) {
-	nodes := []string{RootRoleName}
-	resp, err := s.requestResource("/roles/", false)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return nodes, nil
-			}
-		}
-		return nil, err
-	}
-	for _, n := range resp.Event.Node.Nodes {
-		_, role := path.Split(n.Key)
-		nodes = append(nodes, role)
-	}
-	sort.Strings(nodes)
-	return nodes, nil
-}
-
-func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
-
-func (s *store) CreateRole(role Role) error {
-	if role.Role == RootRoleName {
-		return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
-	}
-	_, err := s.createResource("/roles/"+role.Role, role)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeNodeExist {
-				return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
-			}
-		}
-	}
-	if err == nil {
-		s.lg.Info("created a new role", zap.String("role-name", role.Role))
-	}
-	return err
-}
-
-func (s *store) DeleteRole(name string) error {
-	if name == RootRoleName {
-		return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
-	}
-	err := s.deleteResource("/roles/" + name)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
-			}
-		}
-	}
-	if err == nil {
-		s.lg.Info("delete a new role", zap.String("role-name", name))
-	}
-	return err
-}
-
-func (s *store) UpdateRole(role Role) (Role, error) {
-	if role.Role == RootRoleName {
-		return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
-	}
-	old, err := s.getRole(role.Role, true)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
-			}
-		}
-		return old, err
-	}
-	newRole, err := old.merge(s.lg, role)
-	if err != nil {
-		return old, err
-	}
-	if reflect.DeepEqual(old, newRole) {
-		return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
-	}
-	_, err = s.updateResource("/roles/"+role.Role, newRole)
-	if err == nil {
-		s.lg.Info("updated a new role", zap.String("role-name", role.Role))
-	}
-	return newRole, err
-}
-
-func (s *store) AuthEnabled() bool {
-	return s.detectAuth()
-}
-
-func (s *store) EnableAuth() error {
-	if s.AuthEnabled() {
-		return authErr(http.StatusConflict, "already enabled")
-	}
-
-	if _, err := s.getUser("root", true); err != nil {
-		return authErr(http.StatusConflict, "No root user available, please create one")
-	}
-	if _, err := s.getRole(GuestRoleName, true); err != nil {
-		s.lg.Info(
-			"no guest role access found; creating default",
-			zap.String("role-name", GuestRoleName),
-		)
-		if err := s.CreateRole(guestRole); err != nil {
-			s.lg.Warn(
-				"failed to create a guest role; aborting auth enable",
-				zap.String("role-name", GuestRoleName),
-				zap.Error(err),
-			)
-			return err
-		}
-	}
-
-	if err := s.enableAuth(); err != nil {
-		s.lg.Warn("failed to enable auth", zap.Error(err))
-		return err
-	}
-
-	s.lg.Info("enabled auth")
-	return nil
-}
-
-func (s *store) DisableAuth() error {
-	if !s.AuthEnabled() {
-		return authErr(http.StatusConflict, "already disabled")
-	}
-
-	err := s.disableAuth()
-	if err == nil {
-		s.lg.Info("disabled auth")
-	} else {
-		s.lg.Warn("failed to disable auth", zap.Error(err))
-	}
-	return err
-}
-
-// merge applies the properties of the passed-in User to the User on which it
-// is called and returns a new User with these modifications applied. Think of
-// all Users as immutable sets of data. Merge allows you to perform the set
-// operations (desired grants and revokes) atomically
-func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) {
-	var out User
-	if ou.User != nu.User {
-		return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
-	}
-	out.User = ou.User
-	if nu.Password != "" {
-		hash, err := s.HashPassword(nu.Password)
-		if err != nil {
-			return ou, err
-		}
-		out.Password = hash
-	} else {
-		out.Password = ou.Password
-	}
-	currentRoles := types.NewUnsafeSet(ou.Roles...)
-	for _, g := range nu.Grant {
-		if currentRoles.Contains(g) {
-			lg.Warn(
-				"attempted to grant a duplicate role for a user",
-				zap.String("user-name", nu.User),
-				zap.String("role-name", g),
-			)
-			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
-		}
-		currentRoles.Add(g)
-	}
-	for _, r := range nu.Revoke {
-		if !currentRoles.Contains(r) {
-			lg.Warn(
-				"attempted to revoke a ungranted role for a user",
-				zap.String("user-name", nu.User),
-				zap.String("role-name", r),
-			)
-			return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
-		}
-		currentRoles.Remove(r)
-	}
-	out.Roles = currentRoles.Values()
-	sort.Strings(out.Roles)
-	return out, nil
-}
-
-// merge for a role works the same as User above -- atomic Role application to
-// each of the substructures.
-func (r Role) merge(lg *zap.Logger, n Role) (Role, error) {
-	var out Role
-	var err error
-	if r.Role != n.Role {
-		return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
-	}
-	out.Role = r.Role
-	out.Permissions, err = r.Permissions.Grant(n.Grant)
-	if err != nil {
-		return out, err
-	}
-	out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke)
-	return out, err
-}
-
-func (r Role) HasKeyAccess(key string, write bool) bool {
-	if r.Role == RootRoleName {
-		return true
-	}
-	return r.Permissions.KV.HasAccess(key, write)
-}
-
-func (r Role) HasRecursiveAccess(key string, write bool) bool {
-	if r.Role == RootRoleName {
-		return true
-	}
-	return r.Permissions.KV.HasRecursiveAccess(key, write)
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Grant(n *Permissions) (Permissions, error) {
-	var out Permissions
-	var err error
-	if n == nil {
-		return p, nil
-	}
-	out.KV, err = p.KV.Grant(n.KV)
-	return out, err
-}
-
-// Revoke removes a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) {
-	var out Permissions
-	var err error
-	if n == nil {
-		return p, nil
-	}
-	out.KV, err = p.KV.Revoke(lg, n.KV)
-	return out, err
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
-	var out RWPermission
-	currentRead := types.NewUnsafeSet(rw.Read...)
-	for _, r := range n.Read {
-		if currentRead.Contains(r) {
-			return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
-		}
-		currentRead.Add(r)
-	}
-	currentWrite := types.NewUnsafeSet(rw.Write...)
-	for _, w := range n.Write {
-		if currentWrite.Contains(w) {
-			return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
-		}
-		currentWrite.Add(w)
-	}
-	out.Read = currentRead.Values()
-	out.Write = currentWrite.Values()
-	sort.Strings(out.Read)
-	sort.Strings(out.Write)
-	return out, nil
-}
-
-// Revoke removes a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) {
-	var out RWPermission
-	currentRead := types.NewUnsafeSet(rw.Read...)
-	for _, r := range n.Read {
-		if !currentRead.Contains(r) {
-			lg.Info(
-				"revoking ungranted read permission",
-				zap.String("read-permission", r),
-			)
-			continue
-		}
-		currentRead.Remove(r)
-	}
-	currentWrite := types.NewUnsafeSet(rw.Write...)
-	for _, w := range n.Write {
-		if !currentWrite.Contains(w) {
-			lg.Info(
-				"revoking ungranted write permission",
-				zap.String("write-permission", w),
-			)
-			continue
-		}
-		currentWrite.Remove(w)
-	}
-	out.Read = currentRead.Values()
-	out.Write = currentWrite.Values()
-	sort.Strings(out.Read)
-	sort.Strings(out.Write)
-	return out, nil
-}
-
-func (rw RWPermission) HasAccess(key string, write bool) bool {
-	var list []string
-	if write {
-		list = rw.Write
-	} else {
-		list = rw.Read
-	}
-	for _, pat := range list {
-		match, err := simpleMatch(pat, key)
-		if err == nil && match {
-			return true
-		}
-	}
-	return false
-}
-
-func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
-	list := rw.Read
-	if write {
-		list = rw.Write
-	}
-	for _, pat := range list {
-		match, err := prefixMatch(pat, key)
-		if err == nil && match {
-			return true
-		}
-	}
-	return false
-}
-
-func simpleMatch(pattern string, key string) (match bool, err error) {
-	if pattern[len(pattern)-1] == '*' {
-		return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
-	}
-	return key == pattern, nil
-}
-
-func prefixMatch(pattern string, key string) (match bool, err error) {
-	if pattern[len(pattern)-1] != '*' {
-		return false, nil
-	}
-	return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
-}
-
-func attachRootRole(u User) User {
-	inRoles := false
-	for _, r := range u.Roles {
-		if r == RootRoleName {
-			inRoles = true
-			break
-		}
-	}
-	if !inRoles {
-		u.Roles = append(u.Roles, RootRoleName)
-	}
-	return u
-}
-
-func (s *store) getUser(name string, quorum bool) (User, error) {
-	resp, err := s.requestResource("/users/"+name, quorum)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
-			}
-		}
-		return User{}, err
-	}
-	var u User
-	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
-	if err != nil {
-		return u, err
-	}
-	// Attach root role to root user.
-	if u.User == "root" {
-		u = attachRootRole(u)
-	}
-	return u, nil
-}
-
-func (s *store) getRole(name string, quorum bool) (Role, error) {
-	if name == RootRoleName {
-		return rootRole, nil
-	}
-	resp, err := s.requestResource("/roles/"+name, quorum)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
-			}
-		}
-		return Role{}, err
-	}
-	var r Role
-	err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
-	return r, err
-}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go
deleted file mode 100644
index 6c8c50c8cc..0000000000
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2auth/auth_requests.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2auth
-
-import (
-	"context"
-	"encoding/json"
-	"path"
-
-	"go.etcd.io/etcd/api/v3/etcdserverpb"
-	"go.etcd.io/etcd/server/v3/etcdserver"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-
-	"go.uber.org/zap"
-)
-
-func (s *store) ensureAuthDirectories() error {
-	if s.ensuredOnce {
-		return nil
-	}
-	for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
-		ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
-		pe := false
-		rr := etcdserverpb.Request{
-			Method:    "PUT",
-			Path:      res,
-			Dir:       true,
-			PrevExist: &pe,
-		}
-		_, err := s.server.Do(ctx, rr)
-		cancel()
-		if err != nil {
-			if e, ok := err.(*v2error.Error); ok {
-				if e.ErrorCode == v2error.EcodeNodeExist {
-					continue
-				}
-			}
-			s.lg.Warn(
-				"failed to create auth directories",
-				zap.Error(err),
-			)
-			return err
-		}
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
-	defer cancel()
-	pe := false
-	rr := etcdserverpb.Request{
-		Method:    "PUT",
-		Path:      StorePermsPrefix + "/enabled",
-		Val:       "false",
-		PrevExist: &pe,
-	}
-	_, err := s.server.Do(ctx, rr)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeNodeExist {
-				s.ensuredOnce = true
-				return nil
-			}
-		}
-		return err
-	}
-	s.ensuredOnce = true
-	return nil
-}
-
-func (s *store) enableAuth() error {
-	_, err := s.updateResource("/enabled", true)
-	return err
-}
-func (s *store) disableAuth() error {
-	_, err := s.updateResource("/enabled", false)
-	return err
-}
-
-func (s *store) detectAuth() bool {
-	if s.server == nil {
-		return false
-	}
-	value, err := s.requestResource("/enabled", false)
-	if err != nil {
-		if e, ok := err.(*v2error.Error); ok {
-			if e.ErrorCode == v2error.EcodeKeyNotFound {
-				return false
-			}
-		}
-		s.lg.Warn(
-			"failed to detect auth settings",
-			zap.Error(err),
-		)
-		return false
-	}
-
-	var u bool
-	err = json.Unmarshal([]byte(*value.Event.Node.Value), &u)
-	if err != nil {
-		s.lg.Warn(
-			"internal bookkeeping value for enabled isn't valid JSON",
-			zap.Error(err),
-		)
-		return false
-	}
-	return u
-}
-
-func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
-	defer cancel()
-	p := path.Join(StorePermsPrefix, res)
-	method := "GET"
-	if quorum {
-		method = "QGET"
-	}
-	rr := etcdserverpb.Request{
-		Method: method,
-		Path:   p,
-		Dir:    false, // TODO: always false?
-	}
-	return s.server.Do(ctx, rr)
-}
-
-func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
-	return s.setResource(res, value, true)
-}
-func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
-	return s.setResource(res, value, false)
-}
-func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
-	err := s.ensureAuthDirectories()
-	if err != nil {
-		return etcdserver.Response{}, err
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
-	defer cancel()
-	data, err := json.Marshal(value)
-	if err != nil {
-		return etcdserver.Response{}, err
-	}
-	p := path.Join(StorePermsPrefix, res)
-	rr := etcdserverpb.Request{
-		Method:    "PUT",
-		Path:      p,
-		Val:       string(data),
-		PrevExist: &prevexist,
-	}
-	return s.server.Do(ctx, rr)
-}
-
-func (s *store) deleteResource(res string) error {
-	err := s.ensureAuthDirectories()
-	if err != nil {
-		return err
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
-	defer cancel()
-	pex := true
-	p := path.Join(StorePermsPrefix, res)
-	_, err = s.server.Do(ctx, etcdserverpb.Request{
-		Method:    "DELETE",
-		Path:      p,
-		PrevExist: &pex,
-	})
-	return err
-}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go
index 623863254a..ab24757d76 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go
@@ -125,10 +125,6 @@ type Error struct {
 	Index     uint64 `json:"index"`
 }
 
-func NewRequestError(errorCode int, cause string) *Error {
-	return NewError(errorCode, cause, 0)
-}
-
 func NewError(errorCode int, cause string, index uint64) *Error {
 	return &Error{
 		ErrorCode: errorCode,
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go
deleted file mode 100644
index 718b5ed502..0000000000
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/capability.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
-	"fmt"
-	"net/http"
-
-	"go.etcd.io/etcd/server/v3/etcdserver/api"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-)
-
-func authCapabilityHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		if !api.IsCapabilityEnabled(api.AuthCapability) {
-			notCapable(w, r, api.AuthCapability)
-			return
-		}
-		fn(w, r)
-	}
-}
-
-func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
-	herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
-	if err := herr.WriteTo(w); err != nil {
-		// TODO: the following plog was removed, add the logging back if possible
-		// plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
-	}
-}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go
index 17b420732e..7b56da3578 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client.go
@@ -12,45 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package v2http provides etcd client and server implementations.
 package v2http
 
 import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
 	"net/http"
-	"net/url"
-	"path"
-	"strconv"
-	"strings"
 	"time"
 
-	"go.etcd.io/etcd/api/v3/etcdserverpb"
-	"go.etcd.io/etcd/client/pkg/v3/types"
 	"go.etcd.io/etcd/server/v3/etcdserver"
-	"go.etcd.io/etcd/server/v3/etcdserver/api"
 	"go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/membership"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2auth"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes"
-	stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
-	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
-
-	"github.com/jonboulle/clockwork"
 	"go.uber.org/zap"
 )
 
-const (
-	authPrefix     = "/v2/auth"
-	keysPrefix     = "/v2/keys"
-	machinesPrefix = "/v2/machines"
-	membersPrefix  = "/v2/members"
-	statsPrefix    = "/v2/stats"
-)
-
 // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
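 // (Editorial note, not in the vendored source: after this change the handler
 // only wires up the basic, metrics, and health endpoints via etcdhttp; the
 // whole v2 keys/members/stats/auth surface below is removed together with
 // handleV2.)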
 func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
 	if lg == nil {
@@ -59,698 +32,19 @@ func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time
 	mux := http.NewServeMux()
 	etcdhttp.HandleBasic(lg, mux, server)
 	etcdhttp.HandleMetricsHealth(lg, mux, server)
-	handleV2(lg, mux, server, timeout)
 	return requestLogger(lg, mux)
 }
 
-func handleV2(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
-	sec := v2auth.NewStore(lg, server, timeout)
-	kh := &keysHandler{
-		lg:                    lg,
-		sec:                   sec,
-		server:                server,
-		cluster:               server.Cluster(),
-		timeout:               timeout,
-		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
-	}
-
-	sh := &statsHandler{
-		lg:    lg,
-		stats: server,
-	}
-
-	mh := &membersHandler{
-		lg:                    lg,
-		sec:                   sec,
-		server:                server,
-		cluster:               server.Cluster(),
-		timeout:               timeout,
-		clock:                 clockwork.NewRealClock(),
-		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
-	}
-
-	mah := &machinesHandler{cluster: server.Cluster()}
-
-	sech := &authHandler{
-		lg:                    lg,
-		sec:                   sec,
-		cluster:               server.Cluster(),
-		clientCertAuthEnabled: server.ClientCertAuthEnabled(),
-	}
-	mux.HandleFunc("/", http.NotFound)
-	mux.Handle(keysPrefix, kh)
-	mux.Handle(keysPrefix+"/", kh)
-	mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
-	mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
-	mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
-	mux.Handle(membersPrefix, mh)
-	mux.Handle(membersPrefix+"/", mh)
-	mux.Handle(machinesPrefix, mah)
-	handleAuth(mux, sech)
-}
-
-type keysHandler struct {
-	lg                    *zap.Logger
-	sec                   v2auth.Store
-	server                etcdserver.ServerV2
-	cluster               api.Cluster
-	timeout               time.Duration
-	clientCertAuthEnabled bool
-}
-
-func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
-		return
-	}
-
-	w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
-	ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
-	defer cancel()
-	clock := clockwork.NewRealClock()
-	startTime := clock.Now()
-	rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
-	if err != nil {
-		writeKeyError(h.lg, w, err)
-		return
-	}
-	// The path must be valid at this point (we've parsed the request successfully).
- if !hasKeyPrefixAccess(h.lg, h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) { - writeKeyNoAuth(w) - return - } - if !rr.Wait { - reportRequestReceived(rr) - } - resp, err := h.server.Do(ctx, rr) - if err != nil { - err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix) - writeKeyError(h.lg, w, err) - reportRequestFailed(rr, err) - return - } - switch { - case resp.Event != nil: - if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil { - // Should never be reached - h.lg.Warn("failed to write key event", zap.Error(err)) - } - reportRequestCompleted(rr, startTime) - case resp.Watcher != nil: - ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout) - defer cancel() - handleKeyWatch(ctx, h.lg, w, resp, rr.Stream) - default: - writeKeyError(h.lg, w, errors.New("received response with no Event/Watcher")) - } -} - -type machinesHandler struct { - cluster api.Cluster -} - -func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "HEAD") { - return - } - endpoints := h.cluster.ClientURLs() - w.Write([]byte(strings.Join(endpoints, ", "))) -} - -type membersHandler struct { - lg *zap.Logger - sec v2auth.Store - server etcdserver.ServerV2 - cluster api.Cluster - timeout time.Duration - clock clockwork.Clock - clientCertAuthEnabled bool -} - -func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") { - return - } - if !hasWriteRootAccess(h.lg, h.sec, r, h.clientCertAuthEnabled) { - writeNoAuth(h.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) - - ctx, cancel := context.WithTimeout(context.Background(), h.timeout) - defer cancel() - - switch r.Method { - case "GET": - switch trimPrefix(r.URL.Path, membersPrefix) { - case "": - mc := newMemberCollection(h.cluster.Members()) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(mc); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - case "leader": - id := h.server.Leader() - if id == 0 { - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election")) - return - } - m := newMember(h.cluster.Member(id)) - w.Header().Set("Content-Type", "application/json") - if err := json.NewEncoder(w).Encode(m); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - default: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found")) - } - - case "POST": - req := httptypes.MemberCreateRequest{} - if ok := unmarshalRequest(h.lg, r, &req, w); !ok { - return - } - now := h.clock.Now() - m := membership.NewMember("", req.PeerURLs, "", &now) - _, err := h.server.AddMember(ctx, *m) - switch { - case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - return - case err != nil: - h.lg.Warn( - "failed to add a member", - zap.String("member-id", m.ID.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - return - } - res := newMember(m) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusCreated) - if err := json.NewEncoder(w).Encode(res); err != nil { - h.lg.Warn("failed to encode members response", zap.Error(err)) - } - - case "DELETE": - id, ok := getID(h.lg, r.URL.Path, w) - if !ok { - return - } - _, err := h.server.RemoveMember(ctx, uint64(id)) 
- switch { - case err == membership.ErrIDRemoved: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) - case err == membership.ErrIDNotFound: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - h.lg.Warn( - "failed to remove a member", - zap.String("member-id", id.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - - case "PUT": - id, ok := getID(h.lg, r.URL.Path, w) - if !ok { - return - } - req := httptypes.MemberUpdateRequest{} - if ok := unmarshalRequest(h.lg, r, &req, w); !ok { - return - } - m := membership.Member{ - ID: id, - RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, - } - _, err := h.server.UpdateMember(ctx, m) - switch { - case err == membership.ErrPeerURLexists: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) - case err == membership.ErrIDNotFound: - writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id))) - case err != nil: - h.lg.Warn( - "failed to update a member", - zap.String("member-id", m.ID.String()), - zap.Error(err), - ) - writeError(h.lg, w, r, err) - default: - w.WriteHeader(http.StatusNoContent) - } - } -} - -type statsHandler struct { - lg *zap.Logger - stats stats.Stats -} - -func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.StoreStats()) -} - -func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(h.stats.SelfStats()) -} - -func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - stats := h.stats.LeaderStats() - if stats == nil { - etcdhttp.WriteError(h.lg, w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(stats) -} - -// parseKeyRequest converts a received http.Request on keysPrefix to -// a server Request, performing validation of supplied fields as appropriate. -// If any validation fails, an empty Request and non-nil error is returned. 
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) { - var noValueOnSuccess bool - emptyReq := etcdserverpb.Request{} - - err := r.ParseForm() - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidForm, - err.Error(), - ) - } - - if !strings.HasPrefix(r.URL.Path, keysPrefix) { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidForm, - "incorrect key prefix", - ) - } - p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):]) - - var pIdx, wIdx uint64 - if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeIndexNaN, - `invalid value for "prevIndex"`, - ) - } - if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeIndexNaN, - `invalid value for "waitIndex"`, - ) - } - - var rec, sort, wait, dir, quorum, stream bool - if rec, err = getBool(r.Form, "recursive"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "recursive"`, - ) - } - if sort, err = getBool(r.Form, "sorted"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "sorted"`, - ) - } - if wait, err = getBool(r.Form, "wait"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "wait"`, - ) - } - // TODO(jonboulle): define what parameters dir is/isn't compatible with? - if dir, err = getBool(r.Form, "dir"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "dir"`, - ) - } - if quorum, err = getBool(r.Form, "quorum"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "quorum"`, - ) - } - if stream, err = getBool(r.Form, "stream"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "stream"`, - ) - } - - if wait && r.Method != "GET" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `"wait" can only be used with GET requests`, - ) - } - - pV := r.FormValue("prevValue") - if _, ok := r.Form["prevValue"]; ok && pV == "" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodePrevValueRequired, - `"prevValue" cannot be empty`, - ) - } - - if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - `invalid value for "noValueOnSuccess"`, - ) - } - - // TTL is nullable, so leave it null if not specified - // or an empty string - var ttl *uint64 - if len(r.FormValue("ttl")) > 0 { - i, err := getUint64(r.Form, "ttl") - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeTTLNaN, - `invalid value for "ttl"`, +func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if lg != nil { + lg.Debug( + "handling HTTP request", + zap.String("method", r.Method), + zap.String("request-uri", r.RequestURI), + zap.String("remote-addr", r.RemoteAddr), ) } - ttl = &i - } - - // prevExist is nullable, so leave it null if not specified - var pe *bool - if _, ok := r.Form["prevExist"]; ok { - bv, err := getBool(r.Form, "prevExist") - if err != nil { - return emptyReq, 
false, v2error.NewRequestError( - v2error.EcodeInvalidField, - "invalid value for prevExist", - ) - } - pe = &bv - } - - // refresh is nullable, so leave it null if not specified - var refresh *bool - if _, ok := r.Form["refresh"]; ok { - bv, err := getBool(r.Form, "refresh") - if err != nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeInvalidField, - "invalid value for refresh", - ) - } - refresh = &bv - if refresh != nil && *refresh { - val := r.FormValue("value") - if _, ok := r.Form["value"]; ok && val != "" { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeRefreshValue, - `A value was provided on a refresh`, - ) - } - if ttl == nil { - return emptyReq, false, v2error.NewRequestError( - v2error.EcodeRefreshTTLRequired, - `No TTL value set`, - ) - } - } - } - - rr := etcdserverpb.Request{ - Method: r.Method, - Path: p, - Val: r.FormValue("value"), - Dir: dir, - PrevValue: pV, - PrevIndex: pIdx, - PrevExist: pe, - Wait: wait, - Since: wIdx, - Recursive: rec, - Sorted: sort, - Quorum: quorum, - Stream: stream, - } - - if pe != nil { - rr.PrevExist = pe - } - - if refresh != nil { - rr.Refresh = refresh - } - - // Null TTL is equivalent to unset Expiration - if ttl != nil { - expr := time.Duration(*ttl) * time.Second - rr.Expiration = clock.Now().Add(expr).UnixNano() - } - - return rr, noValueOnSuccess, nil -} - -// writeKeyEvent trims the prefix of key path in a single Event under -// StoreKeysPrefix, serializes it and writes the resulting JSON to the given -// ResponseWriter, along with the appropriate headers. -func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error { - ev := resp.Event - if ev == nil { - return errors.New("cannot write empty Event") - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex)) - w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) - w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) - - if ev.IsCreated() { - w.WriteHeader(http.StatusCreated) - } - - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if noValueOnSuccess && - (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap || - ev.Action == v2store.Create || ev.Action == v2store.Update) { - ev.Node = nil - ev.PrevNode = nil - } - return json.NewEncoder(w).Encode(ev) -} - -func writeKeyNoAuth(w http.ResponseWriter) { - e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0) - e.WriteTo(w) -} - -// writeKeyError logs and writes the given Error to the ResponseWriter. -// If Error is not an etcdErr, the error will be converted to an etcd error. 
-func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) { - if err == nil { - return - } - switch e := err.(type) { - case *v2error.Error: - e.WriteTo(w) - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost: - if lg != nil { - lg.Warn( - "v2 response error", - zap.String("internal-server-error", err.Error()), - ) - } - default: - if lg != nil { - lg.Warn( - "unexpected v2 response error", - zap.String("internal-server-error", err.Error()), - ) - } - } - ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0) - ee.WriteTo(w) - } -} - -func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) { - wa := resp.Watcher - defer wa.Remove() - ech := wa.EventChan() - var nch <-chan bool - if x, ok := w.(http.CloseNotifier); ok { - nch = x.CloseNotify() - } - - w.Header().Set("Content-Type", "application/json") - w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex())) - w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index)) - w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term)) - w.WriteHeader(http.StatusOK) - - // Ensure headers are flushed early, in case of long polling - w.(http.Flusher).Flush() - - for { - select { - case <-nch: - // Client closed connection. Nothing to do. - return - case <-ctx.Done(): - // Timed out. net/http will close the connection for us, so nothing to do. - return - case ev, ok := <-ech: - if !ok { - // If the channel is closed this may be an indication of - // that notifications are much more than we are able to - // send to the client in time. Then we simply end streaming. - return - } - ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix) - if err := json.NewEncoder(w).Encode(ev); err != nil { - // Should never be reached - lg.Warn("failed to encode event", zap.Error(err)) - return - } - if !stream { - return - } - w.(http.Flusher).Flush() - } - } -} - -func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event { - if ev == nil { - return nil - } - // Since the *Event may reference one in the store history - // history, we must copy it before modifying - e := ev.Clone() - trimNodeExternPrefix(e.Node, prefix) - trimNodeExternPrefix(e.PrevNode, prefix) - return e -} - -func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) { - if n == nil { - return - } - n.Key = strings.TrimPrefix(n.Key, prefix) - for _, nn := range n.Nodes { - trimNodeExternPrefix(nn, prefix) - } -} - -func trimErrorPrefix(err error, prefix string) error { - if e, ok := err.(*v2error.Error); ok { - e.Cause = strings.TrimPrefix(e.Cause, prefix) - } - return err -} - -func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool { - ctype := r.Header.Get("Content-Type") - semicolonPosition := strings.Index(ctype, ";") - if semicolonPosition != -1 { - ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition])) - } - if ctype != "application/json" { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype))) - return false - } - b, err := ioutil.ReadAll(r.Body) - if err != nil { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - if err := req.UnmarshalJSON(b); err != nil { - writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error())) - return false - } - return true -} - -func getID(lg *zap.Logger, p string, w 
http.ResponseWriter) (types.ID, bool) { - idStr := trimPrefix(p, membersPrefix) - if idStr == "" { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return 0, false - } - id, err := types.IDFromString(idStr) - if err != nil { - writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr))) - return 0, false - } - return id, true -} - -// getUint64 extracts a uint64 by the given key from a Form. If the key does -// not exist in the form, 0 is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. -func getUint64(form url.Values, key string) (i uint64, err error) { - if vals, ok := form[key]; ok { - i, err = strconv.ParseUint(vals[0], 10, 64) - } - return -} - -// getBool extracts a bool by the given key from a Form. If the key does not -// exist in the form, false is returned. If the key exists but the value is -// badly formed, an error is returned. If multiple values are present only the -// first is considered. -func getBool(form url.Values, key string) (b bool, err error) { - if vals, ok := form[key]; ok { - b, err = strconv.ParseBool(vals[0]) - } - return -} - -// trimPrefix removes a given prefix and any slash following the prefix -// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == "" -func trimPrefix(p, prefix string) (s string) { - s = strings.TrimPrefix(p, prefix) - s = strings.TrimPrefix(s, "/") - return -} - -func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection { - c := httptypes.MemberCollection(make([]httptypes.Member, len(ms))) - - for i, m := range ms { - c[i] = newMember(m) - } - - return &c -} - -func newMember(m *membership.Member) httptypes.Member { - tm := httptypes.Member{ - ID: m.ID.String(), - Name: m.Name, - PeerURLs: make([]string, len(m.PeerURLs)), - ClientURLs: make([]string, len(m.ClientURLs)), - } - - copy(tm.PeerURLs, m.PeerURLs) - copy(tm.ClientURLs, m.ClientURLs) - - return tm + handler.ServeHTTP(w, r) + }) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go deleted file mode 100644 index 2c6e7744ed..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/client_auth.go +++ /dev/null @@ -1,604 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "encoding/json" - "net/http" - "path" - "strings" - - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "go.uber.org/zap" -) - -type authHandler struct { - lg *zap.Logger - sec v2auth.Store - cluster api.Cluster - clientCertAuthEnabled bool -} - -func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if r.Method == "GET" || r.Method == "HEAD" { - return true - } - return hasRootAccess(lg, sec, r, clientCertAuthEnabled) -} - -func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { - username, password, ok := r.BasicAuth() - if !ok { - lg.Warn("malformed basic auth encoding") - return nil - } - user, err := sec.GetUser(username) - if err != nil { - return nil - } - - ok = sec.CheckPassword(user, password) - if !ok { - lg.Warn("incorrect password", zap.String("user-name", username)) - return nil - } - return &user -} - -func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User { - if r.TLS == nil { - return nil - } - - for _, chains := range r.TLS.VerifiedChains { - for _, chain := range chains { - lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName)) - user, err := sec.GetUser(chain.Subject.CommonName) - if err == nil { - lg.Debug( - "authenticated a user via common name", - zap.String("user-name", user.User), - zap.String("common-name", chain.Subject.CommonName), - ) - return &user - } - } - } - return nil -} - -func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. - return true - } - if !sec.AuthEnabled() { - return true - } - - var rootUser *v2auth.User - if r.Header.Get("Authorization") == "" && clientCertAuthEnabled { - rootUser = userFromClientCertificate(lg, sec, r) - if rootUser == nil { - return false - } - } else { - rootUser = userFromBasicAuth(lg, sec, r) - if rootUser == nil { - return false - } - } - - for _, role := range rootUser.Roles { - if role == v2auth.RootRoleName { - return true - } - } - - lg.Warn( - "a user does not have root role for resource", - zap.String("root-user", rootUser.User), - zap.String("root-role-name", v2auth.RootRoleName), - zap.String("resource-path", r.URL.Path), - ) - return false -} - -func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool { - if sec == nil { - // No store means no auth available, eg, tests. 
- return true - } - if !sec.AuthEnabled() { - return true - } - - var user *v2auth.User - if r.Header.Get("Authorization") == "" { - if clientCertAuthEnabled { - user = userFromClientCertificate(lg, sec, r) - } - if user == nil { - return hasGuestAccess(lg, sec, r, key) - } - } else { - user = userFromBasicAuth(lg, sec, r) - if user == nil { - return false - } - } - - writeAccess := r.Method != "GET" && r.Method != "HEAD" - for _, roleName := range user.Roles { - role, err := sec.GetRole(roleName) - if err != nil { - continue - } - if recursive { - if role.HasRecursiveAccess(key, writeAccess) { - return true - } - } else if role.HasKeyAccess(key, writeAccess) { - return true - } - } - - lg.Warn( - "invalid access for user on key", - zap.String("user-name", user.User), - zap.String("key", key), - ) - return false -} - -func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool { - writeAccess := r.Method != "GET" && r.Method != "HEAD" - role, err := sec.GetRole(v2auth.GuestRoleName) - if err != nil { - return false - } - if role.HasKeyAccess(key, writeAccess) { - return true - } - - lg.Warn( - "invalid access for a guest role on key", - zap.String("role-name", v2auth.GuestRoleName), - zap.String("key", key), - ) - return false -} - -func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) { - herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials") - if err := herr.WriteTo(w); err != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.Error(err), - ) - } -} - -func handleAuth(mux *http.ServeMux, sh *authHandler) { - mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles)) - mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles)) - mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers)) - mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers)) - mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable)) -} - -func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - roles, err := sh.sec.AllRoles() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - if roles == nil { - roles = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - var rolesCollections struct { - Roles []v2auth.Role `json:"roles"` - } - for _, roleName := range roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - rolesCollections.Roles = append(rolesCollections.Roles, role) - } - err = json.NewEncoder(w).Encode(rolesCollections) - - if err != nil { - sh.lg.Warn( - "failed to encode base roles", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - writeError(sh.lg, w, r, err) - return - } -} - -func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/roles/rolename/command". 
- // First item is an empty string, second is "roles" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseRoles(w, r) - return - } - if len(pieces) != 3 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forRole(w, r, pieces[2]) -} - -func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - data, err := sh.sec.GetRole(role) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - err = json.NewEncoder(w).Encode(data) - if err != nil { - sh.lg.Warn( - "failed to encode a role", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "PUT": - var in v2auth.Role - err := json.NewDecoder(r.Body).Decode(&in) - if err != nil { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if in.Role != role { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL")) - return - } - - var out v2auth.Role - - // create - if in.Grant.IsEmpty() && in.Revoke.IsEmpty() { - err = sh.sec.CreateRole(in) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - w.WriteHeader(http.StatusCreated) - out = in - } else { - if !in.Permissions.IsEmpty() { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke")) - return - } - out, err = sh.sec.UpdateRole(in) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - w.WriteHeader(http.StatusOK) - } - - err = json.NewEncoder(w).Encode(out) - if err != nil { - sh.lg.Warn( - "failed to encode a role", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "DELETE": - err := sh.sec.DeleteRole(role) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} - -type userWithRoles struct { - User string `json:"user"` - Roles []v2auth.Role `json:"roles,omitempty"` -} - -type usersCollections struct { - Users []userWithRoles `json:"users"` -} - -func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - users, err := sh.sec.AllUsers() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - if users == nil { - users = make([]string, 0) - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - ucs := usersCollections{} - for _, userName := range users { - var user v2auth.User - user, err = sh.sec.GetUser(userName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - uwr := userWithRoles{User: user.User} - for _, roleName := range user.Roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - continue - } - uwr.Roles = append(uwr.Roles, role) - } - - ucs.Users = append(ucs.Users, uwr) - } - err = json.NewEncoder(w).Encode(ucs) - - if err != nil { - sh.lg.Warn( - "failed to 
encode users", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - writeError(sh.lg, w, r, err) - return - } -} - -func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) { - subpath := path.Clean(r.URL.Path[len(authPrefix):]) - // Split "/users/username". - // First item is an empty string, second is "users" - pieces := strings.Split(subpath, "/") - if len(pieces) == 2 { - sh.baseUsers(w, r) - return - } - if len(pieces) != 3 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path")) - return - } - sh.forUser(w, r, pieces[2]) -} - -func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case "GET": - u, err := sh.sec.GetUser(user) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - err = r.ParseForm() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - uwr := userWithRoles{User: u.User} - for _, roleName := range u.Roles { - var role v2auth.Role - role, err = sh.sec.GetRole(roleName) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - uwr.Roles = append(uwr.Roles, role) - } - err = json.NewEncoder(w).Encode(uwr) - - if err != nil { - sh.lg.Warn( - "failed to encode roles", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "PUT": - var u v2auth.User - err := json.NewDecoder(r.Body).Decode(&u) - if err != nil { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body.")) - return - } - if u.User != user { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL")) - return - } - - var ( - out v2auth.User - created bool - ) - - if len(u.Grant) == 0 && len(u.Revoke) == 0 { - // create or update - if len(u.Roles) != 0 { - out, err = sh.sec.CreateUser(u) - } else { - // if user passes in both password and roles, we are unsure about his/her - // intention. 
- out, created, err = sh.sec.CreateOrUpdateUser(u) - } - - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } else { - // update case - if len(u.Roles) != 0 { - writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke")) - return - } - out, err = sh.sec.UpdateUser(u) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } - - if created { - w.WriteHeader(http.StatusCreated) - } else { - w.WriteHeader(http.StatusOK) - } - - out.Password = "" - - err = json.NewEncoder(w).Encode(out) - if err != nil { - sh.lg.Warn( - "failed to encode a user", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - return - } - return - - case "DELETE": - err := sh.sec.DeleteUser(user) - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} - -type enabled struct { - Enabled bool `json:"enabled"` -} - -func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") { - return - } - if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) { - writeNoAuth(sh.lg, w, r) - return - } - w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String()) - w.Header().Set("Content-Type", "application/json") - isEnabled := sh.sec.AuthEnabled() - switch r.Method { - case "GET": - jsonDict := enabled{isEnabled} - err := json.NewEncoder(w).Encode(jsonDict) - if err != nil { - sh.lg.Warn( - "failed to encode a auth state", - zap.String("url", r.URL.String()), - zap.Error(err), - ) - } - - case "PUT": - err := sh.sec.EnableAuth() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - - case "DELETE": - err := sh.sec.DisableAuth() - if err != nil { - writeError(sh.lg, w, r, err) - return - } - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go deleted file mode 100644 index 88138b80a8..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/http.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2http - -import ( - "math" - "net/http" - "strings" - "time" - - "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2auth" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "go.uber.org/zap" -) - -const ( - // time to wait for a Watch request - defaultWatchTimeout = time.Duration(math.MaxInt64) -) - -func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) { - if err == nil { - return - } - if e, ok := err.(v2auth.Error); ok { - herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) - if et := herr.WriteTo(w); et != nil { - if lg != nil { - lg.Debug( - "failed to write v2 HTTP error", - zap.String("remote-addr", r.RemoteAddr), - zap.String("v2auth-error", e.Error()), - zap.Error(et), - ) - } - } - return - } - etcdhttp.WriteError(lg, w, r, err) -} - -// allowMethod verifies that the given method is one of the allowed methods, -// and if not, it writes an error to w. A boolean is returned indicating -// whether or not the method is allowed. -func allowMethod(w http.ResponseWriter, m string, ms ...string) bool { - for _, meth := range ms { - if m == meth { - return true - } - } - w.Header().Set("Allow", strings.Join(ms, ",")) - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) - return false -} - -func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if lg != nil { - lg.Debug( - "handling HTTP request", - zap.String("method", r.Method), - zap.String("request-uri", r.RequestURI), - zap.String("remote-addr", r.RemoteAddr), - ) - } - handler.ServeHTTP(w, r) - }) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go deleted file mode 100644 index a5467be91e..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes/member.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package httptypes defines how etcd's HTTP API entities are serialized to and -// deserialized from JSON. 
-package httptypes - -import ( - "encoding/json" - - "go.etcd.io/etcd/client/pkg/v3/types" -) - -type Member struct { - ID string `json:"id"` - Name string `json:"name"` - PeerURLs []string `json:"peerURLs"` - ClientURLs []string `json:"clientURLs"` -} - -type MemberCreateRequest struct { - PeerURLs types.URLs -} - -type MemberUpdateRequest struct { - MemberCreateRequest -} - -func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{} - - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - urls, err := types.NewURLs(s.PeerURLs) - if err != nil { - return err - } - - m.PeerURLs = urls - return nil -} - -type MemberCollection []Member - -func (c *MemberCollection) MarshalJSON() ([]byte, error) { - d := struct { - Members []Member `json:"members"` - }{ - Members: []Member(*c), - } - - return json.Marshal(d) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go deleted file mode 100644 index bdbd8c71c1..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/metrics.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2http - -import ( - "net/http" - "strconv" - "time" - - "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" - - "github.com/prometheus/client_golang/prometheus" -) - -var ( - incomingEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "received_total", - Help: "Counter of requests received into the system (successfully parsed and authd).", - }, []string{"method"}) - - failedEvents = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "failed_total", - Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) 
and code (400, 500 etc.).", - }, []string{"method", "code"}) - - successfulEventsHandlingSec = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "http", - Name: "successful_duration_seconds", - Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).", - - // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2 - // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec - Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13), - }, []string{"method"}) -) - -func init() { - prometheus.MustRegister(incomingEvents) - prometheus.MustRegister(failedEvents) - prometheus.MustRegister(successfulEventsHandlingSec) -} - -func reportRequestReceived(request etcdserverpb.Request) { - incomingEvents.WithLabelValues(methodFromRequest(request)).Inc() -} - -func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) { - method := methodFromRequest(request) - successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds()) -} - -func reportRequestFailed(request etcdserverpb.Request, err error) { - method := methodFromRequest(request) - failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc() -} - -func methodFromRequest(request etcdserverpb.Request) string { - if request.Method == "GET" && request.Quorum { - return "QGET" - } - return request.Method -} - -func codeFromError(err error) int { - if err == nil { - return http.StatusInternalServerError - } - switch e := err.(type) { - case *v2error.Error: - return e.StatusCode() - case *httptypes.HTTPError: - return e.Code - default: - return http.StatusInternalServerError - } -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go deleted file mode 100644 index cbf60215a2..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/stats.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package v2stats defines a standard interface for etcd cluster statistics. -package v2stats - -type Stats interface { - // SelfStats returns the struct representing statistics of this server - SelfStats() []byte - // LeaderStats returns the statistics of all followers in the cluster - // if this server is leader. Otherwise, nil is returned. 
- LeaderStats() []byte - // StoreStats returns statistics of the store backing this EtcdServer - StoreStats() []byte -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go deleted file mode 100644 index d275e05718..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/cluster.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" -) - -func (s *v2v3Server) ID() types.ID { - // TODO: use an actual member ID - return types.ID(0xe7cd2f00d) -} -func (s *v2v3Server) ClientURLs() []string { panic("STUB") } -func (s *v2v3Server) Members() []*membership.Member { panic("STUB") } -func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") } -func (s *v2v3Server) Version() *semver.Version { panic("STUB") } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go deleted file mode 100644 index 71557ceb5c..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/server.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package v2v3 - -import ( - "context" - "net/http" - "time" - - pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/etcdserver/api" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - - "github.com/coreos/go-semver/semver" - "go.uber.org/zap" -) - -type fakeStats struct{} - -func (s *fakeStats) SelfStats() []byte { return nil } -func (s *fakeStats) LeaderStats() []byte { return nil } -func (s *fakeStats) StoreStats() []byte { return nil } - -type v2v3Server struct { - lg *zap.Logger - c *clientv3.Client - store *v2v3Store - fakeStats -} - -func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer { - return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)} -} - -func (s *v2v3Server) ClientCertAuthEnabled() bool { return false } - -func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") } -func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") } - -func (s *v2v3Server) Leader() types.ID { - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) - defer cancel() - resp, err := s.c.Status(ctx, s.c.Endpoints()[0]) - if err != nil { - return 0 - } - return types.ID(resp.Leader) -} - -func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { - // adding member as learner is not supported by V2 Server. - resp, err := s.c.MemberAdd(ctx, memb.PeerURLs) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - resp, err := s.c.MemberRemove(ctx, id) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) { - resp, err := s.c.MemberPromote(ctx, id) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) { - resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs) - if err != nil { - return nil, err - } - return v3MembersToMembership(resp.Members), nil -} - -func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member { - membs := make([]*membership.Member, len(v3membs)) - for i, m := range v3membs { - membs[i] = &membership.Member{ - ID: types.ID(m.ID), - RaftAttributes: membership.RaftAttributes{ - PeerURLs: m.PeerURLs, - IsLearner: m.IsLearner, - }, - Attributes: membership.Attributes{ - Name: m.Name, - ClientURLs: m.ClientURLs, - }, - } - } - return membs -} - -func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() } -func (s *v2v3Server) Cluster() api.Cluster { return s } -func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil } -func (s *v2v3Server) LeaderChangedNotify() <-chan struct{} { return nil } - -func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) { - applier := etcdserver.NewApplierV2(s.lg, s.store, nil) - reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier) - req := (*etcdserver.RequestV2)(&r) - resp, err := req.Handle(ctx, reqHandler) - if resp.Err != nil { - return resp, resp.Err - } - return resp, err -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go 
b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go deleted file mode 100644 index 6d78cab719..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/store.go +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "context" - "fmt" - "path" - "sort" - "strings" - "time" - - "go.etcd.io/etcd/api/v3/mvccpb" - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/client/v3/concurrency" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -// store implements the Store interface for V2 using -// a v3 client. -type v2v3Store struct { - c *clientv3.Client - // pfx is the v3 prefix where keys should be stored. - pfx string - ctx context.Context -} - -const maxPathDepth = 63 - -var errUnsupported = fmt.Errorf("TTLs are unsupported") - -func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) } - -func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} } - -func (s *v2v3Store) Index() uint64 { panic("STUB") } - -func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) { - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).Then( - clientv3.OpGet(key+"/"), - clientv3.OpGet(key), - ).Commit() - if err != nil { - return nil, err - } - - if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) { - nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision) - if err != nil { - return nil, err - } - cidx, midx := uint64(0), uint64(0) - if len(kvs) > 0 { - cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision) - } - return &v2store.Event{ - Action: v2store.Get, - Node: &v2store.NodeExtern{ - Key: nodePath, - Dir: true, - Nodes: nodes, - CreatedIndex: cidx, - ModifiedIndex: midx, - }, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil - } - - kvs := resp.Responses[1].GetResponseRange().Kvs - if len(kvs) == 0 { - return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - - return &v2store.Event{ - Action: v2store.Get, - Node: s.mkV2Node(kvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) { - rootNodes, err := s.getDirDepth(nodePath, 1, rev) - if err != nil || !recursive { - if sorted { - sort.Sort(v2store.NodeExterns(rootNodes)) - } - return rootNodes, err - } - nextNodes := rootNodes - nodes := make(map[string]*v2store.NodeExtern) - // Breadth walk the subdirectories - for i := 2; len(nextNodes) > 0; i++ { - for _, n := range nextNodes { - nodes[n.Key] = n - if parent := nodes[path.Dir(n.Key)]; parent != nil { - parent.Nodes = append(parent.Nodes, n) - } - } - if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil { - return nil, err - } - } - - if sorted { - 
sort.Sort(v2store.NodeExterns(rootNodes)) - } - return rootNodes, nil -} - -func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) { - pd := s.mkPathDepth(nodePath, depth) - resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev)) - if err != nil { - return nil, err - } - - nodes := make([]*v2store.NodeExtern, len(resp.Kvs)) - for i, kv := range resp.Kvs { - nodes[i] = s.mkV2Node(kv) - } - return nodes, nil -} - -func (s *v2v3Store) Set( - nodePath string, - dir bool, - value string, - expireOpts v2store.TTLOptionSet, -) (*v2store.Event, error) { - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - ecode := 0 - applyf := func(stm concurrency.STM) error { - // build path if any directories in path do not exist - dirs := []string{} - for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { - pp := s.mkPath(p) - if stm.Rev(pp) > 0 { - ecode = v2error.EcodeNotDir - return nil - } - if stm.Rev(pp+"/") == 0 { - dirs = append(dirs, pp+"/") - } - } - for _, d := range dirs { - stm.Put(d, "") - } - - key := s.mkPath(nodePath) - if dir { - if stm.Rev(key) != 0 { - // exists as non-dir - ecode = v2error.EcodeNotDir - return nil - } - key = key + "/" - } else if stm.Rev(key+"/") != 0 { - ecode = v2error.EcodeNotFile - return nil - } - stm.Put(key, value, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), v2store.Set) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) - } - - createRev := resp.Header.Revision - var pn *v2store.NodeExtern - if pkv := prevKeyFromPuts(resp); pkv != nil { - pn = s.mkV2Node(pkv) - createRev = pkv.CreateRevision - } - - vp := &value - if dir { - vp = nil - } - return &v2store.Event{ - Action: v2store.Set, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: vp, - Dir: dir, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(createRev), - }, - PrevNode: pn, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - key := s.mkPath(nodePath) - ecode := 0 - applyf := func(stm concurrency.STM) error { - if rev := stm.Rev(key + "/"); rev != 0 { - ecode = v2error.EcodeNotFile - return nil - } - if rev := stm.Rev(key); rev == 0 { - ecode = v2error.EcodeKeyNotFound - return nil - } - stm.Put(key, newValue, clientv3.WithPrevKV()) - stm.Put(s.mkActionKey(), v2store.Update) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - - pkv := prevKeyFromPuts(resp) - return &v2store.Event{ - Action: v2store.Update, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: &newValue, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(pkv.CreateRevision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Create( - nodePath string, - dir bool, - value string, - unique bool, - expireOpts 
v2store.TTLOptionSet, -) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - ecode := 0 - applyf := func(stm concurrency.STM) error { - ecode = 0 - key := s.mkPath(nodePath) - if unique { - // append unique item under the node path - for { - key = nodePath + "/" + fmt.Sprintf("%020s", time.Now()) - key = path.Clean(path.Join("/", key)) - key = s.mkPath(key) - if stm.Rev(key) == 0 { - break - } - } - } - if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 { - ecode = v2error.EcodeNodeExist - return nil - } - // build path if any directories in path do not exist - dirs := []string{} - for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) { - pp := s.mkPath(p) - if stm.Rev(pp) > 0 { - ecode = v2error.EcodeNotDir - return nil - } - if stm.Rev(pp+"/") == 0 { - dirs = append(dirs, pp+"/") - } - } - for _, d := range dirs { - stm.Put(d, "") - } - - if dir { - // directories marked with extra slash in key name - key += "/" - } - stm.Put(key, value) - stm.Put(s.mkActionKey(), v2store.Create) - return nil - } - - resp, err := s.newSTM(applyf) - if err != nil { - return nil, err - } - if ecode != 0 { - return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision)) - } - - var v *string - if !dir { - v = &value - } - - return &v2store.Event{ - Action: v2store.Create, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: v, - Dir: dir, - ModifiedIndex: mkV2Rev(resp.Header.Revision), - CreatedIndex: mkV2Rev(resp.Header.Revision), - }, - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) CompareAndSwap( - nodePath string, - prevValue string, - prevIndex uint64, - value string, - expireOpts v2store.TTLOptionSet, -) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() { - return nil, errUnsupported - } - - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).If( - s.mkCompare(nodePath, prevValue, prevIndex)..., - ).Then( - clientv3.OpPut(key, value, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap), - ).Else( - clientv3.OpGet(key), - clientv3.OpGet(key+"/"), - ).Commit() - - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, compareFail(nodePath, prevValue, prevIndex, resp) - } - - pkv := resp.Responses[0].GetResponsePut().PrevKv - return &v2store.Event{ - Action: v2store.CompareAndSwap, - Node: &v2store.NodeExtern{ - Key: nodePath, - Value: &value, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - if !dir && !recursive { - return s.deleteNode(nodePath) - } - if !recursive { - return s.deleteEmptyDir(nodePath) - } - - dels := make([]clientv3.Op, maxPathDepth+1) - dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()) - for i := 1; i < maxPathDepth; i++ { - dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix()) - } - dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete) - - resp, err := s.c.Txn(s.ctx).If( - 
clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0), - clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0), - ).Then( - dels..., - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) - } - dresp := resp.Responses[0].GetResponseDeleteRange() - return &v2store.Event{ - Action: v2store.Delete, - PrevNode: s.mkV2Node(dresp.PrevKvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) { - resp, err := s.c.Txn(s.ctx).If( - clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(), - ).Then( - clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.Delete), - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision)) - } - dresp := resp.Responses[0].GetResponseDeleteRange() - if len(dresp.PrevKvs) == 0 { - return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision)) - } - return &v2store.Event{ - Action: v2store.Delete, - PrevNode: s.mkV2Node(dresp.PrevKvs[0]), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) { - resp, err := s.c.Txn(s.ctx).If( - clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0), - ).Then( - clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.Delete), - ).Commit() - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs - if len(pkvs) == 0 { - return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - pkv := pkvs[0] - return &v2store.Event{ - Action: v2store.Delete, - Node: &v2store.NodeExtern{ - Key: nodePath, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) { - if isRoot(nodePath) { - return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0) - } - - key := s.mkPath(nodePath) - resp, err := s.c.Txn(s.ctx).If( - s.mkCompare(nodePath, prevValue, prevIndex)..., - ).Then( - clientv3.OpDelete(key, clientv3.WithPrevKV()), - clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete), - ).Else( - clientv3.OpGet(key), - clientv3.OpGet(key+"/"), - ).Commit() - - if err != nil { - return nil, err - } - if !resp.Succeeded { - return nil, compareFail(nodePath, prevValue, prevIndex, resp) - } - - // len(pkvs) > 1 since txn only succeeds when key exists - pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0] - return &v2store.Event{ - Action: v2store.CompareAndDelete, - Node: &v2store.NodeExtern{ - Key: nodePath, - CreatedIndex: mkV2Rev(pkv.CreateRevision), - ModifiedIndex: mkV2Rev(resp.Header.Revision), - }, - PrevNode: s.mkV2Node(pkv), - EtcdIndex: mkV2Rev(resp.Header.Revision), - }, nil -} - -func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error { - if dkvs := 
resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 { - return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision)) - } - kvs := resp.Responses[0].GetResponseRange().Kvs - if len(kvs) == 0 { - return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision)) - } - kv := kvs[0] - indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex) - valueMatch := prevValue == "" || string(kv.Value) == prevValue - var cause string - switch { - case indexMatch && !valueMatch: - cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value)) - case valueMatch && !indexMatch: - cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision) - default: - cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision) - } - return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision)) -} - -func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp { - key := s.mkPath(nodePath) - cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)} - if prevIndex != 0 { - cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex))) - } - if prevValue != "" { - cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue)) - } - return cmps -} - -func (s *v2v3Store) JsonStats() []byte { panic("STUB") } -func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") } - -func (s *v2v3Store) Version() int { return 2 } - -// TODO: move this out of the Store interface? - -func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") } -func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") } -func (s *v2v3Store) Clone() v2store.Store { panic("STUB") } -func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") } -func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") } - -func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) } - -func (s *v2v3Store) mkNodePath(p string) string { - return path.Clean(p[len(s.pfx)+len("/k/000/"):]) -} - -// mkPathDepth makes a path to a key that encodes its directory depth -// for fast directory listing. If a depth is provided, it is added -// to the computed depth. -func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string { - normalForm := path.Clean(path.Join("/", nodePath)) - n := strings.Count(normalForm, "/") + depth - return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm) -} - -func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" } - -func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" } - -func mkV2Rev(v3Rev int64) uint64 { - if v3Rev == 0 { - return 0 - } - return uint64(v3Rev - 1) -} - -func mkV3Rev(v2Rev uint64) int64 { - if v2Rev == 0 { - return 0 - } - return int64(v2Rev + 1) -} - -// mkV2Node creates a V2 NodeExtern from a V3 KeyValue -func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern { - if kv == nil { - return nil - } - n := &v2store.NodeExtern{ - Key: s.mkNodePath(string(kv.Key)), - Dir: kv.Key[len(kv.Key)-1] == '/', - CreatedIndex: mkV2Rev(kv.CreateRevision), - ModifiedIndex: mkV2Rev(kv.ModRevision), - } - if !n.Dir { - v := string(kv.Value) - n.Value = &v - } - return n -} - -// prevKeyFromPuts gets the prev key that is being put; ignores -// the put action response. 
-func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue { - for _, r := range resp.Responses { - pkv := r.GetResponsePut().PrevKv - if pkv != nil && pkv.CreateRevision > 0 { - return pkv - } - } - return nil -} - -func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) { - return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable)) -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go deleted file mode 100644 index 046c25d450..0000000000 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2v3/watcher.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2017 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2v3 - -import ( - "context" - "strings" - - "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" -) - -func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) { - ctx, cancel := context.WithCancel(s.ctx) - wch := s.c.Watch( - ctx, - // TODO: very pricey; use a single store-wide watch in future - s.pfx, - clientv3.WithPrefix(), - clientv3.WithRev(int64(sinceIndex)), - clientv3.WithCreatedNotify(), - clientv3.WithPrevKV()) - resp, ok := <-wch - if err := resp.Err(); err != nil || !ok { - cancel() - return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0) - } - - evc, donec := make(chan *v2store.Event), make(chan struct{}) - go func() { - defer func() { - close(evc) - close(donec) - }() - for resp := range wch { - for _, ev := range s.mkV2Events(resp) { - k := ev.Node.Key - if recursive { - if !strings.HasPrefix(k, prefix) { - continue - } - // accept events on hidden keys given in prefix - k = strings.Replace(k, prefix, "/", 1) - // ignore hidden keys deeper than prefix - if strings.Contains(k, "/_") { - continue - } - } - if !recursive && k != prefix { - continue - } - select { - case evc <- ev: - case <-ctx.Done(): - return - } - if !stream { - return - } - } - } - }() - - return &v2v3Watcher{ - startRev: resp.Header.Revision, - evc: evc, - donec: donec, - cancel: cancel, - }, nil -} - -func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) { - ak := s.mkActionKey() - for _, rev := range mkRevs(wr) { - var act, key *clientv3.Event - for _, ev := range rev { - if string(ev.Kv.Key) == ak { - act = ev - } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) { - // use longest key to ignore intermediate new - // directories from Create. 
- key = ev - } else if key == nil { - key = ev - } - } - if act != nil && act.Kv != nil && key != nil { - v2ev := &v2store.Event{ - Action: string(act.Kv.Value), - Node: s.mkV2Node(key.Kv), - PrevNode: s.mkV2Node(key.PrevKv), - EtcdIndex: mkV2Rev(wr.Header.Revision), - } - evs = append(evs, v2ev) - } - } - return evs -} - -func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) { - var curRev []*clientv3.Event - for _, ev := range wr.Events { - if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision { - revs = append(revs, curRev) - curRev = nil - } - curRev = append(curRev, ev) - } - if curRev != nil { - revs = append(revs, curRev) - } - return revs -} - -type v2v3Watcher struct { - startRev int64 - evc chan *v2store.Event - donec chan struct{} - cancel context.CancelFunc -} - -func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) } - -func (w *v2v3Watcher) Remove() { - w.cancel() - <-w.donec -} - -func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go index 3038813cc8..6dfcfd1179 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go @@ -20,8 +20,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" "go.uber.org/zap" ) @@ -30,6 +29,14 @@ type BackendGetter interface { Backend() backend.Backend } +type AlarmBackend interface { + CreateAlarmBucket() + MustPutAlarm(member *pb.AlarmMember) + MustDeleteAlarm(alarm *pb.AlarmMember) + GetAllAlarms() ([]*pb.AlarmMember, error) + ForceCommit() +} + type alarmSet map[types.ID]*pb.AlarmMember // AlarmStore persists alarms to the backend. 
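The hunk above introduces the narrow AlarmBackend interface (the hunks that follow switch AlarmStore from BackendGetter to it), so alarm persistence can be exercised without a bolt-backed store. A minimal in-memory sketch of a conforming implementation (illustrative only; the fakeAlarmBackend name and its map layout are assumptions, not part of the vendored change):

	import (
		pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	)

	// fakeAlarmBackend satisfies AlarmBackend entirely in memory, keyed by
	// (member ID, alarm type), the pair that identifies an AlarmMember.
	type fakeAlarmBackend struct {
		alarms map[uint64]map[pb.AlarmType]*pb.AlarmMember
	}

	func (f *fakeAlarmBackend) CreateAlarmBucket() {
		if f.alarms == nil {
			f.alarms = map[uint64]map[pb.AlarmType]*pb.AlarmMember{}
		}
	}

	func (f *fakeAlarmBackend) MustPutAlarm(m *pb.AlarmMember) {
		if f.alarms[m.MemberID] == nil {
			f.alarms[m.MemberID] = map[pb.AlarmType]*pb.AlarmMember{}
		}
		f.alarms[m.MemberID][m.Alarm] = m
	}

	func (f *fakeAlarmBackend) MustDeleteAlarm(m *pb.AlarmMember) {
		delete(f.alarms[m.MemberID], m.Alarm)
	}

	func (f *fakeAlarmBackend) GetAllAlarms() ([]*pb.AlarmMember, error) {
		var ms []*pb.AlarmMember
		for _, byType := range f.alarms {
			for _, m := range byType {
				ms = append(ms, m)
			}
		}
		return ms, nil
	}

	// ForceCommit is a no-op here: there is no disk state to flush.
	func (f *fakeAlarmBackend) ForceCommit() {}

With such a fake, NewAlarmStore(zap.NewNop(), &fakeAlarmBackend{}) restores an empty store, and Activate/Deactivate can be unit-tested in isolation.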
@@ -38,14 +45,14 @@ type AlarmStore struct { mu sync.Mutex types map[pb.AlarmType]alarmSet - bg BackendGetter + be AlarmBackend } -func NewAlarmStore(lg *zap.Logger, bg BackendGetter) (*AlarmStore, error) { +func NewAlarmStore(lg *zap.Logger, be AlarmBackend) (*AlarmStore, error) { if lg == nil { lg = zap.NewNop() } - ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), bg: bg} + ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), be: be} err := ret.restore() return ret, err } @@ -59,16 +66,7 @@ func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember { return m } - v, err := newAlarm.Marshal() - if err != nil { - a.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - b := a.bg.Backend() - b.BatchTx().Lock() - b.BatchTx().UnsafePut(buckets.Alarm, v, nil) - b.BatchTx().Unlock() - + a.be.MustPutAlarm(newAlarm) return newAlarm } @@ -88,16 +86,7 @@ func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember { delete(t, id) - v, err := m.Marshal() - if err != nil { - a.lg.Panic("failed to marshal alarm member", zap.Error(err)) - } - - b := a.bg.Backend() - b.BatchTx().Lock() - b.BatchTx().UnsafeDelete(buckets.Alarm, v) - b.BatchTx().Unlock() - + a.be.MustDeleteAlarm(m) return m } @@ -119,22 +108,15 @@ func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) { } func (a *AlarmStore) restore() error { - b := a.bg.Backend() - tx := b.BatchTx() - - tx.Lock() - tx.UnsafeCreateBucket(buckets.Alarm) - err := tx.UnsafeForEach(buckets.Alarm, func(k, v []byte) error { - var m pb.AlarmMember - if err := m.Unmarshal(k); err != nil { - return err - } - a.addToMap(&m) - return nil - }) - tx.Unlock() - - b.ForceCommit() + a.be.CreateAlarmBucket() + ms, err := a.be.GetAllAlarms() + if err != nil { + return err + } + for _, m := range ms { + a.addToMap(m) + } + a.be.ForceCommit() return err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go index 083c72ede2..380fdfe2a3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go @@ -20,7 +20,7 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" "github.com/jonboulle/clockwork" "go.uber.org/zap" @@ -54,8 +54,9 @@ func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevG period: h, rg: rg, c: c, - revs: make([]int64, 0), } + // revs won't be longer than the retentions. + pc.revs = make([]int64, 0, pc.getRetentions()) pc.ctx, pc.cancel = context.WithCancel(context.Background()) return pc } @@ -66,7 +67,7 @@ Compaction period 1-hour: 2. record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 1-hour 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 24-hour: @@ -74,7 +75,7 @@ Compaction period 24-hour: 2. record revisions for every 1/10 of 1-hour (6-minute) 3. keep recording revisions with no compaction for first 24-hour 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? 
update revs, and retry after 1/10 of 1-hour (6-minute) Compaction period 59-min: @@ -82,7 +83,7 @@ Compaction period 59-min: 2. record revisions for every 1/10 of 59-min (5.9-min) 3. keep recording revisions with no compaction for first 59-min 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 59-min (5.9-min) Compaction period 5-sec: @@ -90,7 +91,7 @@ Compaction period 5-sec: 2. record revisions for every 1/10 of 5-sec (0.5-sec) 3. keep recording revisions with no compaction for first 5-sec 4. do compact with revs[0] - - success? contiue on for-loop and move sliding window; revs = revs[1:] + - success? continue on for-loop and move sliding window; revs = revs[1:] - failure? update revs, and retry after 1/10 of 5-sec (0.5-sec) */ @@ -113,9 +114,9 @@ func (pc *Periodic) Run() { case <-pc.ctx.Done(): return case <-pc.clock.After(retryInterval): - pc.mu.Lock() + pc.mu.RLock() p := pc.paused - pc.mu.Unlock() + pc.mu.RUnlock() if p { continue } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go index 83be627947..37492f2b4d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go @@ -20,7 +20,7 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" "github.com/jonboulle/clockwork" "go.uber.org/zap" diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go new file mode 100644 index 0000000000..cb91d6ed7c --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go @@ -0,0 +1,572 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3discovery provides an implementation of the cluster discovery that +// is used by etcd with v3 client. +package v3discovery + +import ( + "context" + "crypto/tls" + "errors" + + "math" + "path" + "sort" + "strconv" + "strings" + "time" + + "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/client/v3" + + "github.com/jonboulle/clockwork" + "go.uber.org/zap" +) + +const ( + discoveryPrefix = "/_etcd/registry" +) + +var ( + ErrInvalidURL = errors.New("discovery: invalid peer URL") + ErrBadSizeKey = errors.New("discovery: size key is bad") + ErrSizeNotFound = errors.New("discovery: size key not found") + ErrFullCluster = errors.New("discovery: cluster is full") + ErrTooManyRetries = errors.New("discovery: too many retries") +) + +var ( + // Number of retries discovery will attempt before giving up and error out. 
+ nRetries = uint(math.MaxUint32) + maxExponentialRetries = uint(8) +) + +type DiscoveryConfig struct { + Token string `json:"discovery-token"` + Endpoints []string `json:"discovery-endpoints"` + + DialTimeout time.Duration `json:"discovery-dial-timeout"` + RequestTimeOut time.Duration `json:"discovery-request-timeout"` + KeepAliveTime time.Duration `json:"discovery-keepalive-time"` + KeepAliveTimeout time.Duration `json:"discovery-keepalive-timeout"` + + InsecureTransport bool `json:"discovery-insecure-transport"` + InsecureSkipVerify bool `json:"discovery-insecure-skip-tls-verify"` + CertFile string `json:"discovery-cert"` + KeyFile string `json:"discovery-key"` + TrustedCAFile string `json:"discovery-cacert"` + + User string `json:"discovery-user"` + Password string `json:"discovery-password"` +} + +type memberInfo struct { + // peerRegKey is the key used by the member when registering in the + // discovery service. + // Format: "/_etcd/registry/<clusterToken>/members/<memberId>". + peerRegKey string + // peerURLsMap format: "peerName=peerURLs", i.e., "member1=http://127.0.0.1:2380". + peerURLsMap string + // createRev is the member's CreateRevision in the etcd cluster backing + // the discovery service. + createRev int64 +} + +type clusterInfo struct { + clusterToken string + members []memberInfo +} + +// key prefix for each cluster: "/_etcd/registry/<clusterToken>". +func geClusterKeyPrefix(cluster string) string { + return path.Join(discoveryPrefix, cluster) +} + +// key format for cluster size: "/_etcd/registry/<clusterToken>/_config/size". +func geClusterSizeKey(cluster string) string { + return path.Join(geClusterKeyPrefix(cluster), "_config/size") +} + +// key prefix for each member: "/_etcd/registry/<clusterToken>/members". +func getMemberKeyPrefix(clusterToken string) string { + return path.Join(geClusterKeyPrefix(clusterToken), "members") +} + +// key format for each member: "/_etcd/registry/<clusterToken>/members/<memberId>". +func getMemberKey(cluster, memberId string) string { + return path.Join(getMemberKeyPrefix(cluster), memberId) +} + +// GetCluster will connect to the discovery service at the given endpoints and +// retrieve a string describing the cluster. +func GetCluster(lg *zap.Logger, cfg *DiscoveryConfig) (cs string, rerr error) { + d, err := newDiscovery(lg, cfg, 0) + if err != nil { + return "", err + } + + defer d.close() + defer func() { + if rerr != nil { + d.lg.Error( + "discovery failed to get cluster", + zap.String("cluster", cs), + zap.Error(rerr), + ) + } else { + d.lg.Info( + "discovery got cluster successfully", + zap.String("cluster", cs), + ) + } + }() + + return d.getCluster() +} + +// JoinCluster will connect to the discovery service at the endpoints, and +// register the server represented by the given id and config to the cluster. +// The parameter `config` is supposed to be in the format "memberName=peerURLs", +// such as "member1=http://127.0.0.1:2380". +// +// The final returned string has the same format as "--initial-cluster", such as +// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380".
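The "--initial-cluster"-style string described above is parseable with types.NewURLsMap, the same helper this file later uses to validate getInitClusterStr's output. A short, self-contained illustration (member names and URLs are hypothetical):

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/client/pkg/v3/types"
	)

	func main() {
		// Same "name=peerURL" list format that JoinCluster returns and
		// that etcd accepts via --initial-cluster.
		cs := "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380"

		urlsmap, err := types.NewURLsMap(cs)
		if err != nil {
			panic(err)
		}
		fmt.Println(urlsmap["infra1"].String()) // http://127.0.0.1:12380
	}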
+func JoinCluster(lg *zap.Logger, cfg *DiscoveryConfig, id types.ID, config string) (cs string, rerr error) { + d, err := newDiscovery(lg, cfg, id) + if err != nil { + return "", err + } + + defer d.close() + defer func() { + if rerr != nil { + d.lg.Error( + "discovery failed to join cluster", + zap.String("cluster", cs), + zap.Error(rerr), + ) + } else { + d.lg.Info( + "discovery joined cluster successfully", + zap.String("cluster", cs), + ) + } + }() + + return d.joinCluster(config) +} + +type discovery struct { + lg *zap.Logger + clusterToken string + memberId types.ID + c *clientv3.Client + retries uint + + cfg *DiscoveryConfig + + clock clockwork.Clock +} + +func newDiscovery(lg *zap.Logger, dcfg *DiscoveryConfig, id types.ID) (*discovery, error) { + if lg == nil { + lg = zap.NewNop() + } + + lg = lg.With(zap.String("discovery-token", dcfg.Token), zap.String("discovery-endpoints", strings.Join(dcfg.Endpoints, ","))) + cfg, err := newClientCfg(dcfg, lg) + if err != nil { + return nil, err + } + + c, err := clientv3.New(*cfg) + if err != nil { + return nil, err + } + return &discovery{ + lg: lg, + clusterToken: dcfg.Token, + memberId: id, + c: c, + cfg: dcfg, + clock: clockwork.NewRealClock(), + }, nil +} + +// The following function follows the same logic as etcdctl, refer to +// https://github.com/etcd-io/etcd/blob/f9a8c49c695b098d66a07948666664ea10d01a82/etcdctl/ctlv3/command/global.go#L191-L250 +func newClientCfg(dcfg *DiscoveryConfig, lg *zap.Logger) (*clientv3.Config, error) { + var cfgtls *transport.TLSInfo + + if dcfg.CertFile != "" || dcfg.KeyFile != "" || dcfg.TrustedCAFile != "" { + cfgtls = &transport.TLSInfo{ + CertFile: dcfg.CertFile, + KeyFile: dcfg.KeyFile, + TrustedCAFile: dcfg.TrustedCAFile, + Logger: lg, + } + } + + cfg := &clientv3.Config{ + Endpoints: dcfg.Endpoints, + DialTimeout: dcfg.DialTimeout, + DialKeepAliveTime: dcfg.KeepAliveTime, + DialKeepAliveTimeout: dcfg.KeepAliveTimeout, + Username: dcfg.User, + Password: dcfg.Password, + } + + if cfgtls != nil { + if clientTLS, err := cfgtls.ClientConfig(); err == nil { + cfg.TLS = clientTLS + } else { + return nil, err + } + } + + // If key/cert is not given but user wants secure connection, we + // should still setup an empty tls configuration for gRPC to setup + // secure connection. + if cfg.TLS == nil && !dcfg.InsecureTransport { + cfg.TLS = &tls.Config{} + } + + // If the user wants to skip TLS verification then we should set + // the InsecureSkipVerify flag in tls configuration. 
+ if cfg.TLS != nil && dcfg.InsecureSkipVerify { + cfg.TLS.InsecureSkipVerify = true + } + + return cfg, nil +} + +func (d *discovery) getCluster() (string, error) { + cls, clusterSize, rev, err := d.checkCluster() + if err != nil { + if err == ErrFullCluster { + return cls.getInitClusterStr(clusterSize) + } + return "", err + } + + for cls.Len() < clusterSize { + d.waitPeers(cls, clusterSize, rev) + } + + return cls.getInitClusterStr(clusterSize) +} + +func (d *discovery) joinCluster(config string) (string, error) { + _, _, _, err := d.checkCluster() + if err != nil { + return "", err + } + + if err := d.registerSelf(config); err != nil { + return "", err + } + + cls, clusterSize, rev, err := d.checkCluster() + if err != nil { + return "", err + } + + for cls.Len() < clusterSize { + d.waitPeers(cls, clusterSize, rev) + } + + return cls.getInitClusterStr(clusterSize) +} + +func (d *discovery) getClusterSize() (int, error) { + configKey := geClusterSizeKey(d.clusterToken) + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeOut) + defer cancel() + + resp, err := d.c.Get(ctx, configKey) + if err != nil { + d.lg.Warn( + "failed to get cluster size from discovery service", + zap.String("clusterSizeKey", configKey), + zap.Error(err), + ) + return 0, err + } + + if len(resp.Kvs) == 0 { + return 0, ErrSizeNotFound + } + + clusterSize, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 0) + if err != nil || clusterSize <= 0 { + return 0, ErrBadSizeKey + } + + return int(clusterSize), nil +} + +func (d *discovery) getClusterMembers() (*clusterInfo, int64, error) { + membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeOut) + defer cancel() + + resp, err := d.c.Get(ctx, membersKeyPrefix, clientv3.WithPrefix()) + if err != nil { + d.lg.Warn( + "failed to get cluster members from discovery service", + zap.String("membersKeyPrefix", membersKeyPrefix), + zap.Error(err), + ) + return nil, 0, err + } + + cls := &clusterInfo{clusterToken: d.clusterToken} + for _, kv := range resp.Kvs { + mKey := strings.TrimSpace(string(kv.Key)) + mValue := strings.TrimSpace(string(kv.Value)) + + if err := cls.add(mKey, mValue, kv.CreateRevision); err != nil { + d.lg.Warn( + err.Error(), + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } else { + d.lg.Info( + "found peer from discovery service", + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } + } + + return cls, resp.Header.Revision, nil +} + +func (d *discovery) checkClusterRetry() (*clusterInfo, int, int64, error) { + if d.retries < nRetries { + d.logAndBackoffForRetry("cluster status check") + return d.checkCluster() + } + return nil, 0, 0, ErrTooManyRetries +} + +func (d *discovery) checkCluster() (*clusterInfo, int, int64, error) { + clusterSize, err := d.getClusterSize() + if err != nil { + if err == ErrSizeNotFound || err == ErrBadSizeKey { + return nil, 0, 0, err + } + + return d.checkClusterRetry() + } + + cls, rev, err := d.getClusterMembers() + if err != nil { + return d.checkClusterRetry() + } + d.retries = 0 + + // find self position + memberSelfId := getMemberKey(d.clusterToken, d.memberId.String()) + idx := 0 + for _, m := range cls.members { + if m.peerRegKey == memberSelfId { + break + } + if idx >= clusterSize-1 { + return cls, clusterSize, rev, ErrFullCluster + } + idx++ + } + return cls, clusterSize, rev, nil +} + +func (d *discovery) registerSelfRetry(contents string) error { + if d.retries < 
nRetries { + d.logAndBackoffForRetry("register member itself") + return d.registerSelf(contents) + } + return ErrTooManyRetries +} + +func (d *discovery) registerSelf(contents string) error { + ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeOut) + memberKey := getMemberKey(d.clusterToken, d.memberId.String()) + _, err := d.c.Put(ctx, memberKey, contents) + cancel() + + if err != nil { + d.lg.Warn( + "failed to register members itself to the discovery service", + zap.String("memberKey", memberKey), + zap.Error(err), + ) + return d.registerSelfRetry(contents) + } + d.retries = 0 + + d.lg.Info( + "register member itself successfully", + zap.String("memberKey", memberKey), + zap.String("memberInfo", contents), + ) + + return nil +} + +func (d *discovery) waitPeers(cls *clusterInfo, clusterSize int, rev int64) { + // watch from the next revision + membersKeyPrefix := getMemberKeyPrefix(d.clusterToken) + w := d.c.Watch(context.Background(), membersKeyPrefix, clientv3.WithPrefix(), clientv3.WithRev(rev+1)) + + d.lg.Info( + "waiting for peers from discovery service", + zap.Int("clusterSize", clusterSize), + zap.Int("found-peers", cls.Len()), + ) + + // waiting for peers until all needed peers are returned + for wresp := range w { + for _, ev := range wresp.Events { + mKey := strings.TrimSpace(string(ev.Kv.Key)) + mValue := strings.TrimSpace(string(ev.Kv.Value)) + + if err := cls.add(mKey, mValue, ev.Kv.CreateRevision); err != nil { + d.lg.Warn( + err.Error(), + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } else { + d.lg.Info( + "found peer from discovery service", + zap.String("memberKey", mKey), + zap.String("memberInfo", mValue), + ) + } + } + + if cls.Len() >= clusterSize { + break + } + } + + d.lg.Info( + "found all needed peers from discovery service", + zap.Int("clusterSize", clusterSize), + zap.Int("found-peers", cls.Len()), + ) +} + +func (d *discovery) logAndBackoffForRetry(step string) { + d.retries++ + // logAndBackoffForRetry stops exponential backoff when the retries are + // more than maxExpoentialRetries and is set to a constant backoff afterward. 
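// Editorial illustration (not part of the vendored file): with
// maxExponentialRetries = 8, the backoff computed below,
//
//	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
//
// yields 2s, 4s, 8s, 16s, 32s, 64s, 128s and 256s for the first eight
// retries, then stays at a constant 256s, because retries is clamped to
// maxExponentialRetries before the shift.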
+ retries := d.retries + if retries > maxExponentialRetries { + retries = maxExponentialRetries + } + retryTimeInSecond := time.Duration(0x1<<retries) * time.Second + d.lg.Info( + "retry connecting to discovery service", + zap.String("reason", step), + zap.Duration("backoff", retryTimeInSecond), + ) + d.clock.Sleep(retryTimeInSecond) +} + +func (d *discovery) close() error { + if d.c != nil { + return d.c.Close() + } + return nil +} + +func (cls *clusterInfo) Len() int { return len(cls.members) } + +func (cls *clusterInfo) getInitClusterStr(clusterSize int) (string, error) { + peerURLs := cls.getPeerURLs() + + if len(peerURLs) > clusterSize { + peerURLs = peerURLs[:clusterSize] + } + + us := strings.Join(peerURLs, ",") + _, err := types.NewURLsMap(us) + if err != nil { + return us, ErrInvalidURL + } + + return us, nil +} + +func (cls *clusterInfo) getPeerURLs() []string { + var peerURLs []string + for _, peer := range cls.members { + peerURLs = append(peerURLs, peer.peerURLsMap) + } + return peerURLs +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go index 26c52b385b..ea3dd75705 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go @@ -36,19 +36,21 @@ const ( maxSendBytes = math.MaxInt32 ) -func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { +func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls}) opts = append(opts, grpc.Creds(bundle.TransportCredentials())) } - chainUnaryInterceptors := []grpc.UnaryServerInterceptor{ newLogUnaryInterceptor(s), newUnaryInterceptor(s), grpc_prometheus.UnaryServerInterceptor, } + if interceptor != nil { + chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor) + } chainStreamInterceptors := []grpc.StreamServerInterceptor{ newStreamInterceptor(s), diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go index 5c80fcf041..47f75654e2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go @@ -18,6 +18,7 @@ import ( "context" "sync" "time" + "unicode/utf8" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" @@ -33,9 +34,8 @@ import ( ) const ( - maxNoLeaderCnt = 3 - warnUnaryRequestLatency = 300 * time.Millisecond - snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" + maxNoLeaderCnt = 3 + snapshotMethod = "/etcdserverpb.Maintenance/Snapshot" ) type streamsMap struct { @@ -50,7 +50,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { } if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { - return nil, rpctypes.ErrGPRCNotSupportedForLearner + return nil, rpctypes.ErrGRPCNotSupportedForLearner } md, ok := metadata.FromIncomingContext(ctx) @@ -59,6 +59,9 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { if len(vs) > 0 { ver = vs[0] } + if !utf8.ValidString(ver) { + return nil, rpctypes.ErrGRPCInvalidClientAPIVersion + } clientRequests.WithLabelValues("unary", ver).Inc() if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { @@ -77,20 +80,20 @@ func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerIntercepto startTime := time.Now() resp, err := handler(ctx, req) lg := s.Logger() - if lg != nil { // acquire stats if debug level is enabled or request is expensive - defer logUnaryRequestStats(ctx, lg, info, startTime, req, resp) + if lg != nil { // acquire stats if debug level is enabled or request is
expensive + defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp) } return resp, err } } -func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) { +func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, warnLatency time.Duration, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) { duration := time.Since(startTime) var enabledDebugLevel, expensiveRequest bool if lg.Core().Enabled(zap.DebugLevel) { enabledDebugLevel = true } - if duration > warnUnaryRequestLatency { + if duration > warnLatency { expensiveRequest = true } if !enabledDebugLevel && !expensiveRequest { @@ -216,7 +219,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor } if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot - return rpctypes.ErrGPRCNotSupportedForLearner + return rpctypes.ErrGRPCNotSupportedForLearner } md, ok := metadata.FromIncomingContext(ss.Context()) @@ -225,6 +228,9 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor if len(vs) > 0 { ver = vs[0] } + if !utf8.ValidString(ver) { + return rpctypes.ErrGRPCInvalidClientAPIVersion + } clientRequests.WithLabelValues("stream", ver).Inc() if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go index d1a7ee6334..2c1de2a90d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go @@ -115,6 +115,15 @@ func checkRangeRequest(r *pb.RangeRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } + + if _, ok := pb.RangeRequest_SortOrder_name[int32(r.SortOrder)]; !ok { + return rpctypes.ErrGRPCInvalidSortOption + } + + if _, ok := pb.RangeRequest_SortTarget_name[int32(r.SortTarget)]; !ok { + return rpctypes.ErrGRPCInvalidSortOption + } + return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go index 38cc913716..59732f6190 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go @@ -27,8 +27,9 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/mvcc" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) @@ -100,6 +101,11 @@ func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRe const snapshotSendBufferSize = 32 * 1024 func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error { + ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx()) + storageVersion := "" + if ver != nil { + storageVersion = ver.String() + } snap := ms.bg.Backend().Snapshot() pr, pw := io.Pipe() @@ -125,6 +131,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance ms.lg.Info("sending database snapshot to client", zap.Int64("total-bytes", total), zap.String("size", size), + 
zap.String("storage-version", storageVersion), ) for total-sent > 0 { // buffer just holds read bytes from stream @@ -151,6 +158,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance resp := &pb.SnapshotResponse{ RemainingBytes: uint64(total - sent), Blob: buf[:n], + Version: storageVersion, } if err = srv.Send(resp); err != nil { return togRPCError(err) @@ -166,7 +174,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance zap.Int64("total-bytes", total), zap.Int("checksum-size", len(sha)), ) - hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha} + hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion} if err := srv.Send(hresp); err != nil { return togRPCError(err) } @@ -175,6 +183,7 @@ func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance zap.Int64("total-bytes", total), zap.String("size", size), zap.String("took", humanize.Time(start)), + zap.String("storage-version", storageVersion), ) return nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go index 7f53bd966b..fd41bc1333 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go @@ -21,6 +21,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/storage" ) type quotaKVServer struct { @@ -29,7 +30,7 @@ type quotaKVServer struct { } type quotaAlarmer struct { - q etcdserver.Quota + q storage.Quota a Alarmer id types.ID } @@ -52,7 +53,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()}, + quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "kv"), s, s.ID()}, } } @@ -85,6 +86,6 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()}, + quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "lease"), s, s.ID()}, } } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go index f61fae03b9..cef6476bc4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go @@ -23,8 +23,9 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -58,11 +59,11 @@ var toGRPCErrorMap = map[error]error{ etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, - etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, - etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, - etcdserver.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, - 
etcdserver.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, - etcdserver.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, + etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, + etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, + version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, + version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, + version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, @@ -84,6 +85,7 @@ var toGRPCErrorMap = map[error]error{ auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled, auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken, auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt, + auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision, // In sync with status.FromContextError context.Canceled: rpctypes.ErrGRPCCanceled, diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go index c56ec4b26e..b8466354b1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" ) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go index 5a77ef3773..7eb53ebcc6 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply.go @@ -31,8 +31,10 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/mvcc" "github.com/gogo/protobuf/proto" "go.uber.org/zap" @@ -146,15 +148,15 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 member case r.ClusterVersionSet != nil: // Implemented in 3.5.x op = "ClusterVersionSet" a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) - return nil + return ar case r.ClusterMemberAttrSet != nil: op = "ClusterMemberAttrSet" // Implemented in 3.5.x a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) - return nil + return ar case r.DowngradeInfoSet != nil: op = "DowngradeInfoSet" // Implemented in 3.5.x a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) - return nil + return ar } if !shouldApplyV3 { @@ -335,6 +337,8 @@ func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.Ra resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} + lg := a.s.Logger() + if txn == nil { txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace) defer txn.End() @@ -386,6 +390,11 @@ func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.Ra // sorted by keys in lexiographically ascending order, // sort ASCEND by default only when target is not 'KEY' sortOrder = pb.RangeRequest_ASCEND + } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == 
pb.RangeRequest_ASCEND { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // don't re-sort when target is 'KEY' and order is ASCEND + sortOrder = pb.RangeRequest_NONE } if sortOrder != pb.RangeRequest_NONE { var sorter sort.Interface @@ -400,6 +409,8 @@ func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.Ra sorter = &kvSortByMod{&kvSort{rr.KVs}} case r.SortTarget == pb.RangeRequest_VALUE: sorter = &kvSortByValue{&kvSort{rr.KVs}} + default: + lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget))) } switch { case sortOrder == pb.RangeRequest_ASCEND: @@ -770,7 +781,7 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) type applierV3Capped struct { applierV3 - q backendQuota + q serverstorage.BackendQuota } // newApplierV3Capped creates an applyV3 that will reject Puts and transactions @@ -925,7 +936,20 @@ func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleList } func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - a.s.cluster.SetVersion(semver.Must(semver.NewVersion(r.Ver)), api.UpdateCapability, shouldApplyV3) + prevVersion := a.s.Cluster().Version() + newVersion := semver.Must(semver.NewVersion(r.Ver)) + a.s.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3) + // Force snapshot after cluster version downgrade. + if prevVersion != nil && newVersion.LessThan(*prevVersion) { + lg := a.s.Logger() + if lg != nil { + lg.Info("Cluster version downgrade detected, forcing snapshot", + zap.String("prev-cluster-version", prevVersion.String()), + zap.String("new-cluster-version", newVersion.String()), + ) + } + a.s.forceSnapshot = true + } } func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { @@ -940,20 +964,20 @@ func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAtt } func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - d := membership.DowngradeInfo{Enabled: false} + d := version.DowngradeInfo{Enabled: false} if r.Enabled { - d = membership.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} + d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} } a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3) } type quotaApplierV3 struct { applierV3 - q Quota + q serverstorage.Quota } func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return "aApplierV3{app, NewBackendQuota(s, "v3-applier")} + return "aApplierV3{app, serverstorage.NewBackendQuota(s.Cfg, s.Backend(), "v3-applier")} } func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go index 74fd2b4fc3..bf043aa731 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_auth.go @@ -23,7 +23,7 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) type authApplierV3 struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go 
b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go new file mode 100644 index 0000000000..e0b747f1d4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go @@ -0,0 +1,718 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/dustin/go-humanize" + "go.uber.org/zap" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/client/pkg/v3/fileutil" + "go.etcd.io/etcd/client/pkg/v3/types" + "go.etcd.io/etcd/pkg/v3/pbutil" + "go.etcd.io/etcd/raft/v3" + "go.etcd.io/etcd/raft/v3/raftpb" + "go.etcd.io/etcd/server/v3/config" + "go.etcd.io/etcd/server/v3/etcdserver/api" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" + "go.etcd.io/etcd/server/v3/etcdserver/api/snap" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" + "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" + "go.etcd.io/etcd/server/v3/storage/wal" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" +) + +func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) { + + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + cfg.Logger.Warn( + "exceeded recommended request limit", + zap.Uint("max-request-bytes", cfg.MaxRequestBytes), + zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), + zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), + zap.String("recommended-request-size", recommendedMaxRequestBytesString), + ) + } + + if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil { + return nil, fmt.Errorf("cannot access data directory: %v", terr) + } + + if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil { + return nil, fmt.Errorf("cannot access member directory: %v", terr) + } + ss := bootstrapSnapshot(cfg) + prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) + if err != nil { + return nil, err + } + + haveWAL := wal.Exist(cfg.WALDir()) + st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) + backend, err := bootstrapBackend(cfg, haveWAL, st, ss) + if err != nil { + return nil, err + } + var ( + bwal *bootstrappedWAL + ) + + if haveWAL { + if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { + return nil, fmt.Errorf("cannot write to WAL directory: %v", err) + } + bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot) + } + + cluster, err := bootstrapCluster(cfg, bwal, prt) + if err != nil { + backend.Close() + return nil, err + } + + s, err := bootstrapStorage(cfg, st, backend, bwal, cluster) + if err != nil { + backend.Close() + return nil, err + } + + err = 
cluster.Finalize(cfg, s) + if err != nil { + backend.Close() + return nil, err + } + raft := bootstrapRaft(cfg, cluster, s.wal) + return &bootstrappedServer{ + prt: prt, + ss: ss, + storage: s, + cluster: cluster, + raft: raft, + }, nil +} + +type bootstrappedServer struct { + storage *bootstrappedStorage + cluster *bootstrapedCluster + raft *bootstrappedRaft + prt http.RoundTripper + ss *snap.Snapshotter +} + +func (s *bootstrappedServer) Close() { + s.storage.Close() +} + +type bootstrappedStorage struct { + backend *bootstrappedBackend + wal *bootstrappedWAL + st v2store.Store +} + +func (s *bootstrappedStorage) Close() { + s.backend.Close() +} + +type bootstrappedBackend struct { + beHooks *serverstorage.BackendHooks + be backend.Backend + ci cindex.ConsistentIndexer + beExist bool + snapshot *raftpb.Snapshot +} + +func (s *bootstrappedBackend) Close() { + s.be.Close() +} + +type bootstrapedCluster struct { + remotes []*membership.Member + cl *membership.RaftCluster + nodeID types.ID +} + +type bootstrappedRaft struct { + lg *zap.Logger + heartbeat time.Duration + + peers []raft.Peer + config *raft.Config + storage *raft.MemoryStorage +} + +func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrapedCluster) (b *bootstrappedStorage, err error) { + if wal == nil { + wal = bootstrapNewWAL(cfg, cl) + } + + return &bootstrappedStorage{ + backend: be, + st: st, + wal: wal, + }, nil +} + +func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter { + if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil { + cfg.Logger.Fatal( + "failed to create snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + + if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { + return strings.HasPrefix(fileName, "tmp") + }); err != nil { + cfg.Logger.Error( + "failed to remove temp file(s) in snapshot directory", + zap.String("path", cfg.SnapDir()), + zap.Error(err), + ) + } + return snap.New(cfg.Logger, cfg.SnapDir()) +} + +func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) { + beExist := fileutil.Exist(cfg.BackendPath()) + ci := cindex.NewConsistentIndex(nil) + beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci) + be := serverstorage.OpenBackend(cfg, beHooks) + defer func() { + if err != nil && be != nil { + be.Close() + } + }() + ci.SetBackend(be) + schema.CreateMetaBucket(be.BatchTx()) + if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { + err = maybeDefragBackend(cfg, be) + if err != nil { + return nil, err + } + } + cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex())) + + // TODO(serathius): Implement schema setup in fresh storage + var ( + snapshot *raftpb.Snapshot + ) + if haveWAL { + snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss) + if err != nil { + return nil, err + } + } + if beExist { + err = schema.Validate(cfg.Logger, be.BatchTx()) + if err != nil { + cfg.Logger.Error("Failed to validate schema", zap.Error(err)) + return nil, err + } + } + + return &bootstrappedBackend{ + beHooks: beHooks, + be: be, + ci: ci, + beExist: beExist, + snapshot: snapshot, + }, nil +} + +func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error { + size := be.Size() + sizeInUse := be.SizeInUse() + freeableMemory := uint(size - sizeInUse) + thresholdBytes := 
cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 + if freeableMemory < thresholdBytes { + cfg.Logger.Info("Skipping defragmentation", + zap.Int64("current-db-size-bytes", size), + zap.String("current-db-size", humanize.Bytes(uint64(size))), + zap.Int64("current-db-size-in-use-bytes", sizeInUse), + zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), + zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), + zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), + ) + return nil + } + return be.Defrag() +} + +func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrapedCluster, err error) { + switch { + case bwal == nil && !cfg.NewCluster: + c, err = bootstrapExistingClusterNoWAL(cfg, prt) + case bwal == nil && cfg.NewCluster: + c, err = bootstrapNewClusterNoWAL(cfg, prt) + case bwal != nil && bwal.haveWAL: + c, err = bootstrapClusterWithWAL(cfg, bwal.meta) + default: + return nil, fmt.Errorf("unsupported bootstrap config") + } + if err != nil { + return nil, err + } + return c, nil +} + +func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) { + if err := cfg.VerifyJoinExisting(); err != nil { + return nil, err + } + cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + if err != nil { + return nil, err + } + existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) + if gerr != nil { + return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) + } + if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { + return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) + } + if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) { + return nil, fmt.Errorf("incompatible with current running cluster") + } + scaleUpLearners := false + if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, existingCluster.Members(), scaleUpLearners); err != nil { + return nil, err + } + remotes := existingCluster.Members() + cl.SetID(types.ID(0), existingCluster.ID()) + member := cl.MemberByName(cfg.Name) + return &bootstrapedCluster{ + remotes: remotes, + cl: cl, + nodeID: member.ID, + }, nil +} + +func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrapedCluster, error) { + if err := cfg.VerifyBootstrap(); err != nil { + return nil, err + } + cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + if err != nil { + return nil, err + } + m := cl.MemberByName(cfg.Name) + if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) { + return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) + } + if cfg.ShouldDiscover() { + var str string + if cfg.DiscoveryURL != "" { + cfg.Logger.Warn("V2 discovery is deprecated!") + str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) + } else { + cfg.Logger.Info("Bootstrapping cluster using v3 discovery.") + str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String()) + } + if err != nil { + return nil, 
&DiscoveryError{Op: "join", Err: err} + } + var urlsmap types.URLsMap + urlsmap, err = types.NewURLsMap(str) + if err != nil { + return nil, err + } + if config.CheckDuplicateURL(urlsmap) { + return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) + } + if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)); err != nil { + return nil, err + } + } + return &bootstrapedCluster{ + remotes: nil, + cl: cl, + nodeID: m.ID, + }, nil +} + +func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrapedCluster, error) { + if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { + return nil, fmt.Errorf("cannot write to member directory: %v", err) + } + + if cfg.ShouldDiscover() { + cfg.Logger.Warn( + "discovery token is ignored since cluster already initialized; valid logs are found", + zap.String("wal-dir", cfg.WALDir()), + ) + } + cl := membership.NewCluster(cfg.Logger, membership.WithMaxLearners(cfg.ExperimentalMaxLearners)) + + scaleUpLearners := false + if err := membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, cl.Members(), scaleUpLearners); err != nil { + return nil, err + } + + cl.SetID(meta.nodeID, meta.clusterID) + return &bootstrapedCluster{ + cl: cl, + nodeID: meta.nodeID, + }, nil +} + +func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) { + // Find a snapshot to start/restart a raft node + walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) + if err != nil { + return nil, be, err + } + // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding + // bwal log entries + snapshot, err := ss.LoadNewestAvailable(walSnaps) + if err != nil && err != snap.ErrNoSnapshot { + return nil, be, err + } + + if snapshot != nil { + if err = st.Recovery(snapshot.Data); err != nil { + cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) + } + + if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil { + cfg.Logger.Error("illegal v2store content", zap.Error(err)) + return nil, be, err + } + + cfg.Logger.Info( + "recovered v2 store from snapshot", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), + ) + + if be, err = serverstorage.RecoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil { + cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) + } + // A snapshot db may have already been recovered, and the old db should have + // already been closed in this case, so we should set the backend again. + ci.SetBackend(be) + + s1, s2 := be.Size(), be.SizeInUse() + cfg.Logger.Info( + "recovered v3 backend from snapshot", + zap.Int64("backend-size-bytes", s1), + zap.String("backend-size", humanize.Bytes(uint64(s1))), + zap.Int64("backend-size-in-use-bytes", s2), + zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), + ) + if beExist { + // TODO: remove kvindex != 0 checking when we do not expect users to upgrade + // etcd from pre-3.0 release. 
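+		// A non-zero consistent index smaller than the snapshot index means the backend is missing applies that the snapshot already covers; zero means the index was never persisted.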
+ kvindex := ci.ConsistentIndex() + if kvindex < snapshot.Metadata.Index { + if kvindex != 0 { + return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index) + } + cfg.Logger.Warn( + "consistent index was never saved", + zap.Uint64("snapshot-index", snapshot.Metadata.Index), + ) + } + } + } else { + cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!") + } + return snapshot, be, nil +} + +func (c *bootstrapedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error { + if !s.wal.haveWAL { + c.cl.SetID(c.nodeID, c.cl.ID()) + } + c.cl.SetStore(s.st) + c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be)) + if s.wal.haveWAL { + c.cl.Recover(api.UpdateCapability) + if c.databaseFileMissing(s) { + bepath := cfg.BackendPath() + os.RemoveAll(bepath) + return fmt.Errorf("database file (%v) of the backend is missing", bepath) + } + } + scaleUpLearners := false + return membership.ValidateMaxLearnerConfig(cfg.ExperimentalMaxLearners, c.cl.Members(), scaleUpLearners) +} + +func (c *bootstrapedCluster) databaseFileMissing(s *bootstrappedStorage) bool { + v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3}) + return v3Cluster && !s.backend.beExist +} + +func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrapedCluster, bwal *bootstrappedWAL) *bootstrappedRaft { + switch { + case !bwal.haveWAL && !cfg.NewCluster: + return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal) + case !bwal.haveWAL && cfg.NewCluster: + return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal) + case bwal.haveWAL: + return bootstrapRaftFromWAL(cfg, bwal) + default: + cfg.Logger.Panic("unsupported bootstrap config") + return nil + } +} + +func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft { + member := cl.MemberByName(cfg.Name) + peers := make([]raft.Peer, len(ids)) + for i, id := range ids { + var ctx []byte + ctx, err := json.Marshal((*cl).Member(id)) + if err != nil { + cfg.Logger.Panic("failed to marshal member", zap.Error(err)) + } + peers[i] = raft.Peer{ID: uint64(id), Context: ctx} + } + cfg.Logger.Info( + "starting local member", + zap.String("local-member-id", member.ID.String()), + zap.String("cluster-id", cl.ID().String()), + ) + s := bwal.MemoryStorage() + return &bootstrappedRaft{ + lg: cfg.Logger, + heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, + config: raftConfig(cfg, uint64(member.ID), s), + peers: peers, + storage: s, + } +} + +func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft { + s := bwal.MemoryStorage() + return &bootstrappedRaft{ + lg: cfg.Logger, + heartbeat: time.Duration(cfg.TickMs) * time.Millisecond, + config: raftConfig(cfg, uint64(bwal.meta.nodeID), s), + storage: s, + } +} + +func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config { + return &raft.Config{ + ID: id, + ElectionTick: cfg.ElectionTicks, + HeartbeatTick: 1, + Storage: s, + MaxSizePerMsg: maxSizePerMsg, + MaxInflightMsgs: maxInflightMsgs, + CheckQuorum: true, + PreVote: cfg.PreVote, + Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), + } +} + +func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode { + var n raft.Node + if len(b.peers) == 0 { + n = raft.RestartNode(b.config) + } else { + n = 
raft.StartNode(b.config, b.peers) + } + raftStatusMu.Lock() + raftStatus = n.Status + raftStatusMu.Unlock() + return newRaftNode( + raftNodeConfig{ + lg: b.lg, + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: b.heartbeat, + raftStorage: b.storage, + storage: serverstorage.NewStorage(b.lg, wal, ss), + }, + ) +} + +func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) *bootstrappedWAL { + wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot) + bwal := &bootstrappedWAL{ + lg: cfg.Logger, + w: wal, + st: st, + ents: ents, + snapshot: snap, + meta: meta, + haveWAL: true, + } + + if cfg.ForceNewCluster { + // discard the previously uncommitted entries + bwal.ents = bwal.CommitedEntries() + entries := bwal.NewConfigChangeEntries() + // force commit config change entries + bwal.AppendAndCommitEntries(entries) + cfg.Logger.Info( + "forcing restart member", + zap.String("cluster-id", meta.clusterID.String()), + zap.String("local-member-id", meta.nodeID.String()), + zap.Uint64("commit-index", bwal.st.Commit), + ) + } else { + cfg.Logger.Info( + "restarting local member", + zap.String("cluster-id", meta.clusterID.String()), + zap.String("local-member-id", meta.nodeID.String()), + zap.Uint64("commit-index", bwal.st.Commit), + ) + } + return bwal +} + +// openWALFromSnapshot reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear +// after the position of the given snap in the WAL. +// The snap must have been previously saved to the WAL, or this call will panic. +func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) { + var walsnap walpb.Snapshot + if snapshot != nil { + walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term + } + repaired := false + for { + w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap) + if err != nil { + cfg.Logger.Fatal("failed to open WAL", zap.Error(err)) + } + if cfg.UnsafeNoFsync { + w.SetUnsafeNoFsync() + } + wmetadata, st, ents, err := w.ReadAll() + if err != nil { + w.Close() + // we can only repair ErrUnexpectedEOF and we never repair twice. 
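+			// Repair truncates the torn tail of the WAL so that the retried ReadAll can succeed.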
+ if repaired || err != io.ErrUnexpectedEOF { + cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err)) + } + if !wal.Repair(cfg.Logger, cfg.WALDir()) { + cfg.Logger.Fatal("failed to repair WAL", zap.Error(err)) + } else { + cfg.Logger.Info("repaired WAL", zap.Error(err)) + repaired = true + } + continue + } + var metadata etcdserverpb.Metadata + pbutil.MustUnmarshal(&metadata, wmetadata) + id := types.ID(metadata.NodeID) + cid := types.ID(metadata.ClusterID) + meta := &snapshotMetadata{clusterID: cid, nodeID: id} + return w, &st, ents, snapshot, meta + } +} + +type snapshotMetadata struct { + nodeID, clusterID types.ID +} + +func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrapedCluster) *bootstrappedWAL { + metadata := pbutil.MustMarshal( + &etcdserverpb.Metadata{ + NodeID: uint64(cl.nodeID), + ClusterID: uint64(cl.cl.ID()), + }, + ) + w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata) + if err != nil { + cfg.Logger.Panic("failed to create WAL", zap.Error(err)) + } + if cfg.UnsafeNoFsync { + w.SetUnsafeNoFsync() + } + return &bootstrappedWAL{ + lg: cfg.Logger, + w: w, + } +} + +type bootstrappedWAL struct { + lg *zap.Logger + + haveWAL bool + w *wal.WAL + st *raftpb.HardState + ents []raftpb.Entry + snapshot *raftpb.Snapshot + meta *snapshotMetadata +} + +func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage { + s := raft.NewMemoryStorage() + if wal.snapshot != nil { + s.ApplySnapshot(*wal.snapshot) + } + if wal.st != nil { + s.SetHardState(*wal.st) + } + if len(wal.ents) != 0 { + s.Append(wal.ents) + } + return s +} + +func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry { + for i, ent := range wal.ents { + if ent.Index > wal.st.Commit { + wal.lg.Info( + "discarding uncommitted WAL entries", + zap.Uint64("entry-index", ent.Index), + zap.Uint64("commit-index-from-wal", wal.st.Commit), + zap.Int("number-of-discarded-entries", len(wal.ents)-i), + ) + return wal.ents[:i] + } + } + return wal.ents +} + +func (wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry { + return serverstorage.CreateConfigChangeEnts( + wal.lg, + serverstorage.GetEffectiveNodeIDsFromWalEntries(wal.lg, wal.snapshot, wal.ents), + uint64(wal.meta.nodeID), + wal.st.Term, + wal.st.Commit, + ) +} + +func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) { + wal.ents = append(wal.ents, ents...) 
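+	// WAL.Save skips an empty HardState, so only the appended entries are persisted here; the in-memory commit index is advanced below.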
+ err := wal.w.Save(raftpb.HardState{}, ents) + if err != nil { + wal.lg.Fatal("failed to save hard state and entries", zap.Error(err)) + } + if len(wal.ents) != 0 { + wal.st.Commit = wal.ents[len(wal.ents)-1].Index + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go index 64b98b6fff..24dad66031 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go @@ -15,12 +15,11 @@ package cindex import ( - "encoding/binary" "sync" "sync/atomic" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" ) type Backend interface { @@ -74,7 +73,7 @@ func (ci *consistentIndex) ConsistentIndex() uint64 { ci.mutex.Lock() defer ci.mutex.Unlock() - v, term := ReadConsistentIndex(ci.be.BatchTx()) + v, term := schema.ReadConsistentIndex(ci.be.BatchTx()) ci.SetConsistentIndex(v, term) return v } @@ -87,7 +86,7 @@ func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) { func (ci *consistentIndex) UnsafeSave(tx backend.BatchTx) { index := atomic.LoadUint64(&ci.consistentIndex) term := atomic.LoadUint64(&ci.term) - UnsafeUpdateConsistentIndex(tx, index, term, true) + schema.UnsafeUpdateConsistentIndex(tx, index, term, true) } func (ci *consistentIndex) SetBackend(be Backend) { @@ -117,73 +116,8 @@ func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) { func (f *fakeConsistentIndex) UnsafeSave(_ backend.BatchTx) {} func (f *fakeConsistentIndex) SetBackend(_ Backend) {} -// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exists yet). -func UnsafeCreateMetaBucket(tx backend.BatchTx) { - tx.UnsafeCreateBucket(buckets.Meta) -} - -// CreateMetaBucket creates the `meta` bucket (if it does not exists yet). -func CreateMetaBucket(tx backend.BatchTx) { - tx.Lock() - defer tx.Unlock() - tx.UnsafeCreateBucket(buckets.Meta) -} - -// unsafeGetConsistentIndex loads consistent index & term from given transaction. -// returns 0,0 if the data are not found. -// Term is persisted since v3.5. -func unsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - _, vs := tx.UnsafeRange(buckets.Meta, buckets.MetaConsistentIndexKeyName, nil, 0) - if len(vs) == 0 { - return 0, 0 - } - v := binary.BigEndian.Uint64(vs[0]) - _, ts := tx.UnsafeRange(buckets.Meta, buckets.MetaTermKeyName, nil, 0) - if len(ts) == 0 { - return v, 0 - } - t := binary.BigEndian.Uint64(ts[0]) - return v, t -} - -// ReadConsistentIndex loads consistent index and term from given transaction. -// returns 0 if the data are not found. -func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) { - tx.Lock() - defer tx.Unlock() - return unsafeReadConsistentIndex(tx) -} - -func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) { - if index == 0 { - // Never save 0 as it means that we didn't loaded the real index yet. 
- return - } - - if onlyGrow { - oldi, oldTerm := unsafeReadConsistentIndex(tx) - if term < oldTerm { - return - } - if term == oldTerm && index <= oldi { - return - } - } - - bs1 := make([]byte, 8) - binary.BigEndian.PutUint64(bs1, index) - // put the index into the underlying backend - // tx has been locked in TxnBegin, so there is no need to lock it again - tx.UnsafePut(buckets.Meta, buckets.MetaConsistentIndexKeyName, bs1) - if term > 0 { - bs2 := make([]byte, 8) - binary.BigEndian.PutUint64(bs2, term) - tx.UnsafePut(buckets.Meta, buckets.MetaTermKeyName, bs2) - } -} - func UpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) { tx.Lock() defer tx.Unlock() - UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow) + schema.UnsafeUpdateConsistentIndex(tx, index, term, onlyGrow) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go index 595586e201..2f22568bce 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go @@ -18,7 +18,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "sort" "strconv" @@ -80,7 +80,7 @@ func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Durat } continue } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) resp.Body.Close() if err != nil { if logerr { @@ -134,11 +134,11 @@ func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string { return us } -// getVersions returns the versions of the members in the given cluster. +// getMembersVersions returns the versions of the members in the given cluster. // The key of the returned map is the member's ID. The value of the returned map // is the semver versions string, including server and cluster. // If it fails to get the version of a member, the key will be nil. -func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { +func getMembersVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions { members := cl.Members() vers := make(map[string]*version.Versions) for _, m := range members { @@ -161,44 +161,6 @@ func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt return vers } -// decideClusterVersion decides the cluster version based on the versions map. -// The returned version is the min server version in the map, or nil if the min -// version in unknown. 
-func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version { - var cv *semver.Version - lv := semver.Must(semver.NewVersion(version.Version)) - - for mid, ver := range vers { - if ver == nil { - return nil - } - v, err := semver.NewVersion(ver.Server) - if err != nil { - lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return nil - } - if lv.LessThan(*v) { - lg.Warn( - "leader found higher-versioned member", - zap.String("local-member-version", lv.String()), - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - ) - } - if cv == nil { - cv = v - } else if v.LessThan(*cv) { - cv = v - } - } - return cv -} - // allowedVersionRange decides the available version range of the cluster that local server can join in; // if the downgrade enabled status is true, the version window is [oneMinorHigher, oneMinorHigher] // if the downgrade is not enabled, the version window is [MinClusterVersion, localVersion] @@ -222,7 +184,7 @@ func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *sem // out of the range. // We set this rule since when the local member joins, another member might be offline. func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool { - vers := getVersions(lg, cl, local, rt) + vers := getMembersVersions(lg, cl, local, rt) minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt)) return isCompatibleWithVers(lg, vers, local, minV, maxV) } @@ -294,7 +256,7 @@ func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*ve continue } var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) resp.Body.Close() if err != nil { lg.Warn( @@ -335,7 +297,7 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R return nil, err } defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } @@ -412,7 +374,7 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip continue } var b []byte - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) resp.Body.Close() if err != nil { lg.Warn( @@ -438,35 +400,6 @@ func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTrip return false, err } -// isMatchedVersions returns true if all server versions are equal to target version, otherwise return false. -// It can be used to decide the whether the cluster finishes downgrading to target version. 
-func isMatchedVersions(lg *zap.Logger, targetVersion *semver.Version, vers map[string]*version.Versions) bool { - for mid, ver := range vers { - if ver == nil { - return false - } - v, err := semver.NewVersion(ver.Cluster) - if err != nil { - lg.Warn( - "failed to parse server version of remote member", - zap.String("remote-member-id", mid), - zap.String("remote-member-version", ver.Server), - zap.Error(err), - ) - return false - } - if !targetVersion.Equal(*v) { - lg.Warn("remotes server has mismatching etcd version", - zap.String("remote-member-id", mid), - zap.String("current-server-version", v.String()), - zap.String("target-version", targetVersion.String()), - ) - return false - } - } - return true -} - func convertToClusterVersion(v string) (*semver.Version, error) { ver, err := semver.NewVersion(v) if err != nil { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go index 3a4bab6d5c..81288d5cba 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go @@ -19,7 +19,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" @@ -28,7 +28,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" ) @@ -360,7 +360,7 @@ func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) return @@ -417,7 +417,7 @@ func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int6 return nil, err } defer resp.Body.Close() - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return nil, err } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go index dc2a85fdd4..9d9b07e13a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors.go @@ -20,30 +20,27 @@ import ( ) var ( - ErrUnknownMethod = errors.New("etcdserver: unknown method") - ErrStopped = errors.New("etcdserver: server stopped") - ErrCanceled = errors.New("etcdserver: request cancelled") - ErrTimeout = errors.New("etcdserver: request timed out") - ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") - ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") - ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") - ErrLeaderChanged = errors.New("etcdserver: leader changed") - ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") - ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") - ErrNoLeader = errors.New("etcdserver: no leader") - ErrNotLeader = errors.New("etcdserver: not leader") - ErrRequestTooLarge = errors.New("etcdserver: request is too large") - ErrNoSpace = errors.New("etcdserver: no space") - ErrTooManyRequests = errors.New("etcdserver: too many requests") - ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key 
not found") - ErrCorrupt = errors.New("etcdserver: corrupt cluster") - ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") - ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") - ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") - ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") - ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") - ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") + ErrUnknownMethod = errors.New("etcdserver: unknown method") + ErrStopped = errors.New("etcdserver: server stopped") + ErrCanceled = errors.New("etcdserver: request cancelled") + ErrTimeout = errors.New("etcdserver: request timed out") + ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure") + ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost") + ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") + ErrLeaderChanged = errors.New("etcdserver: leader changed") + ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") + ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader") + ErrNoLeader = errors.New("etcdserver: no leader") + ErrNotLeader = errors.New("etcdserver: not leader") + ErrRequestTooLarge = errors.New("etcdserver: request is too large") + ErrNoSpace = errors.New("etcdserver: no space") + ErrTooManyRequests = errors.New("etcdserver: too many requests") + ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") + ErrCorrupt = errors.New("etcdserver: corrupt cluster") + ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") + ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") + ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") ) type DiscoveryError struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go index 06263a9cd2..33ee02747f 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go @@ -124,12 +124,7 @@ var ( Name: "lease_expired_total", Help: "The total number of expired leases.", }) - quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "quota_backend_bytes", - Help: "Current backend storage quota size in bytes.", - }) + currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "server", @@ -191,7 +186,6 @@ func init() { prometheus.MustRegister(slowReadIndex) prometheus.MustRegister(readIndexFailed) prometheus.MustRegister(leaseExpired) - prometheus.MustRegister(quotaBackendBytes) prometheus.MustRegister(currentVersion) prometheus.MustRegister(currentGoVersion) prometheus.MustRegister(serverID) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go index 8b9600d39c..69e6a8c216 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go @@ -15,26 +15,18 @@ package 
etcdserver import ( - "encoding/json" "expvar" "fmt" "log" - "sort" "sync" "time" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/logutil" - "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/contention" - "go.etcd.io/etcd/pkg/v3/pbutil" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/config" - "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/wal" - "go.etcd.io/etcd/server/v3/wal/walpb" + serverstorage "go.etcd.io/etcd/server/v3/storage" "go.uber.org/zap" ) @@ -111,7 +103,7 @@ type raftNodeConfig struct { isIDRemoved func(id uint64) bool raft.Node raftStorage *raft.MemoryStorage - storage Storage + storage serverstorage.Storage heartbeat time.Duration // for logging // transport specifies the transport to send and receive msgs to members. // Sending messages MUST NOT block. It is okay to drop messages, since @@ -419,271 +411,3 @@ func (r *raftNode) advanceTicks(ticks int) { r.tick() } } - -func startNode(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { - var err error - member := cl.MemberByName(cfg.Name) - metadata := pbutil.MustMarshal( - &pb.Metadata{ - NodeID: uint64(member.ID), - ClusterID: uint64(cl.ID()), - }, - ) - if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil { - cfg.Logger.Panic("failed to create WAL", zap.Error(err)) - } - if cfg.UnsafeNoFsync { - w.SetUnsafeNoFsync() - } - peers := make([]raft.Peer, len(ids)) - for i, id := range ids { - var ctx []byte - ctx, err = json.Marshal((*cl).Member(id)) - if err != nil { - cfg.Logger.Panic("failed to marshal member", zap.Error(err)) - } - peers[i] = raft.Peer{ID: uint64(id), Context: ctx} - } - id = member.ID - cfg.Logger.Info( - "starting local member", - zap.String("local-member-id", id.String()), - zap.String("cluster-id", cl.ID().String()), - ) - s = raft.NewMemoryStorage() - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - if len(peers) == 0 { - n = raft.RestartNode(c) - } else { - n = raft.StartNode(c, peers) - } - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - return id, n, s, w -} - -func restartNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) - - cfg.Logger.Info( - "restarting local member", - zap.String("cluster-id", cid.String()), - zap.String("local-member-id", id.String()), - zap.Uint64("commit-index", st.Commit), - ) - cl := membership.NewCluster(cfg.Logger) - cl.SetID(id, cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - - n := 
raft.RestartNode(c) - raftStatusMu.Lock() - raftStatus = n.Status - raftStatusMu.Unlock() - return id, cl, n, s, w -} - -func restartAsStandaloneNode(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { - var walsnap walpb.Snapshot - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap, cfg.UnsafeNoFsync) - - // discard the previously uncommitted entries - for i, ent := range ents { - if ent.Index > st.Commit { - cfg.Logger.Info( - "discarding uncommitted WAL entries", - zap.Uint64("entry-index", ent.Index), - zap.Uint64("commit-index-from-wal", st.Commit), - zap.Int("number-of-discarded-entries", len(ents)-i), - ) - ents = ents[:i] - break - } - } - - // force append the configuration change entries - toAppEnts := createConfigChangeEnts( - cfg.Logger, - getIDs(cfg.Logger, snapshot, ents), - uint64(id), - st.Term, - st.Commit, - ) - ents = append(ents, toAppEnts...) - - // force commit newly appended entries - err := w.Save(raftpb.HardState{}, toAppEnts) - if err != nil { - cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err)) - } - if len(ents) != 0 { - st.Commit = ents[len(ents)-1].Index - } - - cfg.Logger.Info( - "forcing restart member", - zap.String("cluster-id", cid.String()), - zap.String("local-member-id", id.String()), - zap.Uint64("commit-index", st.Commit), - ) - - cl := membership.NewCluster(cfg.Logger) - cl.SetID(id, cid) - s := raft.NewMemoryStorage() - if snapshot != nil { - s.ApplySnapshot(*snapshot) - } - s.SetHardState(st) - s.Append(ents) - c := &raft.Config{ - ID: uint64(id), - ElectionTick: cfg.ElectionTicks, - HeartbeatTick: 1, - Storage: s, - MaxSizePerMsg: maxSizePerMsg, - MaxInflightMsgs: maxInflightMsgs, - CheckQuorum: true, - PreVote: cfg.PreVote, - Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")), - } - - n := raft.RestartNode(c) - raftStatus = n.Status - return id, cl, n, s, w -} - -// getIDs returns an ordered set of IDs included in the given snapshot and -// the entries. The given snapshot/entries can contain three kinds of -// ID-related entry: -// - ConfChangeAddNode, in which case the contained ID will be added into the set. -// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set. -// - ConfChangeAddLearnerNode, in which the contained ID will be added into the set. -func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { - ids := make(map[uint64]bool) - if snap != nil { - for _, id := range snap.Metadata.ConfState.Voters { - ids[id] = true - } - } - for _, e := range ents { - if e.Type != raftpb.EntryConfChange { - continue - } - var cc raftpb.ConfChange - pbutil.MustUnmarshal(&cc, e.Data) - switch cc.Type { - case raftpb.ConfChangeAddLearnerNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeAddNode: - ids[cc.NodeID] = true - case raftpb.ConfChangeRemoveNode: - delete(ids, cc.NodeID) - case raftpb.ConfChangeUpdateNode: - // do nothing - default: - lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) - } - } - sids := make(types.Uint64Slice, 0, len(ids)) - for id := range ids { - sids = append(sids, id) - } - sort.Sort(sids) - return []uint64(sids) -} - -// createConfigChangeEnts creates a series of Raft entries (i.e. -// EntryConfChange) to remove the set of given IDs from the cluster. The ID -// `self` is _not_ removed, even if present in the set. 
-// If `self` is not inside the given ids, it creates a Raft entry to add a -// default member with the given `self`. -func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry { - found := false - for _, id := range ids { - if id == self { - found = true - } - } - - var ents []raftpb.Entry - next := index + 1 - - // NB: always add self first, then remove other nodes. Raft will panic if the - // set of voters ever becomes empty. - if !found { - m := membership.Member{ - ID: types.ID(self), - RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, - } - ctx, err := json.Marshal(m) - if err != nil { - lg.Panic("failed to marshal member", zap.Error(err)) - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeAddNode, - NodeID: self, - Context: ctx, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - for _, id := range ids { - if id == self { - continue - } - cc := &raftpb.ConfChange{ - Type: raftpb.ConfChangeRemoveNode, - NodeID: id, - } - e := raftpb.Entry{ - Type: raftpb.EntryConfChange, - Data: pbutil.MustMarshal(cc), - Term: term, - Index: next, - } - ents = append(ents, e) - next++ - } - - return ents -} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go index 56e288cc5f..d10559a4e2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go @@ -22,11 +22,9 @@ import ( "math" "math/rand" "net/http" - "os" "path" "regexp" "strconv" - "strings" "sync" "sync/atomic" "time" @@ -34,6 +32,7 @@ import ( "github.com/coreos/go-semver/semver" humanize "github.com/dustin/go-humanize" "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/server/v3/config" "go.uber.org/zap" @@ -55,18 +54,19 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" "go.etcd.io/etcd/server/v3/etcdserver/api/v2http/httptypes" stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" - "go.etcd.io/etcd/server/v3/mvcc" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/wal" + serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.etcd.io/etcd/server/v3/storage/schema" ) const ( @@ -142,7 +142,6 @@ type ServerV2 interface { // Do takes a V2 request and attempts to fulfill it, returning a Response. Do(ctx context.Context, r pb.Request) (Response, error) - stats.Stats ClientCertAuthEnabled() bool } @@ -234,8 +233,7 @@ type EtcdServer struct { // done is closed when all goroutines from start() complete. done chan struct{} // leaderChanged is used to notify the linearizable read loop to drop the old read requests. 
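+	// notify.Notifier encapsulates the close-and-recreate channel pattern: Receive returns a channel that the next Notify closes.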
- leaderChanged chan struct{}
- leaderChangedMu sync.RWMutex
+ leaderChanged *notify.Notifier
 errorc chan error
 id types.ID
@@ -260,7 +258,7 @@ type EtcdServer struct {
 lessor lease.Lessor
 bemu sync.Mutex
 be backend.Backend
- beHooks *backendHooks
+ beHooks *serverstorage.BackendHooks
 authStore auth.AuthStore
 alarmStore *v3alarm.AlarmStore
@@ -289,312 +287,69 @@ type EtcdServer struct {
 leadTimeMu sync.RWMutex
 leadElectedTime time.Time
- firstCommitInTermMu sync.RWMutex
- firstCommitInTermC chan struct{}
+ firstCommitInTerm *notify.Notifier
+ clusterVersionChanged *notify.Notifier
 *AccessController
-}
-
-type backendHooks struct {
- indexer cindex.ConsistentIndexer
- lg *zap.Logger
-
- // confState to be written in the next submitted backend transaction (if dirty)
- confState raftpb.ConfState
- // first write changes it to 'dirty'. false by default, so
- // not initialized `confState` is meaningless.
- confStateDirty bool
- confStateLock sync.Mutex
-}
-
-func (bh *backendHooks) OnPreCommitUnsafe(tx backend.BatchTx) {
- bh.indexer.UnsafeSave(tx)
- bh.confStateLock.Lock()
- defer bh.confStateLock.Unlock()
- if bh.confStateDirty {
- membership.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
- // save bh.confState
- bh.confStateDirty = false
- }
-}
-
-func (bh *backendHooks) SetConfState(confState *raftpb.ConfState) {
- bh.confStateLock.Lock()
- defer bh.confStateLock.Unlock()
- bh.confState = *confState
- bh.confStateDirty = true
+ // forceSnapshot can force a snapshot to be triggered after apply, independent of snapshotCount.
+ // It should only be set within the apply code path; it is used to force a snapshot after a cluster version downgrade.
+ forceSnapshot bool
}

// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { - st := v2store.New(StoreClusterPrefix, StoreKeysPrefix) - - var ( - w *wal.WAL - n raft.Node - s *raft.MemoryStorage - id types.ID - cl *membership.RaftCluster - ) - - if cfg.MaxRequestBytes > recommendedMaxRequestBytes { - cfg.Logger.Warn( - "exceeded recommended request limit", - zap.Uint("max-request-bytes", cfg.MaxRequestBytes), - zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))), - zap.Int("recommended-request-bytes", recommendedMaxRequestBytes), - zap.String("recommended-request-size", recommendedMaxRequestBytesString), - ) - } - - if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { - return nil, fmt.Errorf("cannot access data directory: %v", terr) - } - - haveWAL := wal.Exist(cfg.WALDir()) - - if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil { - cfg.Logger.Fatal( - "failed to create snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - - if err = fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool { - return strings.HasPrefix(fileName, "tmp") - }); err != nil { - cfg.Logger.Error( - "failed to remove temp file(s) in snapshot directory", - zap.String("path", cfg.SnapDir()), - zap.Error(err), - ) - } - - ss := snap.New(cfg.Logger, cfg.SnapDir()) - - bepath := cfg.BackendPath() - beExist := fileutil.Exist(bepath) - - ci := cindex.NewConsistentIndex(nil) - beHooks := &backendHooks{lg: cfg.Logger, indexer: ci} - be := openBackend(cfg, beHooks) - ci.SetBackend(be) - cindex.CreateMetaBucket(be.BatchTx()) - - if cfg.ExperimentalBootstrapDefragThresholdMegabytes != 0 { - err := maybeDefragBackend(cfg, be) - if err != nil { - return nil, err - } - } - - defer func() { - if err != nil { - be.Close() - } - }() - - prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout()) + b, err := bootstrap(cfg) if err != nil { return nil, err } - var ( - remotes []*membership.Member - snapshot *raftpb.Snapshot - ) - - switch { - case !haveWAL && !cfg.NewCluster: - if err = cfg.VerifyJoinExisting(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt) - if gerr != nil { - return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr) - } - if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil { - return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err) - } - if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) { - return nil, fmt.Errorf("incompatible with current running cluster") - } - - remotes = existingCluster.Members() - cl.SetID(types.ID(0), existingCluster.ID()) - cl.SetStore(st) - cl.SetBackend(be) - id, n, s, w = startNode(cfg, cl, nil) - cl.SetID(id, existingCluster.ID()) - case !haveWAL && cfg.NewCluster: - if err = cfg.VerifyBootstrap(); err != nil { - return nil, err - } - cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap) - if err != nil { - return nil, err - } - m := cl.MemberByName(cfg.Name) - if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) { - return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID) - } - if cfg.ShouldDiscover() { - var str string - str, err = 
v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String()) - if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} - } - var urlsmap types.URLsMap - urlsmap, err = types.NewURLsMap(str) - if err != nil { - return nil, err - } - if config.CheckDuplicateURL(urlsmap) { - return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap) - } - if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil { - return nil, err - } - } - cl.SetStore(st) - cl.SetBackend(be) - id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) - cl.SetID(id, cl.ID()) - - case haveWAL: - if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { - return nil, fmt.Errorf("cannot write to member directory: %v", err) - } - - if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil { - return nil, fmt.Errorf("cannot write to WAL directory: %v", err) - } - - if cfg.ShouldDiscover() { - cfg.Logger.Warn( - "discovery token is ignored since cluster already initialized; valid logs are found", - zap.String("wal-dir", cfg.WALDir()), - ) - } - - // Find a snapshot to start/restart a raft node - walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir()) + defer func() { if err != nil { - return nil, err + b.Close() } - // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding - // wal log entries - snapshot, err := ss.LoadNewestAvailable(walSnaps) - if err != nil && err != snap.ErrNoSnapshot { - return nil, err - } - - if snapshot != nil { - if err = st.Recovery(snapshot.Data); err != nil { - cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err)) - } - - if err = assertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil { - cfg.Logger.Error("illegal v2store content", zap.Error(err)) - return nil, err - } - - cfg.Logger.Info( - "recovered v2 store from snapshot", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))), - ) - - if be, err = recoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil { - cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err)) - } - s1, s2 := be.Size(), be.SizeInUse() - cfg.Logger.Info( - "recovered v3 backend from snapshot", - zap.Int64("backend-size-bytes", s1), - zap.String("backend-size", humanize.Bytes(uint64(s1))), - zap.Int64("backend-size-in-use-bytes", s2), - zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))), - ) - } else { - cfg.Logger.Info("No snapshot found. 
Recovering WAL from scratch!") - } - - if !cfg.ForceNewCluster { - id, cl, n, s, w = restartNode(cfg, snapshot) - } else { - id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot) - } - - cl.SetStore(st) - cl.SetBackend(be) - cl.Recover(api.UpdateCapability) - if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist { - os.RemoveAll(bepath) - return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath) - } - - default: - return nil, fmt.Errorf("unsupported bootstrap config") - } - - if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil { - return nil, fmt.Errorf("cannot access member directory: %v", terr) - } + }() - sstats := stats.NewServerStats(cfg.Name, id.String()) - lstats := stats.NewLeaderStats(cfg.Logger, id.String()) + sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String()) + lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - lgMu: new(sync.RWMutex), - lg: cfg.Logger, - errorc: make(chan error, 1), - v2store: st, - snapshotter: ss, - r: *newRaftNode( - raftNodeConfig{ - lg: cfg.Logger, - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - }, - ), - id: id, - attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, - cluster: cl, - stats: sstats, - lstats: lstats, - SyncTicker: time.NewTicker(500 * time.Millisecond), - peerRt: prt, - reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), - AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, - consistIndex: ci, - firstCommitInTermC: make(chan struct{}), - } - serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1) - + readych: make(chan struct{}), + Cfg: cfg, + lgMu: new(sync.RWMutex), + lg: cfg.Logger, + errorc: make(chan error, 1), + v2store: b.storage.st, + snapshotter: b.ss, + r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl), + id: b.cluster.nodeID, + attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, + cluster: b.cluster.cl, + stats: sstats, + lstats: lstats, + SyncTicker: time.NewTicker(500 * time.Millisecond), + peerRt: b.prt, + reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()), + AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist}, + consistIndex: b.storage.backend.ci, + firstCommitInTerm: notify.NewNotifier(), + clusterVersionChanged: notify.NewNotifier(), + } + serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1) + srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged) srv.applyV2 = NewApplierV2(cfg.Logger, srv.v2store, srv.cluster) - srv.be = be - srv.beHooks = beHooks + srv.be = b.storage.backend.be + srv.beHooks = b.storage.backend.beHooks minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. 
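+	// The lessor now also receives the cluster, so it can consult the cluster version, e.g. when persisting lease checkpoints (CheckpointPersist).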
- srv.lessor = lease.NewLessor(srv.Logger(), srv.be, lease.LessorConfig{ + srv.lessor = lease.NewLessor(srv.Logger(), srv.be, srv.cluster, lease.LessorConfig{ MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())), CheckpointInterval: cfg.LeaseCheckpointInterval, + CheckpointPersist: cfg.LeaseCheckpointPersist, ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(), }) @@ -608,25 +363,14 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { cfg.Logger.Warn("failed to create token provider", zap.Error(err)) return nil, err } - srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit}) - kvindex := ci.ConsistentIndex() - srv.lg.Debug("restore consistentIndex", zap.Uint64("index", kvindex)) - if beExist { - // TODO: remove kvindex != 0 checking when we do not expect users to upgrade - // etcd from pre-3.0 release. - if snapshot != nil && kvindex < snapshot.Metadata.Index { - if kvindex != 0 { - return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index) - } - cfg.Logger.Warn( - "consistent index was never saved", - zap.Uint64("snapshot-index", snapshot.Metadata.Index), - ) - } + mvccStoreConfig := mvcc.StoreConfig{ + CompactionBatchLimit: cfg.CompactionBatchLimit, + CompactionSleepInterval: cfg.CompactionSleepInterval, } + srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvccStoreConfig) - srv.authStore = auth.NewAuthStore(srv.Logger(), srv.be, tp, int(cfg.BcryptCost)) + srv.authStore = auth.NewAuthStore(srv.Logger(), schema.NewAuthBackend(srv.Logger(), srv.be), tp, int(cfg.BcryptCost)) newSrv := srv // since srv == nil in defer if srv is returned as nil defer func() { @@ -662,11 +406,11 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { Logger: cfg.Logger, TLSInfo: cfg.PeerTLSInfo, DialTimeout: cfg.PeerDialTimeout(), - ID: id, + ID: b.cluster.nodeID, URLs: cfg.PeerURLs, - ClusterID: cl.ID(), + ClusterID: b.cluster.cl.ID(), Raft: srv, - Snapshotter: ss, + Snapshotter: b.ss, ServerStats: sstats, LeaderStats: lstats, ErrorC: srv.errorc, @@ -675,13 +419,13 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { return nil, err } // add all remotes into transport - for _, m := range remotes { - if m.ID != id { + for _, m := range b.cluster.remotes { + if m.ID != b.cluster.nodeID { tr.AddRemote(m.ID, m.PeerURLs) } } - for _, m := range cl.Members() { - if m.ID != id { + for _, m := range b.cluster.cl.Members() { + if m.ID != b.cluster.nodeID { tr.AddPeer(m.ID, m.PeerURLs) } } @@ -690,23 +434,6 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { return srv, nil } -// assertNoV2StoreContent -> depending on the deprecation stage, warns or report an error -// if the v2store contains custom content. -func assertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error { - metaOnly, err := membership.IsMetaStoreOnly(st) - if err != nil { - return err - } - if metaOnly { - return nil - } - if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) { - return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage) - } - lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. 
Please remove the content.") - return nil -} - func (s *EtcdServer) Logger() *zap.Logger { s.lgMu.RLock() l := s.lg @@ -787,12 +514,11 @@ func (s *EtcdServer) adjustTicks() { func (s *EtcdServer) Start() { s.start() s.GoAttach(func() { s.adjustTicks() }) - // TODO: Switch to publishV3 in 3.6. - // Support for cluster_member_set_attr was added in 3.5. - s.GoAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) + s.GoAttach(func() { s.publishV3(s.Cfg.ReqTimeout()) }) s.GoAttach(s.purgeFile) s.GoAttach(func() { monitorFileDescriptor(s.Logger(), s.stopping) }) - s.GoAttach(s.monitorVersions) + s.GoAttach(s.monitorClusterVersions) + s.GoAttach(s.monitorStorageVersion) s.GoAttach(s.linearizableReadLoop) s.GoAttach(s.monitorKVHash) s.GoAttach(s.monitorDowngrade) @@ -829,7 +555,7 @@ func (s *EtcdServer) start() { s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() - s.leaderChanged = make(chan struct{}) + s.leaderChanged = notify.NewNotifier() if s.ClusterVersion() != nil { lg.Info( "starting etcd server", @@ -911,7 +637,7 @@ type ServerPeerV2 interface { DowngradeEnabledHandler() http.Handler } -func (s *EtcdServer) DowngradeInfo() *membership.DowngradeInfo { return s.cluster.DowngradeInfo() } +func (s *EtcdServer) DowngradeInfo() *serverversion.DowngradeInfo { return s.cluster.DowngradeInfo() } type downgradeEnabledHandler struct { lg *zap.Logger @@ -1051,11 +777,7 @@ func (s *EtcdServer) run() { } } if newLeader { - s.leaderChangedMu.Lock() - lc := s.leaderChanged - s.leaderChanged = make(chan struct{}) - close(lc) - s.leaderChangedMu.Unlock() + s.leaderChanged.Notify() } // TODO: remove the nil checking // current test utility does not provide the stats @@ -1234,7 +956,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { // wait for raftNode to persist snapshot onto the disk <-apply.notifyc - newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks) + newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks) if err != nil { lg.Panic("failed to open snapshot backend", zap.Error(err)) } @@ -1287,7 +1009,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { if s.authStore != nil { lg.Info("restoring auth store") - s.authStore.Recover(newbe) + s.authStore.Recover(schema.NewAuthBackend(lg, newbe)) lg.Info("restored auth store") } @@ -1297,13 +1019,13 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { lg.Panic("failed to restore v2 store", zap.Error(err)) } - if err := assertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { + if err := serverstorage.AssertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil { lg.Panic("illegal v2store content", zap.Error(err)) } lg.Info("restored v2 store") - s.cluster.SetBackend(newbe) + s.cluster.SetBackend(schema.NewMembershipBackend(lg, newbe)) lg.Info("restoring cluster configuration") @@ -1360,10 +1082,9 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount { + if !s.shouldSnapshot(ep) { return } - lg := s.Logger() lg.Info( "triggering snapshot", @@ -1371,12 +1092,18 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { zap.Uint64("local-member-applied-index", ep.appliedi), zap.Uint64("local-member-snapshot-index", ep.snapi), zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), + 
zap.Bool("snapshot-forced", s.forceSnapshot), ) + s.forceSnapshot = false s.snapshot(ep.appliedi, ep.confState) ep.snapi = ep.appliedi } +func (s *EtcdServer) shouldSnapshot(ep *etcdProgress) bool { + return (s.forceSnapshot && ep.appliedi != ep.snapi) || (ep.appliedi-ep.snapi > s.Cfg.SnapshotCount) +} + func (s *EtcdServer) hasMultipleVotingMembers() bool { return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1 } @@ -1502,18 +1229,6 @@ func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done } // when the server is being stopped. func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping } -func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() } - -func (s *EtcdServer) LeaderStats() []byte { - lead := s.getLead() - if lead != uint64(s.id) { - return nil - } - return s.lstats.JSON() -} - -func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() } - func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { // In the context of ordinary etcd process, s.authStore will never be nil. @@ -1841,9 +1556,7 @@ func (s *EtcdServer) getLead() uint64 { } func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { - s.leaderChangedMu.RLock() - defer s.leaderChangedMu.RUnlock() - return s.leaderChanged + return s.leaderChanged.Receive() } // FirstCommitInTermNotify returns channel that will be unlocked on first @@ -1851,9 +1564,7 @@ func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} { // read-only requests (leader is not able to respond any read-only requests // as long as linearizable semantic is required) func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { - s.firstCommitInTermMu.RLock() - defer s.firstCommitInTermMu.RUnlock() - return s.firstCommitInTermC + return s.firstCommitInTerm.Receive() } // RaftStatusGetter represents etcd server and Raft progress. @@ -1993,70 +1704,6 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { } } -// publish registers server information into the cluster. The information -// is the JSON representation of this server's member struct, updated with the -// static clientURLs of the server. -// The function keeps attempting to register until it succeeds, -// or its server is stopped. -// -// Use v2 store to encode member attributes, and apply through Raft -// but does not go through v2 API endpoint, which means even with v2 -// client handler disabled (e.g. 
--enable-v2=false), cluster can still -// process publish requests through rafthttp -// TODO: Remove in 3.6 (start using publishV3) -func (s *EtcdServer) publish(timeout time.Duration) { - lg := s.Logger() - b, err := json.Marshal(s.attributes) - if err != nil { - lg.Panic("failed to marshal JSON", zap.Error(err)) - return - } - req := pb.Request{ - Method: "PUT", - Path: membership.MemberAttributesStorePath(s.id), - Val: string(b), - } - - for { - ctx, cancel := context.WithTimeout(s.ctx, timeout) - _, err := s.Do(ctx, req) - cancel() - switch err { - case nil: - close(s.readych) - lg.Info( - "published local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("request-path", req.Path), - zap.String("cluster-id", s.cluster.ID().String()), - zap.Duration("publish-timeout", timeout), - ) - return - - case ErrStopped: - lg.Warn( - "stopped publish because server is stopped", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.Duration("publish-timeout", timeout), - zap.Error(err), - ) - return - - default: - lg.Warn( - "failed to publish local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), - zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), - zap.String("request-path", req.Path), - zap.Duration("publish-timeout", timeout), - zap.Error(err), - ) - } - } -} - func (s *EtcdServer) sendMergedSnap(merged snap.Message) { atomic.AddInt64(&s.inflightSnapshots, 1) @@ -2148,7 +1795,7 @@ func (s *EtcdServer) apply( return appliedt, appliedi, shouldStop } -// applyEntryNormal apples an EntryNormal type raftpb request to the EtcdServer +// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { shouldApplyV3 := membership.ApplyV2storeOnly index := s.consistIndex.ConsistentIndex() @@ -2165,7 +1812,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { // raft state machine may generate noop entry when leader confirmation. // skip it in advance to avoid some potential bug in the future if len(e.Data) == 0 { - s.notifyAboutFirstCommitInTerm() + s.firstCommitInTerm.Notify() // promote lessor when the local member is leader and finished // applying all entries from the last term. @@ -2194,6 +1841,9 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { id := raftReq.ID if id == 0 { + if raftReq.Header == nil { + s.lg.Panic("applyEntryNormal, could not find a header") + } id = raftReq.Header.ID } @@ -2239,15 +1889,6 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { }) } -func (s *EtcdServer) notifyAboutFirstCommitInTerm() { - newNotifier := make(chan struct{}) - s.firstCommitInTermMu.Lock() - notifierToClose := s.firstCommitInTermC - s.firstCommitInTermC = newNotifier - s.firstCommitInTermMu.Unlock() - close(notifierToClose) -} - // applyConfChange applies a ConfChange to the server. It is only // invoked with a ConfChange that has already passed through Raft func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) { @@ -2424,15 +2065,12 @@ func (s *EtcdServer) ClusterVersion() *semver.Version { return s.cluster.Version() } -// monitorVersions checks the member's version every monitorVersionInterval. -// It updates the cluster version if all members agrees on a higher one. 
-// It prints out log if there is a member with a higher version than the -// local version. -// TODO switch to updateClusterVersionV3 in 3.6 -func (s *EtcdServer) monitorVersions() { +// monitorClusterVersions checks every monitorVersionInterval whether the local member is the leader and, if so, updates the cluster version when needed. +func (s *EtcdServer) monitorClusterVersions() { + monitor := serverversion.NewMonitor(s.Logger(), newServerVersionAdapter(s)) for { select { - case <-s.FirstCommitInTermNotify(): + case <-s.firstCommitInTerm.Receive(): case <-time.After(monitorVersionInterval): case <-s.stopping: return @@ -2441,31 +2079,21 @@ if s.Leader() != s.ID() { continue } + monitor.UpdateClusterVersionIfNeeded() + } +} - v := decideClusterVersion(s.Logger(), getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) - if v != nil { - // only keep major.minor version for comparison - v = &semver.Version{ - Major: v.Major, - Minor: v.Minor, - } - } - - // if the current version is nil: - // 1. use the decided version if possible - // 2. or use the min cluster version - if s.cluster.Version() == nil { - verStr := version.MinClusterVersion - if v != nil { - verStr = v.String() - } - s.GoAttach(func() { s.updateClusterVersionV2(verStr) }) - continue - } - - if v != nil && membership.IsValidVersionChange(s.cluster.Version(), v) { - s.GoAttach(func() { s.updateClusterVersionV2(v.String()) }) +// monitorStorageVersion updates the storage version every monitorVersionInterval, or when the cluster version changes, if an update is needed. +func (s *EtcdServer) monitorStorageVersion() { + monitor := serverversion.NewMonitor(s.Logger(), newServerVersionAdapter(s)) + for { + select { + case <-time.After(monitorVersionInterval): + case <-s.clusterVersionChanged.Receive(): + case <-s.stopping: + return } + monitor.UpdateStorageVersionIfNeeded() } } @@ -2545,12 +2173,13 @@ func (s *EtcdServer) updateClusterVersionV3(ver string) { } } +// monitorDowngrade checks every DowngradeCheckTime whether the local member is the leader and, if so, cancels the downgrade if needed. 
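The three monitor goroutines introduced above (cluster version, storage version, and the downgrade monitor just below) share one loop shape: block on a periodic timer, a wake-up notifier, or the stopping channel, then hand the real work to the serverversion.Monitor. A minimal, self-contained sketch of that shape; runMonitor, wake, isLeader and check are illustrative stand-ins, not identifiers from this diff:

package main

import "time"

// runMonitor wakes on an event channel or a fixed interval, exits on stop,
// and runs check() behind an optional leadership gate.
func runMonitor(interval time.Duration, wake, stop <-chan struct{}, isLeader func() bool, check func()) {
	for {
		select {
		case <-wake: // e.g. first commit in term, or cluster version changed
		case <-time.After(interval):
		case <-stop:
			return
		}
		if !isLeader() {
			continue
		}
		check() // e.g. monitor.UpdateClusterVersionIfNeeded()
	}
}

func main() {
	stop := make(chan struct{})
	go runMonitor(time.Second, make(chan struct{}), stop, func() bool { return true }, func() {})
	time.Sleep(10 * time.Millisecond)
	close(stop)
}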
func (s *EtcdServer) monitorDowngrade() { + monitor := serverversion.NewMonitor(s.Logger(), newServerVersionAdapter(s)) t := s.Cfg.DowngradeCheckTime if t == 0 { return } - lg := s.Logger() for { select { case <-time.After(t): @@ -2561,22 +2190,7 @@ func (s *EtcdServer) monitorDowngrade() { if !s.isLeader() { continue } - - d := s.cluster.DowngradeInfo() - if !d.Enabled { - continue - } - - targetVersion := d.TargetVersion - v := semver.Must(semver.NewVersion(targetVersion)) - if isMatchedVersions(s.Logger(), v, getVersions(s.Logger(), s.cluster, s.id, s.peerRt)) { - lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) - if _, err := s.downgradeCancel(ctx); err != nil { - lg.Warn("failed to cancel downgrade", zap.Error(err)) - } - cancel() - } + monitor.CancelDowngradeIfNeeded() } } @@ -2624,7 +2238,7 @@ func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } func (s *EtcdServer) restoreAlarms() error { s.applyV3 = s.newApplierV3() - as, err := v3alarm.NewAlarmStore(s.lg, s) + as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be)) if err != nil { return err } @@ -2679,21 +2293,6 @@ func (s *EtcdServer) raftStatus() raft.Status { return s.r.Node.Status() } -func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error { - size := be.Size() - sizeInUse := be.SizeInUse() - freeableMemory := uint(size - sizeInUse) - thresholdBytes := cfg.ExperimentalBootstrapDefragThresholdMegabytes * 1024 * 1024 - if freeableMemory < thresholdBytes { - cfg.Logger.Info("Skipping defragmentation", - zap.Int64("current-db-size-bytes", size), - zap.String("current-db-size", humanize.Bytes(uint64(size))), - zap.Int64("current-db-size-in-use-bytes", sizeInUse), - zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))), - zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes), - zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))), - ) - return nil - } - return be.Defrag() +func (s *EtcdServer) Version() *serverversion.Manager { + return serverversion.NewManager(s.Logger(), newServerVersionAdapter(s)) } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go index 72d10c1796..74c823b621 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go @@ -19,7 +19,7 @@ import ( "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" humanize "github.com/dustin/go-humanize" "go.uber.org/zap" diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go index 442288a6ee..3e868bebda 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go @@ -23,14 +23,14 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/api/v3/membershippb" + "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" - "go.etcd.io/etcd/server/v3/mvcc" + 
"go.etcd.io/etcd/server/v3/storage/mvcc" "github.com/gogo/protobuf/proto" "go.uber.org/zap" @@ -292,7 +292,7 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e defer cancel() // renewals don't go through raft; forward to leader manually - for cctx.Err() == nil && err != nil { + for cctx.Err() == nil { leader, lerr := s.waitLeader(cctx) if lerr != nil { return -1, lerr @@ -384,7 +384,7 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) return nil, ErrNoLeader } } - if leader == nil || len(leader.PeerURLs) == 0 { + if len(leader.PeerURLs) == 0 { return nil, ErrNoLeader } return leader, nil @@ -709,7 +709,7 @@ func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } func (s *EtcdServer) linearizableReadLoop() { for { requestId := s.reqIDGen.Next() - leaderChangedNotifier := s.LeaderChangedNotify() + leaderChangedNotifier := s.leaderChanged.Receive() select { case <-leaderChangedNotifier: continue @@ -775,7 +775,7 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, retryTimer := time.NewTimer(readIndexRetryTime) defer retryTimer.Stop() - firstCommitInTermNotifier := s.FirstCommitInTermNotify() + firstCommitInTermNotifier := s.firstCommitInTerm.Receive() for { select { @@ -803,7 +803,7 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, // return a retryable error. return 0, ErrLeaderChanged case <-firstCommitInTermNotifier: - firstCommitInTermNotifier = s.FirstCommitInTermNotify() + firstCommitInTermNotifier = s.firstCommitInTerm.Receive() lg.Info("first commit in current term: resending ReadIndex request") err := s.sendReadIndex(requestId) if err != nil { @@ -919,73 +919,40 @@ func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.Downg return nil, err } - // gets leaders commit index and wait for local store to finish applying that index - // to avoid using stale downgrade information - err = s.linearizableReadNotify(ctx) - if err != nil { - return nil, err - } - cv := s.ClusterVersion() if cv == nil { return nil, ErrClusterVersionUnavailable } - resp.Version = cv.String() - - allowedTargetVersion := membership.AllowedDowngradeVersion(cv) - if !targetVersion.Equal(*allowedTargetVersion) { - return nil, ErrInvalidDowngradeTargetVersion + resp.Version = version.Cluster(cv.String()) + err = s.Version().DowngradeValidate(ctx, targetVersion) + if err != nil { + return nil, err } - downgradeInfo := s.cluster.DowngradeInfo() - if downgradeInfo.Enabled { - // Todo: return the downgrade status along with the error msg - return nil, ErrDowngradeInProcess - } return resp, nil } func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) { - // validate downgrade capability before starting downgrade - v := r.Version lg := s.Logger() - if resp, err := s.downgradeValidate(ctx, v); err != nil { - lg.Warn("reject downgrade request", zap.Error(err)) - return resp, err - } - targetVersion, err := convertToClusterVersion(v) + targetVersion, err := convertToClusterVersion(r.Version) if err != nil { lg.Warn("reject downgrade request", zap.Error(err)) return nil, err } - - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()} - _, err = s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + err = s.Version().DowngradeEnable(ctx, targetVersion) if err != nil { lg.Warn("reject downgrade request", zap.Error(err)) return nil, err } - resp := 
pb.DowngradeResponse{Version: s.ClusterVersion().String()} + resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} return &resp, nil } func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) { - // gets leaders commit index and wait for local store to finish applying that index - // to avoid using stale downgrade information - if err := s.linearizableReadNotify(ctx); err != nil { - return nil, err - } - - downgradeInfo := s.cluster.DowngradeInfo() - if !downgradeInfo.Enabled { - return nil, ErrNoInflightDowngrade - } - - raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false} - _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest}) + err := s.Version().DowngradeCancel(ctx) if err != nil { - return nil, err + s.lg.Warn("failed to cancel downgrade", zap.Error(err)) } - resp := pb.DowngradeResponse{Version: s.ClusterVersion().String()} + resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())} return &resp, nil } diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go similarity index 81% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go index 475c4b1f95..c34f905119 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2http/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v2http provides etcd client and server implementations. -package v2http +// Package version provides functions for getting/saving storage version. +package version diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go similarity index 62% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go index 9fdafe22aa..60cab4931d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/downgrade.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package membership +package version import ( "github.com/coreos/go-semver/semver" @@ -34,47 +34,42 @@ func (d *DowngradeInfo) GetTargetVersion() *semver.Version { // isValidDowngrade verifies whether the cluster can be downgraded from verFrom to verTo func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool { - return verTo.Equal(*AllowedDowngradeVersion(verFrom)) + return verTo.Equal(*allowedDowngradeVersion(verFrom)) } -// mustDetectDowngrade will detect unexpected downgrade when the local server is recovered. -func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version, d *DowngradeInfo) { - lv := semver.Must(semver.NewVersion(version.Version)) +// MustDetectDowngrade will detect the local server joining a cluster that doesn't support its version. 
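The downgrade helpers in this new version package (MustDetectDowngrade just below, and allowedDowngradeVersion/IsValidVersionChange after it) compare versions at major.minor granularity only. A small stand-alone sketch of that convention with github.com/coreos/go-semver, the library these files import; trimToMinor is a hypothetical helper, not a symbol from this diff:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// trimToMinor drops the patch component, mirroring how the code above
// truncates versions before comparing them.
func trimToMinor(v *semver.Version) *semver.Version {
	return &semver.Version{Major: v.Major, Minor: v.Minor}
}

func main() {
	sv := trimToMinor(semver.New("3.5.4")) // local server version
	cv := trimToMinor(semver.New("3.6.0")) // determined cluster version
	// MustDetectDowngrade panics in this situation: the server is older
	// than the cluster it is trying to join.
	fmt.Println(sv.LessThan(*cv)) // true
}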
+func MustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) { // only keep major.minor version for comparison against cluster version - lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} - - // if the cluster enables downgrade, check local version against downgrade target version. - if d != nil && d.Enabled && d.TargetVersion != "" { - if lv.Equal(*d.GetTargetVersion()) { - if cv != nil { - lg.Info( - "cluster is downgrading to target version", - zap.String("target-cluster-version", d.TargetVersion), - zap.String("determined-cluster-version", version.Cluster(cv.String())), - zap.String("current-server-version", version.Version), - ) - } - return - } - lg.Fatal( - "invalid downgrade; server version is not allowed to join when downgrade is enabled", - zap.String("current-server-version", version.Version), - zap.String("target-cluster-version", d.TargetVersion), - ) - } + sv = &semver.Version{Major: sv.Major, Minor: sv.Minor} // if the cluster disables downgrade, check local version against determined cluster version. // the validation passes when local version is not less than cluster version - if cv != nil && lv.LessThan(*cv) { - lg.Fatal( + if cv != nil && sv.LessThan(*cv) { + lg.Panic( "invalid downgrade; server version is lower than determined cluster version", - zap.String("current-server-version", version.Version), + zap.String("current-server-version", sv.String()), zap.String("determined-cluster-version", version.Cluster(cv.String())), ) } } -func AllowedDowngradeVersion(ver *semver.Version) *semver.Version { +func allowedDowngradeVersion(ver *semver.Version) *semver.Version { // Todo: handle the case that downgrading from higher major version(e.g. downgrade from v4.0 to v3.x) return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1} } + +// IsValidVersionChange checks the two scenario when version is valid to change: +// 1. Downgrade: cluster version is 1 minor version higher than local version, +// cluster version should change. +// 2. Cluster start: when not all members version are available, cluster version +// is set to MinVersion(3.0), when all members are at higher version, cluster version +// is lower than local version, cluster version should change +func IsValidVersionChange(cv *semver.Version, lv *semver.Version) bool { + cv = &semver.Version{Major: cv.Major, Minor: cv.Minor} + lv = &semver.Version{Major: lv.Major, Minor: lv.Minor} + + if isValidDowngrade(cv, lv) || (cv.Major == lv.Major && cv.LessThan(*lv)) { + return true + } + return false +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go new file mode 100644 index 0000000000..906aa9f413 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go @@ -0,0 +1,23 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
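To make the IsValidVersionChange rules above concrete, a usage sketch; it assumes the vendored import path introduced by this diff and simply prints the outcomes:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
	serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
)

func main() {
	// downgrade by exactly one minor version: allowed
	fmt.Println(serverversion.IsValidVersionChange(semver.New("3.6.0"), semver.New("3.5.0"))) // true
	// upgrade within the same major version: allowed
	fmt.Println(serverversion.IsValidVersionChange(semver.New("3.5.0"), semver.New("3.6.1"))) // true
	// skipping a minor version on the way down: rejected
	fmt.Println(serverversion.IsValidVersionChange(semver.New("3.6.0"), semver.New("3.4.0"))) // false
}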
+ +package version + +import "errors" + +var ( + ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version") + ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress") + ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job") +) diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go new file mode 100644 index 0000000000..8ac8d8e8d6 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go @@ -0,0 +1,209 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "context" + + "github.com/coreos/go-semver/semver" + "go.etcd.io/etcd/api/v3/version" + "go.uber.org/zap" ) + +// Monitor contains logic used by the cluster leader to monitor version changes and decide on cluster version or downgrade progress. +type Monitor struct { + lg *zap.Logger + s Server +} + +// Server lists EtcdServer methods needed by Monitor +type Server interface { + GetClusterVersion() *semver.Version + GetDowngradeInfo() *DowngradeInfo + GetMembersVersions() map[string]*version.Versions + UpdateClusterVersion(string) + LinearizableReadNotify(ctx context.Context) error + DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error + DowngradeCancel(ctx context.Context) error + + GetStorageVersion() *semver.Version + UpdateStorageVersion(semver.Version) error +} + +func NewMonitor(lg *zap.Logger, storage Server) *Monitor { + return &Monitor{ + lg: lg, + s: storage, + } +} + +// UpdateClusterVersionIfNeeded updates the cluster version. +func (m *Monitor) UpdateClusterVersionIfNeeded() { + newClusterVersion := m.decideClusterVersion() + if newClusterVersion != nil { + newClusterVersion = &semver.Version{Major: newClusterVersion.Major, Minor: newClusterVersion.Minor} + m.s.UpdateClusterVersion(newClusterVersion.String()) + } +} + +// decideClusterVersion decides whether to change cluster version and its next value. +// The new cluster version is based on the minimal server version among the members and on whether the cluster is downgrading. +// Returns nil if cluster version should be left unchanged. 
+func (m *Monitor) decideClusterVersion() *semver.Version { + clusterVersion := m.s.GetClusterVersion() + minimalServerVersion := m.membersMinimalServerVersion() + if clusterVersion == nil { + if minimalServerVersion != nil { + return minimalServerVersion + } + return semver.New(version.MinClusterVersion) + } + if minimalServerVersion == nil { + return nil + } + downgrade := m.s.GetDowngradeInfo() + if downgrade != nil && downgrade.Enabled { + if IsValidVersionChange(clusterVersion, downgrade.GetTargetVersion()) && IsValidVersionChange(minimalServerVersion, downgrade.GetTargetVersion()) { + return downgrade.GetTargetVersion() + } + m.lg.Error("Cannot downgrade cluster version, version change is not valid", + zap.String("downgrade-version", downgrade.TargetVersion), + zap.String("cluster-version", clusterVersion.String()), + zap.String("minimal-server-version", minimalServerVersion.String()), + ) + return nil + } + if clusterVersion.LessThan(*minimalServerVersion) && IsValidVersionChange(clusterVersion, minimalServerVersion) { + return minimalServerVersion + } + return nil +} + +// UpdateStorageVersionIfNeeded updates the storage version if it differs from cluster version. +func (m *Monitor) UpdateStorageVersionIfNeeded() { + cv := m.s.GetClusterVersion() + if cv == nil { + return + } + sv := m.s.GetStorageVersion() + + if sv == nil || sv.Major != cv.Major || sv.Minor != cv.Minor { + if sv != nil { + m.lg.Info("storage version differs from cluster version.", zap.String("cluster-version", cv.String()), zap.String("storage-version", sv.String())) + } + err := m.s.UpdateStorageVersion(semver.Version{Major: cv.Major, Minor: cv.Minor}) + if err != nil { + m.lg.Error("failed to update storage version", zap.String("cluster-version", cv.String()), zap.Error(err)) + return + } + d := m.s.GetDowngradeInfo() + if d != nil && d.Enabled { + m.lg.Info( + "The server is ready to downgrade", + zap.String("target-version", d.TargetVersion), + zap.String("server-version", version.Version), + ) + } + } +} + +func (m *Monitor) CancelDowngradeIfNeeded() { + d := m.s.GetDowngradeInfo() + if d == nil || !d.Enabled { + return + } + + targetVersion := d.TargetVersion + v := semver.Must(semver.NewVersion(targetVersion)) + if m.versionsMatchTarget(v) { + m.lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion)) + err := m.s.DowngradeCancel(context.Background()) + if err != nil { + m.lg.Warn("failed to cancel downgrade", zap.Error(err)) + } + } +} + +// membersMinimalServerVersion returns the min server version in the map, or nil if the min +// version is unknown. +// It logs a warning if there is a member with a higher version than the +// local version. 
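Because Monitor talks to the server only through the small Server interface above, its decision logic can be exercised without a running EtcdServer. A hypothetical stub, assuming the vendored import paths from this diff, that drives UpdateClusterVersionIfNeeded:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/go-semver/semver"
	apiversion "go.etcd.io/etcd/api/v3/version"
	serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
	"go.uber.org/zap"
)

// fakeServer is an illustrative stub of the Server interface, not a type
// from this diff.
type fakeServer struct {
	cluster *semver.Version
	members map[string]*apiversion.Versions
	updated string
}

func (f *fakeServer) GetClusterVersion() *semver.Version                   { return f.cluster }
func (f *fakeServer) GetDowngradeInfo() *serverversion.DowngradeInfo       { return nil }
func (f *fakeServer) GetMembersVersions() map[string]*apiversion.Versions  { return f.members }
func (f *fakeServer) UpdateClusterVersion(v string)                        { f.updated = v }
func (f *fakeServer) LinearizableReadNotify(context.Context) error         { return nil }
func (f *fakeServer) DowngradeEnable(context.Context, *semver.Version) error { return nil }
func (f *fakeServer) DowngradeCancel(context.Context) error                { return nil }
func (f *fakeServer) GetStorageVersion() *semver.Version                   { return nil }
func (f *fakeServer) UpdateStorageVersion(semver.Version) error            { return nil }

func main() {
	s := &fakeServer{
		cluster: semver.New("3.5.0"),
		members: map[string]*apiversion.Versions{
			"a": {Server: "3.6.0"},
			"b": {Server: "3.6.1"},
		},
	}
	serverversion.NewMonitor(zap.NewNop(), s).UpdateClusterVersionIfNeeded()
	fmt.Println(s.updated) // "3.6.0": every member already runs ahead of the cluster version
}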
+func (m *Monitor) membersMinimalServerVersion() *semver.Version { + vers := m.s.GetMembersVersions() + var minV *semver.Version + lv := semver.Must(semver.NewVersion(version.Version)) + + for mid, ver := range vers { + if ver == nil { + return nil + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + m.lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return nil + } + if lv.LessThan(*v) { + m.lg.Warn( + "leader found higher-versioned member", + zap.String("local-member-version", lv.String()), + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + ) + } + if minV == nil { + minV = v + } else if v.LessThan(*minV) { + minV = v + } + } + return minV +} + +// versionsMatchTarget returns true if all server versions are equal to target version, otherwise returns false. +// It can be used to decide whether the cluster has finished downgrading to the target version. +func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool { + vers := m.s.GetMembersVersions() + targetVersion = &semver.Version{Major: targetVersion.Major, Minor: targetVersion.Minor} + for mid, ver := range vers { + if ver == nil { + return false + } + v, err := semver.NewVersion(ver.Server) + if err != nil { + m.lg.Warn( + "failed to parse server version of remote member", + zap.String("remote-member-id", mid), + zap.String("remote-member-version", ver.Server), + zap.Error(err), + ) + return false + } + v = &semver.Version{Major: v.Major, Minor: v.Minor} + if !targetVersion.Equal(*v) { + m.lg.Warn("remote server has mismatching etcd version", + zap.String("remote-member-id", mid), + zap.String("current-server-version", v.String()), + zap.String("target-version", targetVersion.String()), + ) + return false + } + } + return true +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go new file mode 100644 index 0000000000..0a2f99a1fa --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go @@ -0,0 +1,81 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "context" + + "github.com/coreos/go-semver/semver" + "go.uber.org/zap" +) + +// Manager contains logic to manage etcd cluster version downgrade process. +type Manager struct { + lg *zap.Logger + s Server +} + +// NewManager returns a new manager instance +func NewManager(lg *zap.Logger, s Server) *Manager { + return &Manager{ + lg: lg, + s: s, + } +} + +// DowngradeValidate validates whether the cluster can be downgraded to the provided target version and returns an error if not. 
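The Manager methods whose bodies follow (DowngradeValidate, DowngradeEnable, DowngradeCancel) compose into the operator-facing flow: enable validates and records the target, then the leader's monitorDowngrade loop cancels the downgrade once versionsMatchTarget reports every member at the target version. A sketch under those assumptions; downgradeTo is an illustrative wrapper, and wiring up a concrete Server is out of scope here:

package main

import (
	"context"

	"github.com/coreos/go-semver/semver"
	serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
)

// downgradeTo is an illustrative wrapper, not part of this diff.
func downgradeTo(ctx context.Context, mgr *serverversion.Manager, target *semver.Version) error {
	// DowngradeEnable runs DowngradeValidate internally, enforcing the
	// one-minor-version rule and rejecting concurrent downgrade jobs.
	if err := mgr.DowngradeEnable(ctx, target); err != nil {
		return err
	}
	// From here on the leader's monitor calls CancelDowngradeIfNeeded once
	// all members report the target version; no further action is required.
	return nil
}

func main() {
	_ = downgradeTo // a real caller would construct a Manager via serverversion.NewManager
}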
+func (m *Manager) DowngradeValidate(ctx context.Context, targetVersion *semver.Version) error { + // gets leaders commit index and wait for local store to finish applying that index + // to avoid using stale downgrade information + err := m.s.LinearizableReadNotify(ctx) + if err != nil { + return err + } + cv := m.s.GetClusterVersion() + allowedTargetVersion := allowedDowngradeVersion(cv) + if !targetVersion.Equal(*allowedTargetVersion) { + return ErrInvalidDowngradeTargetVersion + } + + downgradeInfo := m.s.GetDowngradeInfo() + if downgradeInfo != nil && downgradeInfo.Enabled { + // Todo: return the downgrade status along with the error msg + return ErrDowngradeInProcess + } + return nil +} + +// DowngradeEnable initiates etcd cluster version downgrade process. +func (m *Manager) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error { + // validate downgrade capability before starting downgrade + err := m.DowngradeValidate(ctx, targetVersion) + if err != nil { + return err + } + return m.s.DowngradeEnable(ctx, targetVersion) +} + +// DowngradeCancel cancels ongoing downgrade process. +func (m *Manager) DowngradeCancel(ctx context.Context) error { + err := m.s.LinearizableReadNotify(ctx) + if err != nil { + return err + } + downgradeInfo := m.s.GetDowngradeInfo() + if !downgradeInfo.Enabled { + return ErrNoInflightDowngrade + } + return m.s.DowngradeCancel(ctx) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go index e8174f396f..55139c04b3 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go +++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go @@ -37,7 +37,8 @@ func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) { // NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger". func NewRaftLoggerZap(lg *zap.Logger) raft.Logger { - return &zapRaftLogger{lg: lg, sugar: lg.Sugar()} + skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1)) + return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()} } // NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core" diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go index 4b0a60a9be..542c3a82a0 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go @@ -19,7 +19,7 @@ import ( "context" "errors" "fmt" - "io/ioutil" + "io" "net/http" "time" @@ -53,7 +53,7 @@ func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } defer r.Body.Close() - b, err := ioutil.ReadAll(r.Body) + b, err := io.ReadAll(r.Body) if err != nil { http.Error(w, "error reading body", http.StatusBadRequest) return @@ -236,13 +236,13 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string return nil, fmt.Errorf(`lease: %v. 
data = "%s"`, err, string(b)) } if lresp.LeaseTimeToLiveResponse.ID != int64(id) { - return nil, fmt.Errorf("lease: renew id mismatch") + return nil, fmt.Errorf("lease: TTL id mismatch") } return lresp, nil } func readResponse(resp *http.Response) (b []byte, err error) { - b, err = ioutil.ReadAll(resp.Body) + b, err = io.ReadAll(resp.Body) httputil.GracefulClose(resp) return } diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go index 7236515f2b..0a77fd669d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go +++ b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go @@ -24,10 +24,11 @@ import ( "sync" "time" + "github.com/coreos/go-semver/semver" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/lease/leasepb" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) @@ -37,6 +38,8 @@ const NoLease = LeaseID(0) // MaxLeaseTTL is the maximum lease TTL value const MaxLeaseTTL = 9000000000 +var v3_6 = semver.Version{Major: 3, Minor: 6} + var ( forever = time.Time{} @@ -180,19 +183,29 @@ type lessor struct { checkpointInterval time.Duration // the interval to check if the expired lease is revoked expiredLeaseRetryInterval time.Duration + // whether lessor should always persist remaining TTL (always enabled in v3.6). + checkpointPersist bool + // cluster is used to adapt lessor logic based on cluster version + cluster cluster +} + +type cluster interface { + // Version is the cluster-wide minimum major.minor version. + Version() *semver.Version } type LessorConfig struct { MinLeaseTTL int64 CheckpointInterval time.Duration ExpiredLeasesRetryInterval time.Duration + CheckpointPersist bool } -func NewLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) Lessor { - return newLessor(lg, b, cfg) +func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor { + return newLessor(lg, b, cluster, cfg) } -func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor { +func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor { checkpointInterval := cfg.CheckpointInterval expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval if checkpointInterval == 0 { @@ -210,11 +223,13 @@ func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor { minLeaseTTL: cfg.MinLeaseTTL, checkpointInterval: checkpointInterval, expiredLeaseRetryInterval: expiredLeaseRetryInterval, + checkpointPersist: cfg.CheckpointPersist, // expiredC is a small buffered chan to avoid unnecessary blocking. expiredC: make(chan []*Lease, 16), stopC: make(chan struct{}), doneC: make(chan struct{}), lg: lg, + cluster: cluster, } l.initAndRecover() @@ -336,7 +351,7 @@ func (le *lessor) Revoke(id LeaseID) error { // lease deletion needs to be in the same backend transaction with the // kv deletion. Or we might end up with not executing the revoke or not // deleting the keys if etcdserver fails in between. 
- le.b.BatchTx().UnsafeDelete(buckets.Lease, int64ToBytes(int64(l.ID))) + schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)}) txn.End() @@ -351,6 +366,9 @@ func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { if l, ok := le.leaseMap[id]; ok { // when checkpointing, we only update the remainingTTL, Promote is responsible for applying this to lease expiry l.remainingTTL = remainingTTL + if le.shouldPersistCheckpoints() { + l.persistTo(le.b) + } if le.isPrimary() { // schedule the next checkpoint as needed le.scheduleCheckpointIfNeeded(l) @@ -359,6 +377,15 @@ func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil } +func (le *lessor) shouldPersistCheckpoints() bool { + cv := le.cluster.Version() + return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, v3_6)) +} + +func greaterOrEqual(first, second semver.Version) bool { + return !first.LessThan(second) +} + // Renew renews an existing lease. If the given lease does not exist or // has expired, an error will be returned. func (le *lessor) Renew(id LeaseID) (int64, error) { @@ -446,6 +473,7 @@ func (le *lessor) Promote(extend time.Duration) { l.refresh(extend) item := &LeaseWithTime{id: l.ID, time: l.expiry} le.leaseExpiredNotifier.RegisterOrUpdate(item) + le.scheduleCheckpointIfNeeded(l) } if len(le.leaseMap) < leaseRevokeRate { @@ -768,18 +796,12 @@ func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCh func (le *lessor) initAndRecover() { tx := le.b.BatchTx() - tx.Lock() - tx.UnsafeCreateBucket(buckets.Lease) - _, vs := tx.UnsafeRange(buckets.Lease, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0) - // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue. - for i := range vs { - var lpb leasepb.Lease - err := lpb.Unmarshal(vs[i]) - if err != nil { - tx.Unlock() - panic("failed to unmarshal lease proto item") - } + tx.Lock() + schema.UnsafeCreateLeaseBucket(tx) + lpbs := schema.MustUnsafeGetAllLeases(tx) + tx.Unlock() + for _, lpb := range lpbs { ID := LeaseID(lpb.ID) if lpb.TTL < le.minLeaseTTL { lpb.TTL = le.minLeaseTTL @@ -789,14 +811,14 @@ func (le *lessor) initAndRecover() { ttl: lpb.TTL, // itemSet will be filled in when recover key-value pairs // set expiry to forever, refresh when promoted - itemSet: make(map[LeaseItem]struct{}), - expiry: forever, - revokec: make(chan struct{}), + itemSet: make(map[LeaseItem]struct{}), + expiry: forever, + revokec: make(chan struct{}), + remainingTTL: lpb.RemainingTTL, } } le.leaseExpiredNotifier.Init() heap.Init(&le.leaseCheckpointHeap) - tx.Unlock() le.b.ForceCommit() } @@ -821,17 +843,11 @@ func (l *Lease) expired() bool { } func (l *Lease) persistTo(b backend.Backend) { - key := int64ToBytes(int64(l.ID)) - lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL} - val, err := lpb.Marshal() - if err != nil { - panic("failed to marshal lease proto item") - } - - b.BatchTx().Lock() - b.BatchTx().UnsafePut(buckets.Lease, key, val) - b.BatchTx().Unlock() + tx := b.BatchTx() + tx.Lock() + defer tx.Unlock() + schema.MustUnsafePutLease(tx, &lpb) } // TTL returns the TTL of the Lease. 
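The lessor changes above gate checkpoint persistence on either explicit configuration (CheckpointPersist) or a cluster version of at least 3.6, so remaining TTLs survive leader changes only once the whole cluster understands them. The decision isolated into a runnable sketch, mirroring greaterOrEqual and v3_6 from the diff:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

var v3_6 = semver.Version{Major: 3, Minor: 6}

func greaterOrEqual(first, second semver.Version) bool {
	return !first.LessThan(second)
}

// shouldPersist mirrors shouldPersistCheckpoints above, taken out of the
// lessor for illustration.
func shouldPersist(checkpointPersist bool, cv *semver.Version) bool {
	return checkpointPersist || (cv != nil && greaterOrEqual(*cv, v3_6))
}

func main() {
	fmt.Println(shouldPersist(false, semver.New("3.5.0"))) // false: remaining TTL kept in memory only
	fmt.Println(shouldPersist(true, semver.New("3.5.0")))  // true: opted in via config
	fmt.Println(shouldPersist(false, semver.New("3.6.0"))) // true: always persisted from 3.6 on
}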
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/cluster.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/cluster.go index 1f7dccbe74..be4e143401 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/cluster.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/cluster.go @@ -34,7 +34,7 @@ const resolveRetryRate = 1 type clusterProxy struct { lg *zap.Logger - clus clientv3.Cluster + clus pb.ClusterClient ctx context.Context // advertise client URL @@ -67,7 +67,7 @@ func NewClusterProxy(lg *zap.Logger, c *clientv3.Client, advaddr string, prefix cp := &clusterProxy{ lg: lg, - clus: c.Cluster, + clus: pb.NewClusterClient(c.ActiveConnection()), ctx: c.Ctx(), advaddr: advaddr, @@ -123,46 +123,15 @@ func (cp *clusterProxy) monitor(wa endpoints.WatchChannel) { } func (cp *clusterProxy) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) { - if r.IsLearner { - return cp.memberAddAsLearner(ctx, r.PeerURLs) - } - return cp.memberAdd(ctx, r.PeerURLs) -} - -func (cp *clusterProxy) memberAdd(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) { - mresp, err := cp.clus.MemberAdd(ctx, peerURLs) - if err != nil { - return nil, err - } - resp := (pb.MemberAddResponse)(*mresp) - return &resp, err -} - -func (cp *clusterProxy) memberAddAsLearner(ctx context.Context, peerURLs []string) (*pb.MemberAddResponse, error) { - mresp, err := cp.clus.MemberAddAsLearner(ctx, peerURLs) - if err != nil { - return nil, err - } - resp := (pb.MemberAddResponse)(*mresp) - return &resp, err + return cp.clus.MemberAdd(ctx, r) } func (cp *clusterProxy) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - mresp, err := cp.clus.MemberRemove(ctx, r.ID) - if err != nil { - return nil, err - } - resp := (pb.MemberRemoveResponse)(*mresp) - return &resp, err + return cp.clus.MemberRemove(ctx, r) } func (cp *clusterProxy) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { - mresp, err := cp.clus.MemberUpdate(ctx, r.ID, r.PeerURLs) - if err != nil { - return nil, err - } - resp := (pb.MemberUpdateResponse)(*mresp) - return &resp, err + return cp.clus.MemberUpdate(ctx, r) } func (cp *clusterProxy) membersFromUpdates() ([]*pb.Member, error) { @@ -199,12 +168,7 @@ func (cp *clusterProxy) MemberList(ctx context.Context, r *pb.MemberListRequest) hostname, _ := os.Hostname() return &pb.MemberListResponse{Members: []*pb.Member{{Name: hostname, ClientURLs: []string{cp.advaddr}}}}, nil } - mresp, err := cp.clus.MemberList(ctx) - if err != nil { - return nil, err - } - resp := (pb.MemberListResponse)(*mresp) - return &resp, err + return cp.clus.MemberList(ctx, r) } func (cp *clusterProxy) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) { diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/health.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/health.go index 1d6f7a2d8b..882af4b46a 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/health.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/health.go @@ -31,7 +31,7 @@ func HandleHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { if lg == nil { lg = zap.NewNop() } - mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkHealth(c) })) + mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet, serializable bool) 
etcdhttp.Health { return checkHealth(c) })) } // HandleProxyHealth registers health handler on '/proxy/health'. @@ -39,7 +39,7 @@ func HandleProxyHealth(lg *zap.Logger, mux *http.ServeMux, c *clientv3.Client) { if lg == nil { lg = zap.NewNop() } - mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet) etcdhttp.Health { return checkProxyHealth(c) })) + mux.Handle(etcdhttp.PathProxyHealth, etcdhttp.NewHealthHandler(lg, func(excludedAlarms etcdhttp.AlarmSet, serializable bool) etcdhttp.Health { return checkProxyHealth(c) })) } func checkHealth(c *clientv3.Client) etcdhttp.Health { diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/metrics.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/metrics.go index 01a7a94c89..d2a62f3183 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/metrics.go @@ -16,7 +16,7 @@ package grpcproxy import ( "fmt" - "io/ioutil" + "io" "math/rand" "net/http" "strings" @@ -94,7 +94,7 @@ func HandleMetrics(mux *http.ServeMux, c *http.Client, eps []string) { } defer resp.Body.Close() w.Header().Set("Content-Type", "text/plain; version=0.0.4") - body, _ := ioutil.ReadAll(resp.Body) + body, _ := io.ReadAll(resp.Body) fmt.Fprintf(w, "%s", body) }) } diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/watcher.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/watcher.go index 5f6c3db808..a15edda884 100644 --- a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/watcher.go +++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/watcher.go @@ -20,7 +20,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/client/v3" - "go.etcd.io/etcd/server/v3/mvcc" + "go.etcd.io/etcd/server/v3/storage/mvcc" ) type watchRange struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go similarity index 78% rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend.go index 081be2b525..abbbf889d7 100644 --- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/backend.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcdserver +package storage import ( "fmt" @@ -22,8 +22,8 @@ import ( "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) @@ -55,8 +55,8 @@ func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { return backend.New(bcfg) } -// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. -func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks backend.Hooks) (backend.Backend, error) { +// OpenSnapshotBackend renames a snapshot db to the current etcd db and opens it. 
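Several hunks in this diff (leasehttp above, the proxy metrics handler, and the defrag path below) swap the deprecated io/ioutil helpers for their Go 1.16 replacements. A minimal sketch of the two substitutions:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// io.ReadAll replaces ioutil.ReadAll (HTTP bodies, metrics responses).
	b, err := io.ReadAll(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", b)

	// os.CreateTemp replaces ioutil.TempFile (temporary defrag database).
	f, err := os.CreateTemp("", "db.tmp.*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()
	fmt.Println(f.Name())
}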
+func OpenSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks *BackendHooks) (backend.Backend, error) { snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) if err != nil { return nil, fmt.Errorf("failed to find database snapshot file (%v)", err) @@ -64,11 +64,11 @@ func openSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot if err := os.Rename(snapPath, cfg.BackendPath()); err != nil { return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err) } - return openBackend(cfg, hooks), nil + return OpenBackend(cfg, hooks), nil } -// openBackend returns a backend using the current etcd db. -func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { +// OpenBackend returns a backend using the current etcd db. +func OpenBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { fn := cfg.BackendPath() now, beOpened := time.Now(), make(chan backend.Backend) @@ -92,18 +92,18 @@ func openBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend { return <-beOpened } -// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// RecoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes // before updating the backend db after persisting raft snapshot to disk, // violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this // case, replace the db with the snapshot db sent by the leader. -func recoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks backend.Hooks) (backend.Backend, error) { +func RecoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks *BackendHooks) (backend.Backend, error) { consistentIndex := uint64(0) if beExist { - consistentIndex, _ = cindex.ReadConsistentIndex(oldbe.BatchTx()) + consistentIndex, _ = schema.ReadConsistentIndex(oldbe.BatchTx()) } if snapshot.Metadata.Index <= consistentIndex { return oldbe, nil } oldbe.Close() - return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) + return OpenSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go similarity index 98% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go index b7207c1717..c558ecacd6 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/backend.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go @@ -18,7 +18,6 @@ import ( "fmt" "hash/crc32" "io" - "io/ioutil" "os" "path/filepath" "sync" @@ -100,8 +99,9 @@ type backend struct { // mlock prevents backend database file to be swapped mlock bool - mu sync.RWMutex - db *bolt.DB + mu sync.RWMutex + bopts *bolt.Options + db *bolt.DB batchInterval time.Duration batchLimit int @@ -185,7 +185,8 @@ func newBackend(bcfg BackendConfig) *backend { // In future, may want to make buffering optional for low-concurrency systems // or dynamically swap between buffered/non-buffered depending on workload. 
b := &backend{ - db: db, + bopts: bopts, + db: db, batchInterval: bcfg.BatchInterval, batchLimit: bcfg.BatchLimit, @@ -432,6 +433,8 @@ func (b *backend) Defrag() error { func (b *backend) defrag() error { now := time.Now() + isDefragActive.Set(1) + defer isDefragActive.Set(0) // TODO: make this non-blocking? // lock batchTx to ensure nobody is using previous tx, and then @@ -454,7 +457,7 @@ func (b *backend) defrag() error { // Create a temporary file to ensure we start with a clean slate. // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup. dir := filepath.Dir(b.db.Path()) - temp, err := ioutil.TempFile(dir, "db.tmp.*") + temp, err := os.CreateTemp(dir, "db.tmp.*") if err != nil { return err } @@ -509,13 +512,7 @@ func (b *backend) defrag() error { b.lg.Fatal("failed to rename tmp database", zap.Error(err)) } - defragmentedBoltOptions := bolt.Options{} - if boltOpenOptions != nil { - defragmentedBoltOptions = *boltOpenOptions - } - defragmentedBoltOptions.Mlock = b.mlock - - b.db, err = bolt.Open(dbp, 0600, &defragmentedBoltOptions) + b.db, err = bolt.Open(dbp, 0600, b.bopts) if err != nil { b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err)) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/batch_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/batch_tx.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_default.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_default.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_linux.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_linux.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_windows.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/config_windows.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/hooks.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go similarity index 92% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go index d9641af7ae..9d58c00638 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go @@ -83,6 +83,13 @@ var ( // highest bucket start of 0.01 sec 
* 2^16 == 655.36 sec Buckets: prometheus.ExponentialBuckets(.01, 2, 17), }) + + isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "defrag_inflight", + Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.", + }) ) func init() { @@ -92,4 +99,5 @@ prometheus.MustRegister(writeSec) prometheus.MustRegister(defragSec) prometheus.MustRegister(snapshotTransferSec) + prometheus.MustRegister(isDefragActive) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/read_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/read_tx.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go similarity index 99% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go index 6674002483..779255b732 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/backend/tx_buffer.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go @@ -148,7 +148,7 @@ func newBucketBuffer() *bucketBuffer { func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } idx := sort.Search(bb.used, f) - if idx < 0 { + if idx < 0 || idx >= bb.used { return nil, nil } if len(endKey) == 0 { diff --git a/vendor/go.etcd.io/etcd/server/v3/datadir/datadir.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/datadir/datadir.go rename to vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go diff --git a/vendor/go.etcd.io/etcd/server/v3/datadir/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/datadir/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go new file mode 100644 index 0000000000..e9a9f250d4 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go @@ -0,0 +1,60 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "sync" + + "go.uber.org/zap" + + "go.etcd.io/etcd/raft/v3/raftpb" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" +) + +type BackendHooks struct { + indexer cindex.ConsistentIndexer + lg *zap.Logger + + // confState to be written in the next submitted Backend transaction (if dirty) + confState raftpb.ConfState + // the first write changes it to 'dirty'; false by default, so + // an uninitialized `confState` is meaningless. + confStateDirty bool + confStateLock sync.Mutex +} + +func NewBackendHooks(lg *zap.Logger, indexer cindex.ConsistentIndexer) *BackendHooks { + return &BackendHooks{lg: lg, indexer: indexer} +} + +func (bh *BackendHooks) OnPreCommitUnsafe(tx backend.BatchTx) { + bh.indexer.UnsafeSave(tx) + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + if bh.confStateDirty { + schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState) + // save bh.confState + bh.confStateDirty = false + } +} + +func (bh *BackendHooks) SetConfState(confState *raftpb.ConfState) { + bh.confStateLock.Lock() + defer bh.confStateLock.Unlock() + bh.confState = *confState + bh.confStateDirty = true +} diff --git a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go similarity index 59% rename from vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go rename to vendor/go.etcd.io/etcd/server/v3/storage/metrics.go index 765c21a289..cb7f87057f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go @@ -1,4 +1,4 @@ -// Copyright The OpenTelemetry Authors +// Copyright 2021 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,24 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package noop provides noop tracing implementations for tracer and span. -package noop +package storage import ( - "context" - - "go.opentelemetry.io/otel/trace" + "github.com/prometheus/client_golang/prometheus" ) -var ( - // Tracer is a noop tracer that starts noop spans. - Tracer trace.Tracer - - // Span is a noop Span. 
- Span trace.Span -) +var quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "quota_backend_bytes", + Help: "Current backend storage quota size in bytes.", +}) func init() { - Tracer = trace.NewNoopTracerProvider().Tracer("") - _, Span = Tracer.Start(context.Background(), "") + prometheus.MustRegister(quotaBackendBytes) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go similarity index 95% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/index.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go index 0a5cb00516..be817c5a6d 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/index.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go @@ -218,8 +218,8 @@ func (ti *treeIndex) Compact(rev int64) map[revision]struct{} { clone.Ascend(func(item btree.Item) bool { keyi := item.(*keyIndex) - //Lock is needed here to prevent modification to the keyIndex while - //compaction is going on or revision added to empty before deletion + // Lock is needed here to prevent modification to the keyIndex while + // compaction is going on or revision added to empty before deletion ti.Lock() keyi.compact(ti.lg, rev, available) if keyi.isEmpty() { @@ -257,8 +257,14 @@ func (ti *treeIndex) Equal(bi index) bool { equal := true ti.tree.Ascend(func(item btree.Item) bool { - aki := item.(*keyIndex) - bki := b.tree.Get(item).(*keyIndex) + var aki, bki *keyIndex + var ok bool + if aki, ok = item.(*keyIndex); !ok { + return false + } + if bki, ok = b.tree.Get(item).(*keyIndex); !ok { + return false + } if !aki.equal(bki) { equal = false return false diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go similarity index 99% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go index 58ad4832eb..61a13b989e 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/key_index.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go @@ -133,7 +133,7 @@ func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error { } // get gets the modified, created revision and version of the key that satisfies the given atRev. -// Rev must be higher than or equal to the given atRev. +// Rev must be smaller than or equal to the given atRev. 
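The isDefragActive gauge added in backend/metrics.go above follows the usual Prometheus in-flight pattern: set to 1 for the duration of the guarded operation and reset on exit, which the defrag path does with a deferred Set(0). A runnable sketch reusing the same GaugeOpts; the sleep stands in for the real work:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "etcd",
	Subsystem: "disk",
	Name:      "defrag_inflight",
	Help:      "Whether or not defrag is active on the member. 1 means active, 0 means not.",
})

func init() { prometheus.MustRegister(isDefragActive) }

func defrag() {
	isDefragActive.Set(1)
	defer isDefragActive.Set(0)
	time.Sleep(10 * time.Millisecond) // stand-in for the real defragmentation
}

func main() {
	defrag()
	fmt.Println("defrag done, gauge back to 0")
}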
func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) { if ki.isEmpty() { lg.Panic( diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go similarity index 99% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go index 79c2e68700..10c4821b14 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go @@ -20,7 +20,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" + "go.etcd.io/etcd/server/v3/storage/backend" ) type RangeOptions struct { diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kv_view.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kv_view.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go similarity index 90% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go index 54055ed055..dbf59239c1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go @@ -27,16 +27,13 @@ import ( "go.etcd.io/etcd/pkg/v3/schedule" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) var ( - scheduledCompactKeyName = []byte("scheduledCompactRev") - finishedCompactKeyName = []byte("finishedCompactRev") - ErrCompacted = errors.New("mvcc: required revision has been compacted") ErrFutureRev = errors.New("mvcc: required revision is a future revision") ) @@ -52,9 +49,11 @@ const ( var restoreChunkKeys = 10000 // non-const for testing var defaultCompactBatchLimit = 1000 +var minimumBatchInterval = 10 * time.Millisecond type StoreConfig struct { - CompactionBatchLimit int + CompactionBatchLimit int + CompactionSleepInterval time.Duration } type store struct { @@ -96,6 +95,9 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi if cfg.CompactionBatchLimit == 0 { cfg.CompactionBatchLimit = defaultCompactBatchLimit } + if cfg.CompactionSleepInterval == 0 { + cfg.CompactionSleepInterval = minimumBatchInterval + } s := &store{ cfg: cfg, b: b, @@ -120,8 +122,8 @@ func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfi tx := s.b.BatchTx() tx.Lock() - tx.UnsafeCreateBucket(buckets.Key) - tx.UnsafeCreateBucket(buckets.Meta) + tx.UnsafeCreateBucket(schema.Key) + schema.UnsafeCreateMetaBucket(tx) tx.Unlock() s.b.ForceCommit() @@ -140,7 +142,7 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { select { case <-s.stopc: default: - // fix deadlock in mvcc,for more information, please refer to pr 11817. + // fix deadlock in mvcc, for more information, please refer to pr 11817. // s.stopc is only updated in restore operation, which is called by apply // snapshot call, compaction and apply snapshot requests are serialized by // raft, and do not happen at the same time. 
@@ -159,7 +161,7 @@ func (s *store) Hash() (hash uint32, revision int64, err error) { start := time.Now() s.b.ForceCommit() - h, err := s.b.Hash(buckets.DefaultIgnores) + h, err := s.b.Hash(schema.DefaultIgnores) hashSec.Observe(time.Since(start).Seconds()) return h, s.currentRev, err @@ -195,8 +197,8 @@ func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev lower := revision{main: compactRev + 1} h := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - h.Write(buckets.Key.Name()) - err = tx.UnsafeForEach(buckets.Key, func(k, v []byte) error { + h.Write(schema.Key.Name()) + err = tx.UnsafeForEach(schema.Key, func(k, v []byte) error { kr := bytesToRev(k) if !upper.GreaterThan(kr) { return nil @@ -234,13 +236,7 @@ func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) { s.compactMainRev = rev - rbytes := newRevBytes() - revToBytes(revision{main: rev}, rbytes) - - tx := s.b.BatchTx() - tx.Lock() - tx.UnsafePut(buckets.Meta, scheduledCompactKeyName, rbytes) - tx.Unlock() + SetScheduledCompact(s.b.BatchTx(), rev) // ensure that desired compaction is persisted s.b.ForceCommit() @@ -337,30 +333,24 @@ func (s *store) restore() error { tx := s.b.BatchTx() tx.Lock() - _, finishedCompactBytes := tx.UnsafeRange(buckets.Meta, finishedCompactKeyName, nil, 0) - if len(finishedCompactBytes) != 0 { + finishedCompact, found := UnsafeReadFinishedCompact(tx) + if found { s.revMu.Lock() - s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main + s.compactMainRev = finishedCompact s.lg.Info( "restored last compact revision", - zap.Stringer("meta-bucket-name", buckets.Meta), - zap.String("meta-bucket-name-key", string(finishedCompactKeyName)), + zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)), zap.Int64("restored-compact-revision", s.compactMainRev), ) s.revMu.Unlock() } - _, scheduledCompactBytes := tx.UnsafeRange(buckets.Meta, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - } - + scheduledCompact, _ := UnsafeReadScheduledCompact(tx) // index keys concurrently as they're loaded in from tx keysGauge.Set(0) rkvc, revc := restoreIntoIndex(s.lg, s.kvindex) for { - keys, vals := tx.UnsafeRange(buckets.Key, min, max, int64(restoreChunkKeys)) + keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys)) if len(keys) == 0 { break } @@ -421,8 +411,6 @@ func (s *store) restore() error { s.lg.Info( "resume scheduled compaction", - zap.Stringer("meta-bucket-name", buckets.Meta), - zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)), zap.Int64("scheduled-compact-revision", scheduledCompact), ) } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go similarity index 82% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go index 71bd4b7369..ba94400821 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_compaction.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "time" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) @@ -32,6 +32,9 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc end := make([]byte, 8) binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) + 
batchNum := s.cfg.CompactionBatchLimit + batchInterval := s.cfg.CompactionSleepInterval + last := make([]byte, 8+1+8) for { var rev revision @@ -40,19 +43,17 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc tx := s.b.BatchTx() tx.Lock() - keys, _ := tx.UnsafeRange(buckets.Key, last, end, int64(s.cfg.CompactionBatchLimit)) + keys, _ := tx.UnsafeRange(schema.Key, last, end, int64(batchNum)) for _, key := range keys { rev = bytesToRev(key) if _, ok := keep[rev]; !ok { - tx.UnsafeDelete(buckets.Key, key) + tx.UnsafeDelete(schema.Key, key) keyCompactions++ } } - if len(keys) < s.cfg.CompactionBatchLimit { - rbytes := make([]byte, 8+1+8) - revToBytes(revision{main: compactMainRev}, rbytes) - tx.UnsafePut(buckets.Meta, finishedCompactKeyName, rbytes) + if len(keys) < batchNum { + UnsafeSetFinishedCompact(tx, compactMainRev) tx.Unlock() s.lg.Info( "finished scheduled compaction", @@ -62,15 +63,15 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc return true } + tx.Unlock() // update last revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last) - tx.Unlock() // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer s.b.ForceCommit() dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond)) select { - case <-time.After(10 * time.Millisecond): + case <-time.After(batchInterval): case <-s.stopc: return false } diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go similarity index 96% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go index 93d7db20e0..fb7a9ca1fa 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/kvstore_txn.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go @@ -20,8 +20,8 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/lease" - "go.etcd.io/etcd/server/v3/mvcc/backend" - "go.etcd.io/etcd/server/v3/mvcc/buckets" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/schema" "go.uber.org/zap" ) @@ -62,6 +62,61 @@ func (tr *storeTxnRead) Range(ctx context.Context, key, end []byte, ro RangeOpti return tr.rangeKeys(ctx, key, end, tr.Rev(), ro) } +func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + if ro.Count { + total := tr.s.kvindex.CountRevisions(key, end, rev) + tr.trace.Step("count revisions from in-memory index tree") + return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil + } + revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) + tr.trace.Step("range keys from in-memory index tree") + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil + } + + limit := int(ro.Limit) + if limit <= 0 || limit > len(revpairs) { + limit = len(revpairs) + } + + kvs := make([]mvccpb.KeyValue, limit) + revBytes := newRevBytes() + for i, revpair := range revpairs[:len(kvs)] { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + revToBytes(revpair, revBytes) + _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0) + 
if len(vs) != 1 { + tr.s.lg.Fatal( + "range failed to find revision pair", + zap.Int64("revision-main", revpair.main), + zap.Int64("revision-sub", revpair.sub), + ) + } + if err := kvs[i].Unmarshal(vs[0]); err != nil { + tr.s.lg.Fatal( + "failed to unmarshal mvccpb.KeyValue", + zap.Error(err), + ) + } + } + tr.trace.Step("range keys from bolt db") + return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil +} + func (tr *storeTxnRead) End() { tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx. tr.s.mu.RUnlock() @@ -124,61 +179,6 @@ func (tw *storeTxnWrite) End() { tw.s.mu.RUnlock() } -func (tr *storeTxnRead) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { - rev := ro.Rev - if rev > curRev { - return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev - } - if rev <= 0 { - rev = curRev - } - if rev < tr.s.compactMainRev { - return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted - } - if ro.Count { - total := tr.s.kvindex.CountRevisions(key, end, rev) - tr.trace.Step("count revisions from in-memory index tree") - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit)) - tr.trace.Step("range keys from in-memory index tree") - if len(revpairs) == 0 { - return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil - } - - limit := int(ro.Limit) - if limit <= 0 || limit > len(revpairs) { - limit = len(revpairs) - } - - kvs := make([]mvccpb.KeyValue, limit) - revBytes := newRevBytes() - for i, revpair := range revpairs[:len(kvs)] { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - revToBytes(revpair, revBytes) - _, vs := tr.tx.UnsafeRange(buckets.Key, revBytes, nil, 0) - if len(vs) != 1 { - tr.s.lg.Fatal( - "range failed to find revision pair", - zap.Int64("revision-main", revpair.main), - zap.Int64("revision-sub", revpair.sub), - ) - } - if err := kvs[i].Unmarshal(vs[0]); err != nil { - tr.s.lg.Fatal( - "failed to unmarshal mvccpb.KeyValue", - zap.Error(err), - ) - } - } - tr.trace.Step("range keys from bolt db") - return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil -} - func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { rev := tw.beginRev + 1 c := rev @@ -215,11 +215,16 @@ func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { } tw.trace.Step("marshal mvccpb.KeyValue") - tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) + tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) tw.s.kvindex.Put(key, idxRev) tw.changes = append(tw.changes, kv) tw.trace.Step("store kv pair into bolt db") + if oldLease == leaseID { + tw.trace.Step("attach lease to kv pair") + return + } + if oldLease != lease.NoLease { if tw.s.le == nil { panic("no lessor to detach lease") @@ -276,7 +281,7 @@ func (tw *storeTxnWrite) delete(key []byte) { ) } - tw.tx.UnsafeSeqPut(buckets.Key, ibytes, d) + tw.tx.UnsafeSeqPut(schema.Key, ibytes, d) err = tw.s.kvindex.Tombstone(key, idxRev) if err != nil { tw.storeTxnRead.s.lg.Fatal( diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go similarity index 97% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go index f28d114e2b..c7ece518e4 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go @@ -28,13 +28,6 @@ var ( Name: "range_total", Help: "Total number 
of ranges seen by this member.", }) - rangeCounterDebug = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: "etcd_debugging", - Subsystem: "mvcc", - Name: "range_total", - Help: "Total number of ranges seen by this member.", - }) putCounter = prometheus.NewCounter( prometheus.CounterOpts{ @@ -280,7 +273,6 @@ var ( func init() { prometheus.MustRegister(rangeCounter) - prometheus.MustRegister(rangeCounterDebug) prometheus.MustRegister(putCounter) prometheus.MustRegister(deleteCounter) prometheus.MustRegister(txnCounter) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go similarity index 96% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go index af844f8468..aef877a1c1 100644 --- a/vendor/go.etcd.io/etcd/server/v3/mvcc/metrics_txn.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go @@ -61,7 +61,6 @@ func (tw *metricsTxnWrite) End() { ranges := float64(tw.ranges) rangeCounter.Add(ranges) - rangeCounterDebug.Add(ranges) // TODO: remove in 3.5 release puts := float64(tw.puts) putCounter.Add(puts) diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/revision.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/mvcc/revision.go rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go new file mode 100644 index 0000000000..e530c82f4e --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go @@ -0,0 +1,60 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
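The compact-meta helpers in the new mvcc/store.go that follows persist revisions through `newRevBytes`/`revToBytes`. Those helpers are not shown in this hunk, but judging from the `make([]byte, 8+1+8)` sizing used elsewhere in this diff, a revision is encoded as a 17-byte key: 8 big-endian bytes of main revision, one separator byte, and 8 big-endian bytes of sub revision. A minimal self-contained sketch of that layout (the `revision` struct and the `'_'` separator mirror etcd's internal convention; treat the details as illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// revision mirrors mvcc's internal (main, sub) revision pair.
type revision struct{ main, sub int64 }

// revToBytes sketches the 8 + 1 + 8 byte key layout used for bolt keys.
func revToBytes(rev revision, b []byte) {
	binary.BigEndian.PutUint64(b[0:8], uint64(rev.main))
	b[8] = '_' // separator between main and sub revision
	binary.BigEndian.PutUint64(b[9:17], uint64(rev.sub))
}

func main() {
	b := make([]byte, 8+1+8)
	revToBytes(revision{main: 42, sub: 7}, b)
	fmt.Printf("%x\n", b)
}

Because the main revision comes first in big-endian order, a byte-wise range scan over these keys visits revisions in increasing order, which is what the compaction loop below relies on.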
+
+package mvcc
+
+import (
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func UnsafeReadFinishedCompact(tx backend.ReadTx) (finishedCompact int64, found bool) {
+	_, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0)
+	if len(finishedCompactBytes) != 0 {
+		return bytesToRev(finishedCompactBytes[0]).main, true
+	}
+	return 0, false
+}
+
+func UnsafeReadScheduledCompact(tx backend.ReadTx) (scheduledCompact int64, found bool) {
+	_, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0)
+	if len(scheduledCompactBytes) != 0 {
+		return bytesToRev(scheduledCompactBytes[0]).main, true
+	}
+	return 0, false
+}
+
+func SetScheduledCompact(tx backend.BatchTx, value int64) {
+	tx.Lock()
+	defer tx.Unlock()
+	UnsafeSetScheduledCompact(tx, value)
+}
+
+func UnsafeSetScheduledCompact(tx backend.BatchTx, value int64) {
+	rbytes := newRevBytes()
+	revToBytes(revision{main: value}, rbytes)
+	tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
+}
+
+func SetFinishedCompact(tx backend.BatchTx, value int64) {
+	tx.Lock()
+	defer tx.Unlock()
+	UnsafeSetFinishedCompact(tx, value)
+}
+
+func UnsafeSetFinishedCompact(tx backend.BatchTx, value int64) {
+	rbytes := newRevBytes()
+	revToBytes(revision{main: value}, rbytes)
+	tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/util.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/util.go
similarity index 87%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/util.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/util.go
index 83cbf44bf8..bf5d9c196b 100644
--- a/vendor/go.etcd.io/etcd/server/v3/mvcc/util.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/util.go
@@ -18,8 +18,8 @@ import (
 	"fmt"
 
 	"go.etcd.io/etcd/api/v3/mvccpb"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/schema"
 )
 
 func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
@@ -32,6 +32,6 @@ func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
 	}
 
 	be.BatchTx().Lock()
-	be.BatchTx().UnsafePut(buckets.Key, ibytes, d)
+	be.BatchTx().UnsafePut(schema.Key, ibytes, d)
 	be.BatchTx().Unlock()
 }
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go
similarity index 96%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go
index 3c7edb3337..5a201d4edb 100644
--- a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go
@@ -21,8 +21,8 @@ import (
 	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
 	"go.etcd.io/etcd/server/v3/lease"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.etcd.io/etcd/server/v3/storage/schema"
 	"go.uber.org/zap"
 )
 
@@ -354,11 +354,14 @@ func (s *watchableStore) syncWatchers() int {
 	// values are actual key-value pairs in backend.
 	tx := s.store.b.ReadTx()
 	tx.RLock()
-	revs, vs := tx.UnsafeRange(buckets.Key, minBytes, maxBytes, 0)
-	tx.RUnlock()
+	revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0)
 	evs := kvsToEvents(s.store.lg, wg, revs, vs)
+	// Must unlock after kvsToEvents, because vs (which comes from boltdb memory) is not a deep copy.
+	// We can only unlock after Unmarshal, which does a deep copy.
+	// Otherwise we would trigger SIGSEGV during boltdb re-mmap.
+	tx.RUnlock()
 
-	var victims watcherBatch
+	victims := make(watcherBatch)
 	wb := newWatcherBatch(wg, evs)
 	for w := range wg.watchers {
 		w.minRev = curRev + 1
@@ -378,9 +381,6 @@ func (s *watchableStore) syncWatchers() int {
 		if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
 			pendingEventsGauge.Add(float64(len(eb.evs)))
 		} else {
-			if victims == nil {
-				victims = make(watcherBatch)
-			}
 			w.victim = true
 		}
 
@@ -432,7 +432,7 @@ func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []m
 // notify notifies the fact that given event at the given rev just happened to
 // watchers that watch on the key of the event.
 func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
-	var victim watcherBatch
+	victim := make(watcherBatch)
 	for w, eb := range newWatcherBatch(&s.synced, evs) {
 		if eb.revs != 1 {
 			s.store.lg.Panic(
@@ -445,9 +445,6 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
 		} else {
 			// move slow watcher to victims
 			w.minRev = rev + 1
-			if victim == nil {
-				victim = make(watcherBatch)
-			}
 			w.victim = true
 			victim[w] = eb
 			s.synced.delete(w)
@@ -458,7 +455,7 @@ func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
 }
 
 func (s *watchableStore) addVictim(victim watcherBatch) {
-	if victim == nil {
+	if len(victim) == 0 {
 		return
 	}
 	s.victims = append(s.victims, victim)
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watchable_store_txn.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watcher.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watcher.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/watcher_group.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/watcher_group.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go
similarity index 74%
rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/quota.go
index 33c06e6190..46b3506537 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/quota.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go
@@ -12,12 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
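The quota code that follows keeps the admission rule from the old etcdserver implementation: a mutating request is allowed while the backend size plus the estimated request cost stays below the configured limit. A rough self-contained sketch of that check (the kvOverhead constant appears in this file; the put-cost formula here is a simplification for illustration, not the exact costPut implementation):

package main

import "fmt"

const kvOverhead = 256 // estimate for the cost of a key's metadata, as in quota.go

// available mirrors BackendQuota.Available: admit while size + cost < max.
func available(backendSize, maxBytes int64, cost int) bool {
	if cost == 0 {
		return true // non-mutating requests pass through
	}
	return backendSize+int64(cost) < maxBytes
}

func main() {
	// assumed, simplified put cost: metadata overhead plus key and value sizes
	cost := kvOverhead + len("foo") + len("bar")
	fmt.Println(available(2<<30, 8<<30, cost)) // true: 2 GiB used of an 8 GiB quota
}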
-package etcdserver +package storage import ( "sync" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/config" + "go.etcd.io/etcd/server/v3/storage/backend" humanize "github.com/dustin/go-humanize" "go.uber.org/zap" @@ -50,15 +52,15 @@ func (*passthroughQuota) Available(interface{}) bool { return true } func (*passthroughQuota) Cost(interface{}) int { return 0 } func (*passthroughQuota) Remaining() int64 { return 1 } -type backendQuota struct { - s *EtcdServer +type BackendQuota struct { + be backend.Backend maxBackendBytes int64 } const ( // leaseOverhead is an estimate for the cost of storing a lease leaseOverhead = 64 - // kvOverhead is an estimate for the cost of storing a key's metadata + // kvOverhead is an estimate for the cost of storing a key's Metadata kvOverhead = 256 ) @@ -71,23 +73,23 @@ var ( ) // NewBackendQuota creates a quota layer with the given storage limit. -func NewBackendQuota(s *EtcdServer, name string) Quota { - lg := s.Logger() - quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes)) +func NewBackendQuota(cfg config.ServerConfig, be backend.Backend, name string) Quota { + lg := cfg.Logger + quotaBackendBytes.Set(float64(cfg.QuotaBackendBytes)) - if s.Cfg.QuotaBackendBytes < 0 { + if cfg.QuotaBackendBytes < 0 { // disable quotas if negative quotaLogOnce.Do(func() { lg.Info( "disabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), + zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), ) }) return &passthroughQuota{} } - if s.Cfg.QuotaBackendBytes == 0 { + if cfg.QuotaBackendBytes == 0 { // use default size if no quota size given quotaLogOnce.Do(func() { if lg != nil { @@ -100,16 +102,16 @@ func NewBackendQuota(s *EtcdServer, name string) Quota { } }) quotaBackendBytes.Set(float64(DefaultQuotaBytes)) - return &backendQuota{s, DefaultQuotaBytes} + return &BackendQuota{be, DefaultQuotaBytes} } quotaLogOnce.Do(func() { - if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + if cfg.QuotaBackendBytes > MaxQuotaBytes { lg.Warn( "quota exceeds the maximum value", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(cfg.QuotaBackendBytes))), zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), zap.String("quota-maximum-size", maxQuotaSize), ) @@ -117,19 +119,24 @@ func NewBackendQuota(s *EtcdServer, name string) Quota { lg.Info( "enabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), + zap.String("quota-size", humanize.Bytes(uint64(cfg.QuotaBackendBytes))), ) }) - return &backendQuota{s, s.Cfg.QuotaBackendBytes} + return &BackendQuota{be, cfg.QuotaBackendBytes} } -func (b *backendQuota) Available(v interface{}) bool { - // TODO: maybe optimize backend.Size() - return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes +func (b *BackendQuota) Available(v interface{}) bool { + cost := b.Cost(v) + // if there are no mutating requests, it's safe to pass through + if cost == 0 { + return true + } + // TODO: maybe optimize Backend.Size() + return b.be.Size()+int64(cost) < b.maxBackendBytes } -func (b *backendQuota) Cost(v interface{}) int { +func (b *BackendQuota) Cost(v interface{}) int { switch r 
:= v.(type) {
 	case *pb.PutRequest:
 		return costPut(r)
@@ -167,6 +174,6 @@ func costTxn(r *pb.TxnRequest) int {
 	return sizeSuccess
 }
 
-func (b *backendQuota) Remaining() int64 {
-	return b.maxBackendBytes - b.s.Backend().Size()
+func (b *BackendQuota) Remaining() int64 {
+	return b.maxBackendBytes - b.be.Size()
 }
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go
new file mode 100644
index 0000000000..20c8f1193a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+	"go.etcd.io/etcd/server/v3/storage/backend"
+	"go.uber.org/zap"
+)
+
+type action interface {
+	// unsafeDo executes the action and returns a revert action that, when
+	// executed, should restore the state from before.
+	unsafeDo(tx backend.BatchTx) (revert action, err error)
+}
+
+type setKeyAction struct {
+	Bucket     backend.Bucket
+	FieldName  []byte
+	FieldValue []byte
+}
+
+func (a setKeyAction) unsafeDo(tx backend.BatchTx) (action, error) {
+	revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+	tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue)
+	return revert, nil
+}
+
+type deleteKeyAction struct {
+	Bucket    backend.Bucket
+	FieldName []byte
+}
+
+func (a deleteKeyAction) unsafeDo(tx backend.BatchTx) (action, error) {
+	revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+	tx.UnsafeDelete(a.Bucket, a.FieldName)
+	return revert, nil
+}
+
+func restoreFieldValueAction(tx backend.BatchTx, bucket backend.Bucket, fieldName []byte) action {
+	_, vs := tx.UnsafeRange(bucket, fieldName, nil, 1)
+	if len(vs) == 1 {
+		return &setKeyAction{
+			Bucket:     bucket,
+			FieldName:  fieldName,
+			FieldValue: vs[0],
+		}
+	}
+	return &deleteKeyAction{
+		Bucket:    bucket,
+		FieldName: fieldName,
+	}
+}
+
+type ActionList []action
+
+// unsafeExecute executes actions one by one. If any of the actions returns an
+// error, the already executed actions are reverted.
+func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.BatchTx) error {
+	var revertActions = make(ActionList, 0, len(as))
+	for _, a := range as {
+		revert, err := a.unsafeDo(tx)
+
+		if err != nil {
+			revertActions.unsafeExecuteInReversedOrder(lg, tx)
+			return err
+		}
+		revertActions = append(revertActions, revert)
+	}
+	return nil
+}
+
+// unsafeExecuteInReversedOrder executes actions in reversed order. Will panic on
+// action error. Should be used when reverting.
+func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.BatchTx) { + for j := len(as) - 1; j >= 0; j-- { + _, err := as[j].unsafeDo(tx) + if err != nil { + lg.Panic("Cannot recover from revert error", zap.Error(err)) + } + } +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go new file mode 100644 index 0000000000..605bb3a0bf --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go @@ -0,0 +1,96 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.uber.org/zap" +) + +type alarmBackend struct { + lg *zap.Logger + be backend.Backend +} + +func NewAlarmBackend(lg *zap.Logger, be backend.Backend) *alarmBackend { + return &alarmBackend{ + lg: lg, + be: be, + } +} + +func (s *alarmBackend) CreateAlarmBucket() { + tx := s.be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafeCreateBucket(Alarm) +} + +func (s *alarmBackend) MustPutAlarm(alarm *etcdserverpb.AlarmMember) { + tx := s.be.BatchTx() + tx.Lock() + defer tx.Unlock() + s.mustUnsafePutAlarm(tx, alarm) +} + +func (s *alarmBackend) mustUnsafePutAlarm(tx backend.BatchTx, alarm *etcdserverpb.AlarmMember) { + v, err := alarm.Marshal() + if err != nil { + s.lg.Panic("failed to marshal alarm member", zap.Error(err)) + } + + tx.UnsafePut(Alarm, v, nil) +} + +func (s *alarmBackend) MustDeleteAlarm(alarm *etcdserverpb.AlarmMember) { + tx := s.be.BatchTx() + tx.Lock() + defer tx.Unlock() + s.mustUnsafeDeleteAlarm(tx, alarm) +} + +func (s *alarmBackend) mustUnsafeDeleteAlarm(tx backend.BatchTx, alarm *etcdserverpb.AlarmMember) { + v, err := alarm.Marshal() + if err != nil { + s.lg.Panic("failed to marshal alarm member", zap.Error(err)) + } + + tx.UnsafeDelete(Alarm, v) +} + +func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) { + tx := s.be.ReadTx() + tx.Lock() + defer tx.Unlock() + return s.unsafeGetAllAlarms(tx) +} + +func (s *alarmBackend) unsafeGetAllAlarms(tx backend.ReadTx) ([]*etcdserverpb.AlarmMember, error) { + ms := []*etcdserverpb.AlarmMember{} + err := tx.UnsafeForEach(Alarm, func(k, v []byte) error { + var m etcdserverpb.AlarmMember + if err := m.Unmarshal(k); err != nil { + return err + } + ms = append(ms, &m) + return nil + }) + return ms, err +} + +func (s alarmBackend) ForceCommit() { + s.be.ForceCommit() +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go new file mode 100644 index 0000000000..93ef34c371 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go @@ -0,0 +1,113 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package schema + +import ( + "bytes" + "encoding/binary" + + "go.uber.org/zap" + + "go.etcd.io/etcd/server/v3/auth" + "go.etcd.io/etcd/server/v3/storage/backend" +) + +const ( + revBytesLen = 8 +) + +var ( + authEnabled = []byte{1} + authDisabled = []byte{0} +) + +type authBackend struct { + be backend.Backend + lg *zap.Logger +} + +var _ auth.AuthBackend = (*authBackend)(nil) + +func NewAuthBackend(lg *zap.Logger, be backend.Backend) *authBackend { + return &authBackend{ + be: be, + lg: lg, + } +} + +func (abe *authBackend) CreateAuthBuckets() { + tx := abe.be.BatchTx() + tx.Lock() + defer tx.Unlock() + tx.UnsafeCreateBucket(Auth) + tx.UnsafeCreateBucket(AuthUsers) + tx.UnsafeCreateBucket(AuthRoles) +} + +func (abe *authBackend) ForceCommit() { + abe.be.ForceCommit() +} + +func (abe *authBackend) BatchTx() auth.AuthBatchTx { + return &authBatchTx{tx: abe.be.BatchTx(), lg: abe.lg} +} + +type authBatchTx struct { + tx backend.BatchTx + lg *zap.Logger +} + +var _ auth.AuthBatchTx = (*authBatchTx)(nil) + +func (atx *authBatchTx) UnsafeSaveAuthEnabled(enabled bool) { + if enabled { + atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authEnabled) + } else { + atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authDisabled) + } +} + +func (atx *authBatchTx) UnsafeSaveAuthRevision(rev uint64) { + revBytes := make([]byte, revBytesLen) + binary.BigEndian.PutUint64(revBytes, rev) + atx.tx.UnsafePut(Auth, AuthRevisionKeyName, revBytes) +} + +func (atx *authBatchTx) UnsafeReadAuthEnabled() bool { + _, vs := atx.tx.UnsafeRange(Auth, AuthEnabledKeyName, nil, 0) + if len(vs) == 1 { + if bytes.Equal(vs[0], authEnabled) { + return true + } + } + return false +} + +func (atx *authBatchTx) UnsafeReadAuthRevision() uint64 { + _, vs := atx.tx.UnsafeRange(Auth, AuthRevisionKeyName, nil, 0) + if len(vs) != 1 { + // this can happen in the initialization phase + return 0 + } + return binary.BigEndian.Uint64(vs[0]) +} + +func (atx *authBatchTx) Lock() { + atx.tx.Lock() +} + +func (atx *authBatchTx) Unlock() { + atx.tx.Unlock() +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go new file mode 100644 index 0000000000..541e37b719 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go @@ -0,0 +1,88 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
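The auth backend above stores the enabled flag as a single byte ([]byte{1} or []byte{0}) under AuthEnabledKeyName, and the auth revision as an 8-byte big-endian counter under AuthRevisionKeyName. A self-contained sketch of the same round-trips, with a plain map standing in for the bolt bucket (illustrative only; the real code goes through backend.BatchTx):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var (
	authEnabled  = []byte{1}
	authDisabled = []byte{0}
	bucket       = map[string][]byte{} // stand-in for the "auth" bucket
)

func saveAuthEnabled(enabled bool) {
	if enabled {
		bucket["authEnabled"] = authEnabled
	} else {
		bucket["authEnabled"] = authDisabled
	}
}

func readAuthEnabled() bool {
	v, ok := bucket["authEnabled"]
	return ok && bytes.Equal(v, authEnabled)
}

func saveAuthRevision(rev uint64) {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, rev)
	bucket["authRevision"] = b
}

func main() {
	saveAuthEnabled(true)
	saveAuthRevision(42)
	fmt.Println(readAuthEnabled(), binary.BigEndian.Uint64(bucket["authRevision"]))
}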
+ +package schema + +import ( + "go.etcd.io/etcd/api/v3/authpb" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.uber.org/zap" +) + +func UnsafeCreateAuthRolesBucket(tx backend.BatchTx) { + tx.UnsafeCreateBucket(AuthRoles) +} + +func (abe *authBackend) GetRole(roleName string) *authpb.Role { + tx := abe.BatchTx() + tx.Lock() + defer tx.Unlock() + return tx.UnsafeGetRole(roleName) +} + +func (atx *authBatchTx) UnsafeGetRole(roleName string) *authpb.Role { + _, vs := atx.tx.UnsafeRange(AuthRoles, []byte(roleName), nil, 0) + if len(vs) == 0 { + return nil + } + + role := &authpb.Role{} + err := role.Unmarshal(vs[0]) + if err != nil { + atx.lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } + return role +} + +func (abe *authBackend) GetAllRoles() []*authpb.Role { + tx := abe.BatchTx() + tx.Lock() + defer tx.Unlock() + return tx.UnsafeGetAllRoles() +} + +func (atx *authBatchTx) UnsafeGetAllRoles() []*authpb.Role { + _, vs := atx.tx.UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1) + if len(vs) == 0 { + return nil + } + + roles := make([]*authpb.Role, len(vs)) + for i := range vs { + role := &authpb.Role{} + err := role.Unmarshal(vs[i]) + if err != nil { + atx.lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err)) + } + roles[i] = role + } + return roles +} + +func (atx *authBatchTx) UnsafePutRole(role *authpb.Role) { + b, err := role.Marshal() + if err != nil { + atx.lg.Panic( + "failed to marshal 'authpb.Role'", + zap.String("role-name", string(role.Name)), + zap.Error(err), + ) + } + + atx.tx.UnsafePut(AuthRoles, role.Name, b) +} + +func (atx *authBatchTx) UnsafeDeleteRole(rolename string) { + atx.tx.UnsafeDelete(AuthRoles, []byte(rolename)) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go new file mode 100644 index 0000000000..f385afa512 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go @@ -0,0 +1,82 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
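GetAllRoles above reads every entry in the roles bucket via UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1). Against the bbolt store that backs these buckets, the equivalent full-bucket scan looks roughly like this (a sketch assuming go.etcd.io/bbolt; the file, bucket, and key names are placeholders):

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// seed one entry so the scan below has something to print
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("authRoles"))
		if err != nil {
			return err
		}
		return b.Put([]byte("root"), []byte("{}"))
	}); err != nil {
		log.Fatal(err)
	}

	// scan the whole bucket, as UnsafeRange with {0}..{0xff} and limit -1 does
	if err := db.View(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("authRoles")).ForEach(func(k, v []byte) error {
			fmt.Printf("%s -> %s\n", k, v)
			return nil
		})
	}); err != nil {
		log.Fatal(err)
	}
}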
+
+package schema
+
+import (
+	"go.etcd.io/etcd/api/v3/authpb"
+	"go.uber.org/zap"
+)
+
+func (abe *authBackend) GetUser(username string) *authpb.User {
+	tx := abe.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	return tx.UnsafeGetUser(username)
+}
+
+func (atx *authBatchTx) UnsafeGetUser(username string) *authpb.User {
+	_, vs := atx.tx.UnsafeRange(AuthUsers, []byte(username), nil, 0)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	user := &authpb.User{}
+	err := user.Unmarshal(vs[0])
+	if err != nil {
+		atx.lg.Panic(
+			"failed to unmarshal 'authpb.User'",
+			zap.String("user-name", username),
+			zap.Error(err),
+		)
+	}
+	return user
+}
+
+func (abe *authBackend) GetAllUsers() []*authpb.User {
+	tx := abe.BatchTx()
+	tx.Lock()
+	defer tx.Unlock()
+	return tx.UnsafeGetAllUsers()
+}
+
+func (atx *authBatchTx) UnsafeGetAllUsers() []*authpb.User {
+	_, vs := atx.tx.UnsafeRange(AuthUsers, []byte{0}, []byte{0xff}, -1)
+	if len(vs) == 0 {
+		return nil
+	}
+
+	users := make([]*authpb.User, len(vs))
+	for i := range vs {
+		user := &authpb.User{}
+		err := user.Unmarshal(vs[i])
+		if err != nil {
+			atx.lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+		}
+		users[i] = user
+	}
+	return users
+}
+
+func (atx *authBatchTx) UnsafePutUser(user *authpb.User) {
+	b, err := user.Marshal()
+	if err != nil {
+		atx.lg.Panic("failed to marshal 'authpb.User'", zap.Error(err))
+	}
+	atx.tx.UnsafePut(AuthUsers, user.Name, b)
+}
+
+func (atx *authBatchTx) UnsafeDeleteUser(username string) {
+	atx.tx.UnsafeDelete(AuthUsers, []byte(username))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
similarity index 75%
rename from vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
index 9214f72f25..e5eda721b5 100644
--- a/vendor/go.etcd.io/etcd/server/v3/mvcc/buckets/bucket.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
@@ -12,12 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package buckets
+package schema
 
 import (
 	"bytes"
 
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
+	"go.etcd.io/etcd/client/pkg/v3/types"
+	"go.etcd.io/etcd/server/v3/storage/backend"
 )
 
 var (
@@ -67,14 +68,31 @@ func (b bucket) String() string { return string(b.Name()) }
 func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket }
 
 var (
+	// Pre v3.5
+	ScheduledCompactKeyName    = []byte("scheduledCompactRev")
+	FinishedCompactKeyName     = []byte("finishedCompactRev")
 	MetaConsistentIndexKeyName = []byte("consistent_index")
-	MetaTermKeyName            = []byte("term")
+	AuthEnabledKeyName         = []byte("authEnabled")
+	AuthRevisionKeyName        = []byte("authRevision")
+	// Since v3.5
+	MetaTermKeyName              = []byte("term")
+	MetaConfStateName            = []byte("confState")
+	ClusterClusterVersionKeyName = []byte("clusterVersion")
+	ClusterDowngradeKeyName      = []byte("downgrade")
+	// Since v3.6
+	MetaStorageVersionName = []byte("storageVersion")
+	// Before adding new meta key please update server/etcdserver/version
 )
 
 // DefaultIgnores defines buckets & keys to ignore in hash checking.
 func DefaultIgnores(bucket, key []byte) bool {
 	// consistent index & term might be changed due to v2 internal sync, which
 	// is not controllable by the user.
+	// storage version might change after a wal snapshot and is not controllable by the user.
 	return bytes.Compare(bucket, Meta.Name()) == 0 &&
-		(bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0)
+		(bytes.Compare(key, MetaTermKeyName) == 0 || bytes.Compare(key, MetaConsistentIndexKeyName) == 0 || bytes.Compare(key, MetaStorageVersionName) == 0)
+}
+
+func BackendMemberKey(id types.ID) []byte {
+	return []byte(id.String())
 }
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
new file mode 100644
index 0000000000..6eb0b75120
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import "go.etcd.io/etcd/server/v3/storage/backend"
+
+type schemaChange interface {
+	upgradeAction() action
+	downgradeAction() action
+}
+
+// addNewField represents adding a new field when upgrading. Downgrade will remove the field.
+func addNewField(bucket backend.Bucket, fieldName []byte, fieldValue []byte) schemaChange {
+	return simpleSchemaChange{
+		upgrade: setKeyAction{
+			Bucket:     bucket,
+			FieldName:  fieldName,
+			FieldValue: fieldValue,
+		},
+		downgrade: deleteKeyAction{
+			Bucket:    bucket,
+			FieldName: fieldName,
+		},
+	}
+}
+
+type simpleSchemaChange struct {
+	upgrade   action
+	downgrade action
+}
+
+func (c simpleSchemaChange) upgradeAction() action {
+	return c.upgrade
+}
+
+func (c simpleSchemaChange) downgradeAction() action {
+	return c.downgrade
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go
new file mode 100644
index 0000000000..d7b06b9cef
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+	"encoding/binary"
+	"go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func UnsafeCreateMetaBucket(tx backend.BatchTx) {
+	tx.UnsafeCreateBucket(Meta)
+}
+
+// CreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func CreateMetaBucket(tx backend.BatchTx) {
+	tx.Lock()
+	defer tx.Unlock()
+	tx.UnsafeCreateBucket(Meta)
+}
+
+// UnsafeReadConsistentIndex loads consistent index & term from given transaction.
+// returns 0,0 if the data are not found.
+// Term is persisted since v3.5.
+func UnsafeReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+	_, vs := tx.UnsafeRange(Meta, MetaConsistentIndexKeyName, nil, 0)
+	if len(vs) == 0 {
+		return 0, 0
+	}
+	v := binary.BigEndian.Uint64(vs[0])
+	_, ts := tx.UnsafeRange(Meta, MetaTermKeyName, nil, 0)
+	if len(ts) == 0 {
+		return v, 0
+	}
+	t := binary.BigEndian.Uint64(ts[0])
+	return v, t
+}
+
+// ReadConsistentIndex loads consistent index and term from given transaction.
+// returns 0,0 if the data are not found.
+func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+	tx.Lock()
+	defer tx.Unlock()
+	return UnsafeReadConsistentIndex(tx)
+}
+
+func UnsafeUpdateConsistentIndex(tx backend.BatchTx, index uint64, term uint64, onlyGrow bool) {
+	if index == 0 {
+		// Never save 0 as it means that we didn't load the real index yet.
+		return
+	}
+
+	if onlyGrow {
+		oldi, oldTerm := UnsafeReadConsistentIndex(tx)
+		if term < oldTerm {
+			return
+		}
+		if index > oldi {
+			bs1 := make([]byte, 8)
+			binary.BigEndian.PutUint64(bs1, index)
+			// put the index into the underlying backend
+			// tx has been locked in TxnBegin, so there is no need to lock it again
+			tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1)
+		}
+		if term > 0 && term > oldTerm {
+			bs2 := make([]byte, 8)
+			binary.BigEndian.PutUint64(bs2, term)
+			tx.UnsafePut(Meta, MetaTermKeyName, bs2)
+		}
+		return
+	}
+
+	bs1 := make([]byte, 8)
+	binary.BigEndian.PutUint64(bs1, index)
+	// put the index into the underlying backend
+	// tx has been locked in TxnBegin, so there is no need to lock it again
+	tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1)
+	if term > 0 {
+		bs2 := make([]byte, 8)
+		binary.BigEndian.PutUint64(bs2, term)
+		tx.UnsafePut(Meta, MetaTermKeyName, bs2)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
similarity index 82%
rename from vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
index 3aa8c649b2..a0fdad1635 100644
--- a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/confstate.go
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
@@ -12,22 +12,17 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package membership
+package schema
 
 import (
 	"encoding/json"
 	"log"
 
 	"go.etcd.io/etcd/raft/v3/raftpb"
-	"go.etcd.io/etcd/server/v3/mvcc/backend"
-	"go.etcd.io/etcd/server/v3/mvcc/buckets"
+	"go.etcd.io/etcd/server/v3/storage/backend"
 	"go.uber.org/zap"
 )
 
-var (
-	confStateKey = []byte("confState")
-)
-
 // MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx).
 // confState in backend is persisted since etcd v3.5.
 func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confState *raftpb.ConfState) {
@@ -36,20 +31,20 @@ func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.BatchTx, confSt
 		lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
 	}
 
-	tx.UnsafePut(buckets.Meta, confStateKey, confStateBytes)
+	tx.UnsafePut(Meta, MetaConfStateName, confStateBytes)
 }
 
 // UnsafeConfStateFromBackend retrieves ConfState from the backend.
 // Returns nil if confState in backend is not persisted (e.g. backend written by etcd <v3.5).

+// AssertNoV2StoreContent - depending on the deprecation stage, warns or reports an error
+// if the v2store contains custom content.
+func AssertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
+	metaOnly, err := membership.IsMetaStoreOnly(st)
+	if err != nil {
+		return err
+	}
+	if metaOnly {
+		return nil
+	}
+	if deprecationStage.IsAtLeast(config.V2_DEPR_1_WRITE_ONLY) {
+		return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
+	}
+	lg.Warn("detected custom v2store content. Etcd v3.5 is the last version that allows accessing it using API v2. Please remove the content.")
+	return nil
+}
+
+// CreateConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+	found := false
+	for _, id := range ids {
+		if id == self {
+			found = true
+		}
+	}
+
+	var ents []raftpb.Entry
+	next := index + 1
+
+	// NB: always add self first, then remove other nodes. Raft will panic if the
+	// set of voters ever becomes empty.
+	if !found {
+		m := membership.Member{
+			ID:             types.ID(self),
+			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+		}
+		ctx, err := json.Marshal(m)
+		if err != nil {
+			lg.Panic("failed to marshal member", zap.Error(err))
+		}
+		cc := &raftpb.ConfChange{
+			Type:    raftpb.ConfChangeAddNode,
+			NodeID:  self,
+			Context: ctx,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+		next++
+	}
+
+	for _, id := range ids {
+		if id == self {
+			continue
+		}
+		cc := &raftpb.ConfChange{
+			Type:   raftpb.ConfChangeRemoveNode,
+			NodeID: id,
+		}
+		e := raftpb.Entry{
+			Type:  raftpb.EntryConfChange,
+			Data:  pbutil.MustMarshal(cc),
+			Term:  term,
+			Index: next,
+		}
+		ents = append(ents, e)
+		next++
+	}
+
+	return ents
+}
+
+// GetEffectiveNodeIDsFromWalEntries returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain three kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which case the contained ID will be added into the set.
+func GetEffectiveNodeIDsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 { + ids := make(map[uint64]bool) + if snap != nil { + for _, id := range snap.Metadata.ConfState.Voters { + ids[id] = true + } + } + for _, e := range ents { + if e.Type != raftpb.EntryConfChange { + continue + } + var cc raftpb.ConfChange + pbutil.MustUnmarshal(&cc, e.Data) + switch cc.Type { + case raftpb.ConfChangeAddLearnerNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeAddNode: + ids[cc.NodeID] = true + case raftpb.ConfChangeRemoveNode: + delete(ids, cc.NodeID) + case raftpb.ConfChangeUpdateNode: + // do nothing + default: + lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String())) + } + } + sids := make(types.Uint64Slice, 0, len(ids)) + for id := range ids { + sids = append(sids, id) + } + sort.Sort(sids) + return []uint64(sids) +} diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go similarity index 99% rename from vendor/go.etcd.io/etcd/server/v3/wal/decoder.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go index 0251a72133..7cc634a2ea 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/decoder.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go @@ -24,7 +24,7 @@ import ( "go.etcd.io/etcd/pkg/v3/crc" "go.etcd.io/etcd/pkg/v3/pbutil" "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" ) const minSectorSize = 512 diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go similarity index 85% rename from vendor/go.etcd.io/etcd/server/v3/wal/doc.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go index 7ea348e4a9..32fa6162a2 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/doc.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go @@ -13,11 +13,11 @@ // limitations under the License. /* -Package wal provides an implementation of a write ahead log that is used by +Package wal provides an implementation of write ahead log that is used by etcd. A WAL is created at a particular directory and is made up of a number of -segmented WAL files. Inside of each file the raft state and entries are appended +segmented WAL files. Inside each file the raft state and entries are appended to it with the Save method: metadata := []byte{} @@ -41,18 +41,18 @@ protobuf. The record protobuf contains a CRC, a type, and a data payload. The le record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32 value of all record protobufs preceding the current record. -WAL files are placed inside of the directory in the following format: +WAL files are placed inside the directory in the following format: $seq-$index.wal The first WAL file to be created will be 0000000000000000-0000000000000000.wal indicating an initial sequence of 0 and an initial raft index of 0. The first entry written to WAL MUST have raft index 0. -WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal +WAL will cut its current tail wal file if its size exceeds 64 MB. This will increment an internal sequence number and cause a new file to be created. If the last raft index saved was 0x20 and this is the first time cut has been called on this WAL then the sequence will increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. 
-If a second cut issues 0x10 entries with incremental index later then the file will be called: +If a second cut issues 0x10 entries with incremental index later, then the file will be called: 0000000000000002-0000000000000031.wal. At a later time a WAL can be opened at a particular snapshot. If there is no @@ -63,7 +63,7 @@ snapshot, an empty snapshot should be passed in. The snapshot must have been written to the WAL. -Additional items cannot be Saved to this WAL until all of the items from the given +Additional items cannot be Saved to this WAL until all the items from the given snapshot to the end of the WAL are read first: metadata, state, ents, err := w.ReadAll() diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/encoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go similarity index 98% rename from vendor/go.etcd.io/etcd/server/v3/wal/encoder.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go index 61b4c20efb..d9e221ff20 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/encoder.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go @@ -23,7 +23,7 @@ import ( "go.etcd.io/etcd/pkg/v3/crc" "go.etcd.io/etcd/pkg/v3/ioutil" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" ) // walPageBytes is the alignment for flushing records to the backing Writer. diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/file_pipeline.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/file_pipeline.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/metrics.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go similarity index 89% rename from vendor/go.etcd.io/etcd/server/v3/wal/repair.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go index 122ee49a6a..c007763deb 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/repair.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go @@ -21,7 +21,7 @@ import ( "time" "go.etcd.io/etcd/client/pkg/v3/fileutil" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" "go.uber.org/zap" ) @@ -64,9 +64,10 @@ func Repair(lg *zap.Logger, dirpath string) bool { return true case io.ErrUnexpectedEOF: - bf, bferr := os.Create(f.Name() + ".broken") + brokenName := f.Name() + ".broken" + bf, bferr := os.Create(brokenName) if bferr != nil { - lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr)) + lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr)) return false } defer bf.Close() @@ -77,7 +78,7 @@ func Repair(lg *zap.Logger, dirpath string) bool { } if _, err = io.Copy(bf, f); err != nil { - lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err)) + lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err)) return false } diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/util.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/util.go rename to 
vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go new file mode 100644 index 0000000000..07a441f684 --- /dev/null +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go @@ -0,0 +1,282 @@ +// Copyright 2021 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package wal + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/pkg/v3/pbutil" + "go.etcd.io/etcd/raft/v3/raftpb" +) + +// ReadWALVersion reads remaining entries from opened WAL and returns struct +// that implements schema.WAL interface. +func ReadWALVersion(w *WAL) (*walVersion, error) { + _, _, ents, err := w.ReadAll() + if err != nil { + return nil, err + } + return &walVersion{entries: ents}, nil +} + +type walVersion struct { + entries []raftpb.Entry +} + +// MinimalEtcdVersion returns minimal etcd able to interpret entries from WAL log, +func (w *walVersion) MinimalEtcdVersion() *semver.Version { + return MinimalEtcdVersion(w.entries) +} + +// MinimalEtcdVersion returns minimal etcd able to interpret entries from WAL log, +// determined by looking at entries since the last snapshot and returning the highest +// etcd version annotation from used messages, fields, enums and their values. +func MinimalEtcdVersion(ents []raftpb.Entry) *semver.Version { + var maxVer *semver.Version + for _, ent := range ents { + err := visitEntry(ent, func(path protoreflect.FullName, ver *semver.Version) error { + maxVer = maxVersion(maxVer, ver) + return nil + }) + if err != nil { + panic(err) + } + } + return maxVer +} + +type Visitor func(path protoreflect.FullName, ver *semver.Version) error + +// VisitFileDescriptor calls visitor on each field and enum value with etcd version read from proto definition. +// If field/enum value is not annotated, visitor will be called with nil. +// Upon encountering invalid annotation, will immediately exit with error. 
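MinimalEtcdVersion above folds every visited version annotation through a max; the accumulator is the maxVersion helper defined further down in this same file. A small self-contained sketch of that folding with the same go-semver types (the sample version strings are made up):

    package main

    import (
        "fmt"

        "github.com/coreos/go-semver/semver"
    )

    // maxVersion mirrors the helper defined later in version.go: nil means
    // "no annotation seen yet"; otherwise keep the higher version.
    func maxVersion(a, b *semver.Version) *semver.Version {
        if a != nil && (b == nil || b.LessThan(*a)) {
            return a
        }
        return b
    }

    func main() {
        var maxVer *semver.Version
        // Three visited descriptors annotated "3.5.0", "3.6.0", "3.5.0":
        for _, s := range []string{"3.5.0", "3.6.0", "3.5.0"} {
            maxVer = maxVersion(maxVer, semver.New(s))
        }
        fmt.Println(maxVer) // 3.6.0 -- the minimal etcd able to read this WAL
    }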
+func VisitFileDescriptor(file protoreflect.FileDescriptor, visitor Visitor) error { + msgs := file.Messages() + for i := 0; i < msgs.Len(); i++ { + err := visitMessageDescriptor(msgs.Get(i), visitor) + if err != nil { + return err + } + } + enums := file.Enums() + for i := 0; i < enums.Len(); i++ { + err := visitEnumDescriptor(enums.Get(i), visitor) + if err != nil { + return err + } + } + return nil +} + +func visitEntry(ent raftpb.Entry, visitor Visitor) error { + err := visitMessage(proto.MessageReflect(&ent), visitor) + if err != nil { + return err + } + return visitEntryData(ent.Type, ent.Data, visitor) +} + +func visitEntryData(entryType raftpb.EntryType, data []byte, visitor Visitor) error { + var msg protoreflect.Message + switch entryType { + case raftpb.EntryNormal: + var raftReq etcdserverpb.InternalRaftRequest + if err := pbutil.Unmarshaler(&raftReq).Unmarshal(data); err != nil { + // try V2 Request + var r etcdserverpb.Request + if pbutil.Unmarshaler(&r).Unmarshal(data) != nil { + // return original error + return err + } + msg = proto.MessageReflect(&r) + break + } + msg = proto.MessageReflect(&raftReq) + if raftReq.ClusterVersionSet != nil { + ver, err := semver.NewVersion(raftReq.ClusterVersionSet.Ver) + if err != nil { + return err + } + err = visitor(msg.Descriptor().FullName(), ver) + if err != nil { + return err + } + } + case raftpb.EntryConfChange: + var confChange raftpb.ConfChange + err := pbutil.Unmarshaler(&confChange).Unmarshal(data) + if err != nil { + return nil + } + msg = proto.MessageReflect(&confChange) + case raftpb.EntryConfChangeV2: + var confChange raftpb.ConfChangeV2 + err := pbutil.Unmarshaler(&confChange).Unmarshal(data) + if err != nil { + return nil + } + msg = proto.MessageReflect(&confChange) + default: + panic("unhandled") + } + return visitMessage(msg, visitor) +} + +func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor) error { + err := visitDescriptor(md, visitor) + if err != nil { + return err + } + fields := md.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + err = visitDescriptor(fd, visitor) + if err != nil { + return err + } + } + + enums := md.Enums() + for i := 0; i < enums.Len(); i++ { + err := visitEnumDescriptor(enums.Get(i), visitor) + if err != nil { + return err + } + } + return err +} + +func visitMessage(m protoreflect.Message, visitor Visitor) error { + md := m.Descriptor() + err := visitDescriptor(md, visitor) + if err != nil { + return err + } + m.Range(func(field protoreflect.FieldDescriptor, value protoreflect.Value) bool { + fd := md.Fields().Get(field.Index()) + err = visitDescriptor(fd, visitor) + if err != nil { + return false + } + + switch m := value.Interface().(type) { + case protoreflect.Message: + err = visitMessage(m, visitor) + case protoreflect.EnumNumber: + err = visitEnumNumber(fd.Enum(), m, visitor) + } + if err != nil { + return false + } + return true + }) + return err +} + +func visitEnumDescriptor(enum protoreflect.EnumDescriptor, visitor Visitor) error { + err := visitDescriptor(enum, visitor) + if err != nil { + return err + } + fields := enum.Values() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + err = visitDescriptor(fd, visitor) + if err != nil { + return err + } + } + return err +} + +func visitEnumNumber(enum protoreflect.EnumDescriptor, number protoreflect.EnumNumber, visitor Visitor) error { + err := visitDescriptor(enum, visitor) + if err != nil { + return err + } + intNumber := int(number) + fields := enum.Values() + if intNumber 
>= fields.Len() || intNumber < 0 { + return fmt.Errorf("could not visit EnumNumber [%d]", intNumber) + } + return visitEnumValue(fields.Get(intNumber), visitor) +} + +func visitEnumValue(enum protoreflect.EnumValueDescriptor, visitor Visitor) error { + valueOpts := enum.Options().(*descriptorpb.EnumValueOptions) + if valueOpts != nil { + ver, _ := etcdVersionFromOptionsString(valueOpts.String()) + err := visitor(enum.FullName(), ver) + if err != nil { + return err + } + } + return nil +} + +func visitDescriptor(md protoreflect.Descriptor, visitor Visitor) error { + opts, ok := md.Options().(fmt.Stringer) + if !ok { + return nil + } + ver, err := etcdVersionFromOptionsString(opts.String()) + if err != nil { + return fmt.Errorf("%s: %s", md.FullName(), err) + } + return visitor(md.FullName(), ver) +} + +func maxVersion(a *semver.Version, b *semver.Version) *semver.Version { + if a != nil && (b == nil || b.LessThan(*a)) { + return a + } + return b +} + +func etcdVersionFromOptionsString(opts string) (*semver.Version, error) { + // TODO: Use proto.GetExtention when gogo/protobuf is usable with protoreflect + msgs := []string{"[versionpb.etcd_version_msg]:", "[versionpb.etcd_version_field]:", "[versionpb.etcd_version_enum]:", "[versionpb.etcd_version_enum_value]:"} + var end, index int + for _, msg := range msgs { + index = strings.Index(opts, msg) + end = index + len(msg) + if index != -1 { + break + } + } + if index == -1 { + return nil, nil + } + var verStr string + _, err := fmt.Sscanf(opts[end:], "%q", &verStr) + if err != nil { + return nil, err + } + if strings.Count(verStr, ".") == 1 { + verStr = verStr + ".0" + } + ver, err := semver.NewVersion(verStr) + if err != nil { + return nil, err + } + return ver, nil +} diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go similarity index 98% rename from vendor/go.etcd.io/etcd/server/v3/wal/wal.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go index 3c940e0cde..187cfe397c 100644 --- a/vendor/go.etcd.io/etcd/server/v3/wal/wal.go +++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go @@ -30,7 +30,7 @@ import ( "go.etcd.io/etcd/pkg/v3/pbutil" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" "go.uber.org/zap" ) @@ -116,7 +116,7 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { } defer os.RemoveAll(tmpdirpath) - if err := fileutil.CreateDirAll(tmpdirpath); err != nil { + if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil { lg.Warn( "failed to create a temporary WAL directory", zap.String("tmp-dir-path", tmpdirpath), @@ -234,6 +234,14 @@ func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) { return w, nil } +func (w *WAL) Reopen(lg *zap.Logger, snap walpb.Snapshot) (*WAL, error) { + err := w.Close() + if err != nil { + lg.Panic("failed to close WAL during reopen", zap.Error(err)) + } + return Open(lg, w.dir, snap) +} + func (w *WAL) SetUnsafeNoFsync() { w.unsafeNoSync = true } diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.pb.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go similarity index 100% rename 
from vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.pb.go rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go diff --git a/vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.proto b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto similarity index 100% rename from vendor/go.etcd.io/etcd/server/v3/wal/walpb/record.proto rename to vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto diff --git a/vendor/go.etcd.io/etcd/server/v3/verify/verify.go b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go index f727201ce8..f1de10b5da 100644 --- a/vendor/go.etcd.io/etcd/server/v3/verify/verify.go +++ b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go @@ -19,11 +19,11 @@ import ( "os" "go.etcd.io/etcd/raft/v3/raftpb" - "go.etcd.io/etcd/server/v3/datadir" - "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/mvcc/backend" - wal2 "go.etcd.io/etcd/server/v3/wal" - "go.etcd.io/etcd/server/v3/wal/walpb" + "go.etcd.io/etcd/server/v3/storage/backend" + "go.etcd.io/etcd/server/v3/storage/datadir" + "go.etcd.io/etcd/server/v3/storage/schema" + wal2 "go.etcd.io/etcd/server/v3/storage/wal" + "go.etcd.io/etcd/server/v3/storage/wal/walpb" "go.uber.org/zap" ) @@ -109,7 +109,7 @@ func MustVerifyIfEnabled(cfg Config) { func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error { tx := be.BatchTx() - index, term := cindex.ReadConsistentIndex(tx) + index, term := schema.ReadConsistentIndex(tx) if cfg.ExactIndex && index != hardstate.Commit { return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit) } diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/bridge.go b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/bridge.go similarity index 83% rename from vendor/go.etcd.io/etcd/tests/v3/integration/bridge.go rename to vendor/go.etcd.io/etcd/tests/v3/framework/integration/bridge.go index 1d2be109ee..74aaf8ab94 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/bridge.go +++ b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/bridge.go @@ -15,22 +15,21 @@ package integration import ( - "fmt" "io" - "io/ioutil" "net" "sync" - - "go.etcd.io/etcd/client/pkg/v3/transport" ) -// bridge creates a unix socket bridge to another unix socket, making it possible +type Dialer interface { + Dial() (net.Conn, error) +} + +// bridge proxies connections between listener and dialer, making it possible // to disconnect grpc network connections without closing the logical grpc connection. 
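The bridge declared next is what makes connection-level fault injection possible: clients dial the bridge, the bridge dials the real gRPC socket, and a test can drop, pause, or blackhole the proxied connections while the clientv3 handle stays open. A hedged sketch of the intended use, assuming the NewCluster/ClusterConfig/Terminate helpers of this framework package with the signatures shown further down in this diff:

    package integration

    import (
        "context"
        "testing"
    )

    func TestClientSurvivesDroppedConns(t *testing.T) {
        // UseBridge routes the member's client traffic through the bridge.
        clus := NewCluster(t, &ClusterConfig{Size: 1, UseBridge: true})
        defer clus.Terminate(t)

        m := clus.Members[0]
        // Sever every proxied connection; the gRPC client is expected to
        // redial through the bridge without being recreated.
        m.Bridge().DropConnections()

        ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
        defer cancel()
        if _, err := m.Client.Get(ctx, "foo"); err != nil {
            t.Fatalf("get after dropped connections: %v", err)
        }
    }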
type bridge struct { - inaddr string - outaddr string - l net.Listener - conns map[*bridgeConn]struct{} + dialer Dialer + l net.Listener + conns map[*bridgeConn]struct{} stopc chan struct{} pausec chan struct{} @@ -40,30 +39,22 @@ type bridge struct { mu sync.Mutex } -func newBridge(addr string) (*bridge, error) { +func newBridge(dialer Dialer, listener net.Listener) (*bridge, error) { b := &bridge{ // bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number - inaddr: addr + "0", - outaddr: addr, + dialer: dialer, + l: listener, conns: make(map[*bridgeConn]struct{}), stopc: make(chan struct{}), pausec: make(chan struct{}), blackholec: make(chan struct{}), } close(b.pausec) - - l, err := transport.NewUnixListener(b.inaddr) - if err != nil { - return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err) - } - b.l = l b.wg.Add(1) go b.serveListen() return b, nil } -func (b *bridge) URL() string { return "unix://" + b.inaddr } - func (b *bridge) Close() { b.l.Close() b.mu.Lock() @@ -76,7 +67,7 @@ func (b *bridge) Close() { b.wg.Wait() } -func (b *bridge) Reset() { +func (b *bridge) DropConnections() { b.mu.Lock() defer b.mu.Unlock() for bc := range b.conns { @@ -85,13 +76,13 @@ func (b *bridge) Reset() { b.conns = make(map[*bridgeConn]struct{}) } -func (b *bridge) Pause() { +func (b *bridge) PauseConnections() { b.mu.Lock() b.pausec = make(chan struct{}) b.mu.Unlock() } -func (b *bridge) Unpause() { +func (b *bridge) UnpauseConnections() { b.mu.Lock() select { case <-b.pausec: @@ -127,7 +118,7 @@ func (b *bridge) serveListen() { case <-pausec: } - outc, oerr := net.Dial("unix", b.outaddr) + outc, oerr := b.dialer.Dial() if oerr != nil { inc.Close() return @@ -205,7 +196,7 @@ func (b *bridge) ioCopy(dst io.Writer, src io.Reader) (err error) { for { select { case <-b.blackholec: - io.Copy(ioutil.Discard, src) + io.Copy(io.Discard, src) return nil default: } diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster.go b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster.go similarity index 54% rename from vendor/go.etcd.io/etcd/tests/v3/integration/cluster.go rename to vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster.go index b7e36817b7..3b5be315f3 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster.go +++ b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster.go @@ -17,8 +17,9 @@ package integration import ( "context" "crypto/tls" + "errors" "fmt" - "io/ioutil" + "io" "log" "math/rand" "net" @@ -37,13 +38,14 @@ import ( "go.etcd.io/etcd/client/pkg/v3/tlsutil" "go.etcd.io/etcd/client/pkg/v3/transport" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/client/v2" "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/pkg/v3/grpc_testing" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/v2http" "go.etcd.io/etcd/server/v3/etcdserver/api/v3client" @@ -56,6 +58,8 @@ import ( "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/soheilhy/cmux" "go.uber.org/zap" "golang.org/x/crypto/bcrypt" @@ -66,30 +70,31 @@ import ( const ( // RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss. 
RequestWaitTimeout = 5 * time.Second - tickDuration = 10 * time.Millisecond - requestTimeout = 20 * time.Second + TickDuration = 10 * time.Millisecond + RequestTimeout = 20 * time.Second - clusterName = "etcd" - basePort = 21000 + ClusterName = "etcd" + BasePort = 21000 URLScheme = "unix" URLSchemeTLS = "unixs" + BaseGRPCPort = 30000 ) var ( - electionTicks = 10 + ElectionTicks = 10 - // integration test uses unique ports, counting up, to listen for each + // LocalListenCount integration test uses unique ports, counting up, to listen for each // member, ensuring restarted members can listen on the same port again. - localListenCount = int64(0) + LocalListenCount = int32(0) - testTLSInfo = transport.TLSInfo{ + TestTLSInfo = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server.key.insecure"), CertFile: MustAbsPath("../fixtures/server.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } - testTLSInfoWithSpecificUsage = transport.TLSInfo{ + TestTLSInfoWithSpecificUsage = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-serverusage.key.insecure"), CertFile: MustAbsPath("../fixtures/server-serverusage.crt"), ClientKeyFile: MustAbsPath("../fixtures/client-clientusage.key.insecure"), @@ -98,29 +103,33 @@ var ( ClientCertAuth: true, } - testTLSInfoIP = transport.TLSInfo{ + TestTLSInfoIP = transport.TLSInfo{ KeyFile: MustAbsPath("../fixtures/server-ip.key.insecure"), CertFile: MustAbsPath("../fixtures/server-ip.crt"), TrustedCAFile: MustAbsPath("../fixtures/ca.crt"), ClientCertAuth: true, } - testTLSInfoExpired = transport.TLSInfo{ + TestTLSInfoExpired = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } - testTLSInfoExpiredIP = transport.TLSInfo{ + TestTLSInfoExpiredIP = transport.TLSInfo{ KeyFile: MustAbsPath("./fixtures-expired/server-ip.key.insecure"), CertFile: MustAbsPath("./fixtures-expired/server-ip.crt"), TrustedCAFile: MustAbsPath("./fixtures-expired/ca.crt"), ClientCertAuth: true, } - defaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", + DefaultTokenJWT = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", MustAbsPath("../fixtures/server.crt"), MustAbsPath("../fixtures/server.key.insecure")) + + // UniqueNumber is used to generate unique port numbers + // Should only be accessed via atomic package methods. + UniqueNumber int32 ) type ClusterConfig struct { @@ -132,8 +141,6 @@ type ClusterConfig struct { AuthToken string - UseGRPC bool - QuotaBackendBytes int64 MaxTxnOps uint @@ -145,48 +152,52 @@ type ClusterConfig struct { GRPCKeepAliveInterval time.Duration GRPCKeepAliveTimeout time.Duration - // SkipCreatingClient to skip creating clients for each member. - SkipCreatingClient bool - ClientMaxCallSendMsgSize int ClientMaxCallRecvMsgSize int // UseIP is true to use only IP for gRPC requests. UseIP bool + // UseBridge adds bridge between client and grpc server. Should be used in tests that + // want to manipulate connection or require connection not breaking despite server stop/restart. + UseBridge bool + // UseTCP configures server listen on tcp socket. If disabled unix socket is used. 
+ UseTCP bool EnableLeaseCheckpoint bool LeaseCheckpointInterval time.Duration + LeaseCheckpointPersist bool WatchProgressNotifyInterval time.Duration + ExperimentalMaxLearners int + StrictReconfigCheck bool + CorruptCheckTime time.Duration } -type cluster struct { - cfg *ClusterConfig - Members []*member - lastMemberNum int -} +type Cluster struct { + Cfg *ClusterConfig + Members []*Member + LastMemberNum int -func (c *cluster) generateMemberName() string { - c.lastMemberNum++ - return fmt.Sprintf("m%v", c.lastMemberNum-1) + mu sync.Mutex + clusterClient *clientv3.Client } -func schemeFromTLSInfo(tls *transport.TLSInfo) string { +func SchemeFromTLSInfo(tls *transport.TLSInfo) string { if tls == nil { return URLScheme } return URLSchemeTLS } -func (c *cluster) fillClusterForMembers() error { - if c.cfg.DiscoveryURL != "" { - // cluster will be discovered +func (c *Cluster) fillClusterForMembers() error { + if c.Cfg.DiscoveryURL != "" { + // Cluster will be discovered return nil } addrs := make([]string, 0) for _, m := range c.Members { - scheme := schemeFromTLSInfo(m.PeerTLSInfo) + scheme := SchemeFromTLSInfo(m.PeerTLSInfo) for _, l := range m.PeerListeners { addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String())) } @@ -202,40 +213,12 @@ func (c *cluster) fillClusterForMembers() error { return nil } -func newCluster(t testutil.TB, cfg *ClusterConfig) *cluster { - testutil.SkipTestIfShortMode(t, "Cannot start etcd cluster in --short tests") - - c := &cluster{cfg: cfg} - ms := make([]*member, cfg.Size) - for i := 0; i < cfg.Size; i++ { - ms[i] = c.mustNewMember(t) - } - c.Members = ms - if err := c.fillClusterForMembers(); err != nil { - t.Fatal(err) - } - - return c -} - -// NewCluster returns an unlaunched cluster of the given size which has been -// set to use static bootstrap. -func NewCluster(t testutil.TB, size int) *cluster { - t.Helper() - return newCluster(t, &ClusterConfig{Size: size}) -} - -// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration -func NewClusterByConfig(t testutil.TB, cfg *ClusterConfig) *cluster { - return newCluster(t, cfg) -} - -func (c *cluster) Launch(t testutil.TB) { +func (c *Cluster) Launch(t testutil.TB) { errc := make(chan error) for _, m := range c.Members { // Members are launched in separate goroutines because if they boot // using discovery url, they have to wait for others to register to continue. 
- go func(m *member) { + go func(m *Member) { errc <- m.Launch() }(m) } @@ -245,45 +228,21 @@ func (c *cluster) Launch(t testutil.TB) { t.Fatalf("error setting up member: %v", err) } } - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.ProtoMembers()) c.waitVersion() for _, m := range c.Members { - t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCAddr()) - } -} - -func (c *cluster) URL(i int) string { - return c.Members[i].ClientURLs[0].String() -} - -// URLs returns a list of all active client URLs in the cluster -func (c *cluster) URLs() []string { - return getMembersURLs(c.Members) -} - -func getMembersURLs(members []*member) []string { - urls := make([]string, 0) - for _, m := range members { - select { - case <-m.s.StopNotify(): - continue - default: - } - for _, u := range m.ClientURLs { - urls = append(urls, u.String()) - } + t.Logf(" - %v -> %v (%v)", m.Name, m.ID(), m.GRPCURL()) } - return urls } -// HTTPMembers returns a list of all active members as client.Members -func (c *cluster) HTTPMembers() []client.Member { - ms := []client.Member{} +// ProtoMembers returns a list of all active members as client.Members +func (c *Cluster) ProtoMembers() []*pb.Member { + ms := []*pb.Member{} for _, m := range c.Members { - pScheme := schemeFromTLSInfo(m.PeerTLSInfo) - cScheme := schemeFromTLSInfo(m.ClientTLSInfo) - cm := client.Member{Name: m.Name} + pScheme := SchemeFromTLSInfo(m.PeerTLSInfo) + cScheme := SchemeFromTLSInfo(m.ClientTLSInfo) + cm := &pb.Member{Name: m.Name} for _, ln := range m.PeerListeners { cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String()) } @@ -295,49 +254,52 @@ func (c *cluster) HTTPMembers() []client.Member { return ms } -func (c *cluster) mustNewMember(t testutil.TB) *member { - m := mustNewMember(t, - memberConfig{ - name: c.generateMemberName(), - authToken: c.cfg.AuthToken, - peerTLS: c.cfg.PeerTLS, - clientTLS: c.cfg.ClientTLS, - quotaBackendBytes: c.cfg.QuotaBackendBytes, - maxTxnOps: c.cfg.MaxTxnOps, - maxRequestBytes: c.cfg.MaxRequestBytes, - snapshotCount: c.cfg.SnapshotCount, - snapshotCatchUpEntries: c.cfg.SnapshotCatchUpEntries, - grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime, - grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval, - grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout, - clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize, - clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize, - useIP: c.cfg.UseIP, - enableLeaseCheckpoint: c.cfg.EnableLeaseCheckpoint, - leaseCheckpointInterval: c.cfg.LeaseCheckpointInterval, - WatchProgressNotifyInterval: c.cfg.WatchProgressNotifyInterval, +func (c *Cluster) mustNewMember(t testutil.TB) *Member { + memberNumber := c.LastMemberNum + c.LastMemberNum++ + m := MustNewMember(t, + MemberConfig{ + Name: fmt.Sprintf("m%v", memberNumber-1), + MemberNumber: memberNumber, + AuthToken: c.Cfg.AuthToken, + PeerTLS: c.Cfg.PeerTLS, + ClientTLS: c.Cfg.ClientTLS, + QuotaBackendBytes: c.Cfg.QuotaBackendBytes, + MaxTxnOps: c.Cfg.MaxTxnOps, + MaxRequestBytes: c.Cfg.MaxRequestBytes, + SnapshotCount: c.Cfg.SnapshotCount, + SnapshotCatchUpEntries: c.Cfg.SnapshotCatchUpEntries, + GrpcKeepAliveMinTime: c.Cfg.GRPCKeepAliveMinTime, + GrpcKeepAliveInterval: c.Cfg.GRPCKeepAliveInterval, + GrpcKeepAliveTimeout: c.Cfg.GRPCKeepAliveTimeout, + ClientMaxCallSendMsgSize: c.Cfg.ClientMaxCallSendMsgSize, + ClientMaxCallRecvMsgSize: c.Cfg.ClientMaxCallRecvMsgSize, + UseIP: 
c.Cfg.UseIP, + UseBridge: c.Cfg.UseBridge, + UseTCP: c.Cfg.UseTCP, + EnableLeaseCheckpoint: c.Cfg.EnableLeaseCheckpoint, + LeaseCheckpointInterval: c.Cfg.LeaseCheckpointInterval, + LeaseCheckpointPersist: c.Cfg.LeaseCheckpointPersist, + WatchProgressNotifyInterval: c.Cfg.WatchProgressNotifyInterval, + ExperimentalMaxLearners: c.Cfg.ExperimentalMaxLearners, + StrictReconfigCheck: c.Cfg.StrictReconfigCheck, + CorruptCheckTime: c.Cfg.CorruptCheckTime, }) - m.DiscoveryURL = c.cfg.DiscoveryURL - if c.cfg.UseGRPC { - if err := m.listenGRPC(); err != nil { - t.Fatal(err) - } - } + m.DiscoveryURL = c.Cfg.DiscoveryURL return m } // addMember return PeerURLs of the added member. -func (c *cluster) addMember(t testutil.TB) types.URLs { +func (c *Cluster) addMember(t testutil.TB) types.URLs { m := c.mustNewMember(t) - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) - // send add request to the cluster + // send add request to the Cluster var err error for i := 0; i < len(c.Members); i++ { - clientURL := c.URL(i) peerURL := scheme + "://" + m.PeerListeners[0].Addr().String() - if err = c.addMemberByURL(t, clientURL, peerURL); err == nil { + if err = c.AddMemberByURL(t, c.Members[i].Client, peerURL); err == nil { break } } @@ -355,115 +317,106 @@ func (c *cluster) addMember(t testutil.TB) types.URLs { t.Fatal(err) } c.Members = append(c.Members, m) - // wait cluster to be stable to receive future client requests - c.waitMembersMatch(t, c.HTTPMembers()) + // wait Cluster to be stable to receive future client requests + c.WaitMembersMatch(t, c.ProtoMembers()) return m.PeerURLs } -func (c *cluster) addMemberByURL(t testutil.TB, clientURL, peerURL string) error { - cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err := ma.Add(ctx, peerURL) +func (c *Cluster) AddMemberByURL(t testutil.TB, cc *clientv3.Client, peerURL string) error { + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) + _, err := cc.MemberAdd(ctx, []string{peerURL}) cancel() if err != nil { return err } - // wait for the add node entry applied in the cluster - members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) - c.waitMembersMatch(t, members) + // wait for the add node entry applied in the Cluster + members := append(c.ProtoMembers(), &pb.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}}) + c.WaitMembersMatch(t, members) return nil } // AddMember return PeerURLs of the added member. 
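AddMemberByURL above now issues the add through clientv3's Cluster API rather than the removed v2 client.MembersAPI. Outside the framework, the same call looks like this (endpoint and peer URL are placeholders for illustration):

    package main

    import (
        "context"
        "fmt"
        "time"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func main() {
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"127.0.0.1:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
        defer cancel()
        // The same call the framework now makes in AddMemberByURL.
        resp, err := cli.MemberAdd(ctx, []string{"http://127.0.0.1:12380"})
        if err != nil {
            panic(err)
        }
        fmt.Printf("added member %x\n", resp.Member.ID)
    }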
-func (c *cluster) AddMember(t testutil.TB) types.URLs { +func (c *Cluster) AddMember(t testutil.TB) types.URLs { return c.addMember(t) } -func (c *cluster) RemoveMember(t testutil.TB, id uint64) { - if err := c.removeMember(t, id); err != nil { - t.Fatal(err) - } -} +func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) error { + // send remove request to the Cluster -func (c *cluster) removeMember(t testutil.TB, id uint64) error { - // send remove request to the cluster - cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - err := ma.Remove(ctx, types.ID(id).String()) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) + _, err := cc.MemberRemove(ctx, id) cancel() if err != nil { return err } - newMembers := make([]*member, 0) + newMembers := make([]*Member, 0) for _, m := range c.Members { - if uint64(m.s.ID()) != id { + if uint64(m.Server.ID()) != id { newMembers = append(newMembers, m) } else { + m.Client.Close() select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): m.Terminate(t) // 1s stop delay + election timeout + 1s disk and network delay + connection write timeout // TODO: remove connection write timeout by selecting on http response closeNotifier // blocking on https://github.com/golang/go/issues/9524 - case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in time", m.s.ID()) + case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout): + t.Fatalf("failed to remove member %s in time", m.Server.ID()) } } } c.Members = newMembers - c.waitMembersMatch(t, c.HTTPMembers()) + c.WaitMembersMatch(t, c.ProtoMembers()) return nil } -func (c *cluster) Terminate(t testutil.TB) { - var wg sync.WaitGroup - wg.Add(len(c.Members)) +func (c *Cluster) WaitMembersMatch(t testutil.TB, membs []*pb.Member) { + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) + defer cancel() for _, m := range c.Members { - go func(mm *member) { - defer wg.Done() - mm.Terminate(t) - }(m) - } - wg.Wait() -} - -func (c *cluster) waitMembersMatch(t testutil.TB, membs []client.Member) { - for _, u := range c.URLs() { - cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS) - ma := client.NewMembersAPI(cc) + cc := ToGRPC(m.Client) + select { + case <-m.Server.StopNotify(): + continue + default: + } for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - ms, err := ma.List(ctx) - cancel() - if err == nil && isMembersEqual(ms, membs) { + resp, err := cc.Cluster.MemberList(ctx, &pb.MemberListRequest{Linearizable: false}) + if errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } + if err != nil { + continue + } + if isMembersEqual(resp.Members, membs) { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } // WaitLeader returns index of the member in c.Members that is leader (or -1). -func (c *cluster) WaitLeader(t testutil.TB) int { return c.waitLeader(t, c.Members) } +func (c *Cluster) WaitLeader(t testutil.TB) int { return c.WaitMembersForLeader(t, c.Members) } -// waitLeader waits until given members agree on the same leader, +// WaitMembersForLeader waits until given members agree on the same leader, // and returns its 'index' in the 'membs' list (or -1). 
-func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { +func (c *Cluster) WaitMembersForLeader(t testutil.TB, membs []*Member) int { possibleLead := make(map[uint64]bool) var lead uint64 for _, m := range membs { - possibleLead[uint64(m.s.ID())] = true + possibleLead[uint64(m.Server.ID())] = true + } + cc, err := c.ClusterClient() + if err != nil { + t.Fatal(err) } - cc := MustNewHTTPClient(t, getMembersURLs(membs), nil) - kapi := client.NewKeysAPI(cc) - // ensure leader is up via linearizable get for { - ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second) - _, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true}) + ctx, cancel := context.WithTimeout(context.Background(), 10*TickDuration+time.Second) + _, err := cc.Get(ctx, "0") cancel() if err == nil || strings.Contains(err.Error(), "Key not found") { break @@ -474,21 +427,21 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { lead = 0 for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if lead != 0 && lead != m.s.Lead() { + if lead != 0 && lead != m.Server.Lead() { lead = 0 - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } - lead = m.s.Lead() + lead = m.Server.Lead() } } for i, m := range membs { - if uint64(m.s.ID()) == lead { + if uint64(m.Server.ID()) == lead { return i } } @@ -496,54 +449,51 @@ func (c *cluster) waitLeader(t testutil.TB, membs []*member) int { return -1 } -func (c *cluster) WaitNoLeader() { c.waitNoLeader(c.Members) } +func (c *Cluster) WaitNoLeader() { c.WaitMembersNoLeader(c.Members) } -// waitNoLeader waits until given members lose leader. -func (c *cluster) waitNoLeader(membs []*member) { +// WaitMembersNoLeader waits until given members lose leader. +func (c *Cluster) WaitMembersNoLeader(membs []*Member) { noLeader := false for !noLeader { noLeader = true for _, m := range membs { select { - case <-m.s.StopNotify(): + case <-m.Server.StopNotify(): continue default: } - if m.s.Lead() != 0 { + if m.Server.Lead() != 0 { noLeader = false - time.Sleep(10 * tickDuration) + time.Sleep(10 * TickDuration) break } } } } -func (c *cluster) waitVersion() { +func (c *Cluster) waitVersion() { for _, m := range c.Members { for { - if m.s.ClusterVersion() != nil { + if m.Server.ClusterVersion() != nil { break } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } } // isMembersEqual checks whether two members equal except ID field. // The given wmembs should always set ID field to empty string. 
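The rewritten isMembersEqual just below swaps reflect.DeepEqual plus in-place ID zeroing for cmp.Equal with an IgnoreFields option, which compares without mutating its inputs. The idiom in isolation, on a toy Member type rather than the pb.Member used below:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
        "github.com/google/go-cmp/cmp/cmpopts"
    )

    type Member struct {
        ID   uint64
        Name string
    }

    func main() {
        got := []*Member{{ID: 1, Name: "m0"}}
        want := []*Member{{ID: 42, Name: "m0"}} // ID unknown ahead of time
        // IgnoreFields drops ID from the comparison, so neither slice
        // needs to be rewritten before comparing.
        fmt.Println(cmp.Equal(got, want, cmpopts.IgnoreFields(Member{}, "ID"))) // true
    }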
-func isMembersEqual(membs []client.Member, wmembs []client.Member) bool { +func isMembersEqual(membs []*pb.Member, wmembs []*pb.Member) bool { sort.Sort(SortableMemberSliceByPeerURLs(membs)) sort.Sort(SortableMemberSliceByPeerURLs(wmembs)) - for i := range membs { - membs[i].ID = "" - } - return reflect.DeepEqual(membs, wmembs) + return cmp.Equal(membs, wmembs, cmpopts.IgnoreFields(pb.Member{}, "ID", "PeerURLs", "ClientURLs")) } func newLocalListener(t testutil.TB) net.Listener { - c := atomic.AddInt64(&localListenCount, 1) + c := atomic.AddInt32(&LocalListenCount, 1) // Go 1.8+ allows only numbers in port - addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid()) + addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+BasePort, os.Getpid()) return NewListenerWithAddr(t, addr) } @@ -555,69 +505,88 @@ func NewListenerWithAddr(t testutil.TB, addr string) net.Listener { return l } -type member struct { +type Member struct { config.ServerConfig + UniqNumber int + MemberNumber int PeerListeners, ClientListeners []net.Listener - grpcListener net.Listener + GrpcListener net.Listener // PeerTLSInfo enables peer TLS when set PeerTLSInfo *transport.TLSInfo // ClientTLSInfo enables client TLS when set ClientTLSInfo *transport.TLSInfo DialOptions []grpc.DialOption - raftHandler *testutil.PauseableHandler - s *etcdserver.EtcdServer - serverClosers []func() - - grpcServerOpts []grpc.ServerOption - grpcServer *grpc.Server - grpcServerPeer *grpc.Server - grpcAddr string - grpcBridge *bridge - - // serverClient is a clientv3 that directly calls the etcdserver. - serverClient *clientv3.Client - - keepDataDirTerminate bool - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - - isLearner bool - closed bool -} - -func (m *member) GRPCAddr() string { return m.grpcAddr } - -type memberConfig struct { - name string - peerTLS *transport.TLSInfo - clientTLS *transport.TLSInfo - authToken string - quotaBackendBytes int64 - maxTxnOps uint - maxRequestBytes uint - snapshotCount uint64 - snapshotCatchUpEntries uint64 - grpcKeepAliveMinTime time.Duration - grpcKeepAliveInterval time.Duration - grpcKeepAliveTimeout time.Duration - clientMaxCallSendMsgSize int - clientMaxCallRecvMsgSize int - useIP bool - enableLeaseCheckpoint bool - leaseCheckpointInterval time.Duration + RaftHandler *testutil.PauseableHandler + Server *etcdserver.EtcdServer + ServerClosers []func() + + GrpcServerOpts []grpc.ServerOption + GrpcServer *grpc.Server + GrpcServerPeer *grpc.Server + GrpcURL string + GrpcBridge *bridge + + // ServerClient is a clientv3 that directly calls the etcdserver. + ServerClient *clientv3.Client + // Client is a clientv3 that communicates via socket, either UNIX or TCP. 
+ Client *clientv3.Client + + KeepDataDirTerminate bool + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool + + IsLearner bool + Closed bool + + GrpcServerRecorder *grpc_testing.GrpcRecorder +} + +func (m *Member) GRPCURL() string { return m.GrpcURL } + +type MemberConfig struct { + Name string + UniqNumber int64 + MemberNumber int + PeerTLS *transport.TLSInfo + ClientTLS *transport.TLSInfo + AuthToken string + QuotaBackendBytes int64 + MaxTxnOps uint + MaxRequestBytes uint + SnapshotCount uint64 + SnapshotCatchUpEntries uint64 + GrpcKeepAliveMinTime time.Duration + GrpcKeepAliveInterval time.Duration + GrpcKeepAliveTimeout time.Duration + ClientMaxCallSendMsgSize int + ClientMaxCallRecvMsgSize int + UseIP bool + UseBridge bool + UseTCP bool + EnableLeaseCheckpoint bool + LeaseCheckpointInterval time.Duration + LeaseCheckpointPersist bool WatchProgressNotifyInterval time.Duration + ExperimentalMaxLearners int + StrictReconfigCheck bool + CorruptCheckTime time.Duration } -// mustNewMember return an inited member with the given name. If peerTLS is +// MustNewMember return an inited member with the given name. If peerTLS is // set, it will use https scheme to communicate between peers. -func mustNewMember(t testutil.TB, mcfg memberConfig) *member { +func MustNewMember(t testutil.TB, mcfg MemberConfig) *Member { var err error - m := &member{} + m := &Member{ + MemberNumber: mcfg.MemberNumber, + UniqNumber: int(atomic.AddInt32(&LocalListenCount, 1)), + } - peerScheme := schemeFromTLSInfo(mcfg.peerTLS) - clientScheme := schemeFromTLSInfo(mcfg.clientTLS) + peerScheme := SchemeFromTLSInfo(mcfg.PeerTLS) + clientScheme := SchemeFromTLSInfo(mcfg.ClientTLS) pln := newLocalListener(t) m.PeerListeners = []net.Listener{pln} @@ -625,7 +594,7 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.PeerTLSInfo = mcfg.peerTLS + m.PeerTLSInfo = mcfg.PeerTLS cln := newLocalListener(t) m.ClientListeners = []net.Listener{cln} @@ -633,82 +602,96 @@ func mustNewMember(t testutil.TB, mcfg memberConfig) *member { if err != nil { t.Fatal(err) } - m.ClientTLSInfo = mcfg.clientTLS + m.ClientTLSInfo = mcfg.ClientTLS - m.Name = mcfg.name + m.Name = mcfg.Name - m.DataDir, err = ioutil.TempDir(t.TempDir(), "etcd") + m.DataDir, err = os.MkdirTemp(t.TempDir(), "etcd") if err != nil { t.Fatal(err) } - clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String()) + clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.Name, peerScheme, pln.Addr().String()) m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr) if err != nil { t.Fatal(err) } - m.InitialClusterToken = clusterName + m.InitialClusterToken = ClusterName m.NewCluster = true m.BootstrapTimeout = 10 * time.Millisecond if m.PeerTLSInfo != nil { m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo } - m.ElectionTicks = electionTicks + m.ElectionTicks = ElectionTicks m.InitialElectionTickAdvance = true - m.TickMs = uint(tickDuration / time.Millisecond) - m.QuotaBackendBytes = mcfg.quotaBackendBytes - m.MaxTxnOps = mcfg.maxTxnOps + m.TickMs = uint(TickDuration / time.Millisecond) + m.QuotaBackendBytes = mcfg.QuotaBackendBytes + m.MaxTxnOps = mcfg.MaxTxnOps if m.MaxTxnOps == 0 { m.MaxTxnOps = embed.DefaultMaxTxnOps } - m.MaxRequestBytes = mcfg.maxRequestBytes + m.MaxRequestBytes = mcfg.MaxRequestBytes if m.MaxRequestBytes == 0 { m.MaxRequestBytes = embed.DefaultMaxRequestBytes } m.SnapshotCount = etcdserver.DefaultSnapshotCount - if mcfg.snapshotCount != 0 { - 
m.SnapshotCount = mcfg.snapshotCount + if mcfg.SnapshotCount != 0 { + m.SnapshotCount = mcfg.SnapshotCount } m.SnapshotCatchUpEntries = etcdserver.DefaultSnapshotCatchUpEntries - if mcfg.snapshotCatchUpEntries != 0 { - m.SnapshotCatchUpEntries = mcfg.snapshotCatchUpEntries + if mcfg.SnapshotCatchUpEntries != 0 { + m.SnapshotCatchUpEntries = mcfg.SnapshotCatchUpEntries } // for the purpose of integration testing, simple token is enough m.AuthToken = "simple" - if mcfg.authToken != "" { - m.AuthToken = mcfg.authToken + if mcfg.AuthToken != "" { + m.AuthToken = mcfg.AuthToken } m.BcryptCost = uint(bcrypt.MinCost) // use min bcrypt cost to speedy up integration testing - m.grpcServerOpts = []grpc.ServerOption{} - if mcfg.grpcKeepAliveMinTime > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: mcfg.grpcKeepAliveMinTime, + m.GrpcServerOpts = []grpc.ServerOption{} + if mcfg.GrpcKeepAliveMinTime > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: mcfg.GrpcKeepAliveMinTime, PermitWithoutStream: false, })) } - if mcfg.grpcKeepAliveInterval > time.Duration(0) && - mcfg.grpcKeepAliveTimeout > time.Duration(0) { - m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: mcfg.grpcKeepAliveInterval, - Timeout: mcfg.grpcKeepAliveTimeout, + if mcfg.GrpcKeepAliveInterval > time.Duration(0) && + mcfg.GrpcKeepAliveTimeout > time.Duration(0) { + m.GrpcServerOpts = append(m.GrpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: mcfg.GrpcKeepAliveInterval, + Timeout: mcfg.GrpcKeepAliveTimeout, })) } - m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize - m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize - m.useIP = mcfg.useIP - m.EnableLeaseCheckpoint = mcfg.enableLeaseCheckpoint - m.LeaseCheckpointInterval = mcfg.leaseCheckpointInterval + m.ClientMaxCallSendMsgSize = mcfg.ClientMaxCallSendMsgSize + m.ClientMaxCallRecvMsgSize = mcfg.ClientMaxCallRecvMsgSize + m.UseIP = mcfg.UseIP + m.UseBridge = mcfg.UseBridge + m.UseTCP = mcfg.UseTCP + m.EnableLeaseCheckpoint = mcfg.EnableLeaseCheckpoint + m.LeaseCheckpointInterval = mcfg.LeaseCheckpointInterval + m.LeaseCheckpointPersist = mcfg.LeaseCheckpointPersist m.WatchProgressNotifyInterval = mcfg.WatchProgressNotifyInterval m.InitialCorruptCheck = true + if mcfg.CorruptCheckTime > time.Duration(0) { + m.CorruptCheckTime = mcfg.CorruptCheckTime + } m.WarningApplyDuration = embed.DefaultWarningApplyDuration - + m.WarningUnaryRequestDuration = embed.DefaultWarningUnaryRequestDuration + m.ExperimentalMaxLearners = membership.DefaultMaxLearners + if mcfg.ExperimentalMaxLearners != 0 { + m.ExperimentalMaxLearners = mcfg.ExperimentalMaxLearners + } m.V2Deprecation = config.V2_DEPR_DEFAULT - - m.Logger = memberLogger(t, mcfg.name) + m.GrpcServerRecorder = &grpc_testing.GrpcRecorder{} + m.Logger = memberLogger(t, mcfg.Name) + m.StrictReconfigCheck = mcfg.StrictReconfigCheck + if err := m.listenGRPC(); err != nil { + t.Fatal(err) + } t.Cleanup(func() { // if we didn't cleanup the logger, the consecutive test // might reuse this (t). 
@@ -728,51 +711,116 @@ func memberLogger(t testutil.TB, name string) *zap.Logger { } // listenGRPC starts a grpc server over a unix domain socket on the member -func (m *member) listenGRPC() error { +func (m *Member) listenGRPC() error { // prefix with localhost so cert has right domain - m.grpcAddr = "localhost:" + m.Name - m.Logger.Info("LISTEN GRPC", zap.String("m.grpcAddr", m.grpcAddr), zap.String("m.Name", m.Name)) - if m.useIP { // for IP-only TLS certs - m.grpcAddr = "127.0.0.1:" + m.Name + network, host, port := m.grpcAddr() + grpcAddr := host + ":" + port + m.Logger.Info("LISTEN GRPC", zap.String("grpcAddr", grpcAddr), zap.String("m.Name", m.Name)) + grpcListener, err := net.Listen(network, grpcAddr) + if err != nil { + return fmt.Errorf("listen failed on grpc socket %s (%v)", grpcAddr, err) + } + m.GrpcURL = fmt.Sprintf("%s://%s", m.clientScheme(), grpcAddr) + if m.UseBridge { + _, err = m.addBridge() + if err != nil { + grpcListener.Close() + return err + } } - l, err := transport.NewUnixListener(m.grpcAddr) + m.GrpcListener = grpcListener + return nil +} + +func (m *Member) clientScheme() string { + switch { + case m.UseTCP && m.ClientTLSInfo != nil: + return "https" + case m.UseTCP && m.ClientTLSInfo == nil: + return "http" + case !m.UseTCP && m.ClientTLSInfo != nil: + return "unixs" + case !m.UseTCP && m.ClientTLSInfo == nil: + return "unix" + } + m.Logger.Panic("Failed to determine client schema") + return "" +} + +func (m *Member) addBridge() (*bridge, error) { + network, host, port := m.grpcAddr() + grpcAddr := host + ":" + port + bridgeAddr := grpcAddr + "0" + m.Logger.Info("LISTEN BRIDGE", zap.String("grpc-address", bridgeAddr), zap.String("member", m.Name)) + bridgeListener, err := transport.NewUnixListener(bridgeAddr) if err != nil { - return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err) + return nil, fmt.Errorf("listen failed on bridge socket %s (%v)", bridgeAddr, err) } - m.grpcBridge, err = newBridge(m.grpcAddr) + m.GrpcBridge, err = newBridge(dialer{network: network, addr: grpcAddr}, bridgeListener) if err != nil { - l.Close() - return err + bridgeListener.Close() + return nil, err } - m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr - m.grpcListener = l - return nil + m.GrpcURL = m.clientScheme() + "://" + bridgeAddr + return m.GrpcBridge, nil +} + +func (m *Member) Bridge() *bridge { + if !m.UseBridge { + m.Logger.Panic("Bridge not available. 
Please configure using bridge before creating Cluster.") + } + return m.GrpcBridge +} + +func (m *Member) grpcAddr() (network, host, port string) { + // prefix with localhost so cert has right domain + host = "localhost" + if m.UseIP { // for IP-only TLS certs + host = "127.0.0.1" + } + network = "unix" + if m.UseTCP { + network = "tcp" + } + port = m.Name + if m.UseTCP { + port = fmt.Sprintf("%d", GrpcPortNumber(m.UniqNumber, m.MemberNumber)) + } + return network, host, port +} + +func GrpcPortNumber(uniqNumber, memberNumber int) int { + return BaseGRPCPort + uniqNumber*10 + memberNumber +} + +type dialer struct { + network string + addr string } -func (m *member) ElectionTimeout() time.Duration { - return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond +func (d dialer) Dial() (net.Conn, error) { + return net.Dial(d.network, d.addr) } -func (m *member) ID() types.ID { return m.s.ID() } +func (m *Member) ElectionTimeout() time.Duration { + return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond +} -func (m *member) DropConnections() { m.grpcBridge.Reset() } -func (m *member) PauseConnections() { m.grpcBridge.Pause() } -func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } -func (m *member) Blackhole() { m.grpcBridge.Blackhole() } -func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() } +func (m *Member) ID() types.ID { return m.Server.ID() } // NewClientV3 creates a new grpc client connection to the member -func NewClientV3(m *member) (*clientv3.Client, error) { - if m.grpcAddr == "" { +func NewClientV3(m *Member) (*clientv3.Client, error) { + if m.GrpcURL == "" { return nil, fmt.Errorf("member not configured for grpc") } cfg := clientv3.Config{ - Endpoints: []string{m.grpcAddr}, + Endpoints: []string{m.GrpcURL}, DialTimeout: 5 * time.Second, DialOptions: []grpc.DialOption{grpc.WithBlock()}, - MaxCallSendMsgSize: m.clientMaxCallSendMsgSize, - MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize, + MaxCallSendMsgSize: m.ClientMaxCallSendMsgSize, + MaxCallRecvMsgSize: m.ClientMaxCallRecvMsgSize, + Logger: m.Logger.Named("client"), } if m.ClientTLSInfo != nil { @@ -785,13 +833,13 @@ func NewClientV3(m *member) (*clientv3.Client, error) { if m.DialOptions != nil { cfg.DialOptions = append(cfg.DialOptions, m.DialOptions...) } - return newClientV3(cfg, m.Logger.Named("client")) + return newClientV3(cfg) } // Clone returns a member with the same server configuration. The returned // member will not set PeerListeners and ClientListeners. -func (m *member) Clone(t testutil.TB) *member { - mm := &member{} +func (m *Member) Clone(t testutil.TB) *Member { + mm := &Member{} mm.ServerConfig = m.ServerConfig var err error @@ -823,20 +871,20 @@ func (m *member) Clone(t testutil.TB) *member { // Launch starts a member based on ServerConfig, PeerListeners // and ClientListeners. 
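When UseTCP is set, grpcAddr above derives the member's listen port from GrpcPortNumber. A worked example of that arithmetic, using the BaseGRPCPort constant declared near the top of this file:

    package main

    import "fmt"

    const BaseGRPCPort = 30000 // as declared earlier in cluster.go

    // GrpcPortNumber mirrors the helper above: a process-unique counter
    // spaces members ten ports apart, and the member number picks the slot.
    func GrpcPortNumber(uniqNumber, memberNumber int) int {
        return BaseGRPCPort + uniqNumber*10 + memberNumber
    }

    func main() {
        // Third listener created in the process (uniqNumber 3),
        // second member of its cluster (memberNumber 1):
        fmt.Println(GrpcPortNumber(3, 1)) // 30031
    }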
-func (m *member) Launch() error { +func (m *Member) Launch() error { m.Logger.Info( "launching a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) var err error - if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil { + if m.Server, err = etcdserver.NewServer(m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } - m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) - m.s.Start() + m.Server.SyncTicker = time.NewTicker(500 * time.Millisecond) + m.Server.Start() var peerTLScfg *tls.Config if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() { @@ -845,7 +893,7 @@ func (m *member) Launch() error { } } - if m.grpcListener != nil { + if m.GrpcListener != nil { var ( tlscfg *tls.Config ) @@ -855,23 +903,23 @@ func (m *member) Launch() error { return err } } - m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...) - m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg) - m.serverClient = v3client.New(m.s) - lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient)) - epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient)) - go m.grpcServer.Serve(m.grpcListener) + m.GrpcServer = v3rpc.Server(m.Server, tlscfg, m.GrpcServerRecorder.UnaryInterceptor(), m.GrpcServerOpts...) + m.GrpcServerPeer = v3rpc.Server(m.Server, peerTLScfg, m.GrpcServerRecorder.UnaryInterceptor()) + m.ServerClient = v3client.New(m.Server) + lockpb.RegisterLockServer(m.GrpcServer, v3lock.NewLockServer(m.ServerClient)) + epb.RegisterElectionServer(m.GrpcServer, v3election.NewElectionServer(m.ServerClient)) + go m.GrpcServer.Serve(m.GrpcListener) } - m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.s)} + m.RaftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.Logger, m.Server)} - h := (http.Handler)(m.raftHandler) - if m.grpcListener != nil { + h := (http.Handler)(m.RaftHandler) + if m.GrpcListener != nil { h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { - m.grpcServerPeer.ServeHTTP(w, r) + m.GrpcServerPeer.ServeHTTP(w, r) } else { - m.raftHandler.ServeHTTP(w, r) + m.RaftHandler.ServeHTTP(w, r) } }) } @@ -881,9 +929,9 @@ func (m *member) Launch() error { // don't hang on matcher after closing listener cm.SetReadTimeout(time.Second) - if m.grpcServer != nil { + if m.GrpcServer != nil { grpcl := cm.Match(cmux.HTTP2()) - go m.grpcServerPeer.Serve(grpcl) + go m.GrpcServerPeer.Serve(grpcl) } // serve http1/http2 rafthttp/grpc @@ -898,7 +946,7 @@ func (m *member) Launch() error { Config: &http.Server{ Handler: h, TLSConfig: peerTLScfg, - ErrorLog: log.New(ioutil.Discard, "net/http", 0), + ErrorLog: log.New(io.Discard, "net/http", 0), }, TLS: peerTLScfg, } @@ -915,7 +963,7 @@ func (m *member) Launch() error { hs.Close() <-donec } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) } for _, ln := range m.ClientListeners { hs := &httptest.Server{ @@ -923,10 +971,10 @@ func (m *member) Launch() error { Config: &http.Server{ Handler: v2http.NewClientHandler( m.Logger, - m.s, + m.Server, m.ServerConfig.ReqTimeout(), ), - ErrorLog: log.New(ioutil.Discard, "net/http", 0), + ErrorLog: log.New(io.Discard, "net/http", 0), }, } if 
m.ClientTLSInfo == nil { @@ -943,7 +991,7 @@ func (m *member) Launch() error { // Previously, // 1. Server has non-empty (*tls.Config).Certificates on client hello // 2. Server calls (*tls.Config).GetCertificate iff: - // - Server's (*tls.Config).Certificates is not empty, or + // - Server'Server (*tls.Config).Certificates is not empty, or // - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName // // When (*tls.Config).Certificates is always populated on initial handshake, @@ -961,7 +1009,7 @@ func (m *member) Launch() error { // // This introduces another problem with "httptest.Server": // when server initial certificates are empty, certificates - // are overwritten by Go's internal test certs, which have + // are overwritten by Go'Server internal test certs, which have // different SAN fields (e.g. example.com). To work around, // re-overwrite (*tls.Config).Certificates before starting // test server. @@ -978,7 +1026,13 @@ func (m *member) Launch() error { hs.CloseClientConnections() hs.Close() } - m.serverClosers = append(m.serverClosers, closer) + m.ServerClosers = append(m.ServerClosers, closer) + } + if m.GrpcURL != "" && m.Client == nil { + m.Client, err = NewClientV3(m) + if err != nil { + return err + } } m.Logger.Info( @@ -986,26 +1040,28 @@ func (m *member) Launch() error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) return nil } -func (m *member) WaitOK(t testutil.TB) { +func (m *Member) RecordedRequests() []grpc_testing.RequestInfo { + return m.GrpcServerRecorder.RecordedRequests() +} + +func (m *Member) WaitOK(t testutil.TB) { m.WaitStarted(t) - for m.s.Leader() == 0 { - time.Sleep(tickDuration) + for m.Server.Leader() == 0 { + time.Sleep(TickDuration) } } -func (m *member) WaitStarted(t testutil.TB) { - cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo) - kapi := client.NewKeysAPI(cc) +func (m *Member) WaitStarted(t testutil.TB) { for { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) - _, err := kapi.Get(ctx, "/", nil) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) + _, err := m.Client.Get(ctx, "/", clientv3.WithSerializable()) if err != nil { - time.Sleep(tickDuration) + time.Sleep(TickDuration) continue } cancel() @@ -1014,51 +1070,51 @@ func (m *member) WaitStarted(t testutil.TB) { } func WaitClientV3(t testutil.TB, kv clientv3.KV) { - timeout := time.Now().Add(requestTimeout) + timeout := time.Now().Add(RequestTimeout) var err error for time.Now().Before(timeout) { - ctx, cancel := context.WithTimeout(context.Background(), requestTimeout) + ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout) _, err = kv.Get(ctx, "/") cancel() if err == nil { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } if err != nil { t.Fatalf("timed out waiting for client: %v", err) } } -func (m *member) URL() string { return m.ClientURLs[0].String() } +func (m *Member) URL() string { return m.ClientURLs[0].String() } -func (m *member) Pause() { - m.raftHandler.Pause() - m.s.PauseSending() +func (m *Member) Pause() { + m.RaftHandler.Pause() + m.Server.PauseSending() } -func (m *member) Resume() { - m.raftHandler.Resume() - m.s.ResumeSending() +func (m *Member) Resume() { + m.RaftHandler.Resume() + m.Server.ResumeSending() } -// Close stops the member's etcdserver and closes its connections -func 
(m *member) Close() { - if m.grpcBridge != nil { - m.grpcBridge.Close() - m.grpcBridge = nil +// Close stops the member'Server etcdserver and closes its connections +func (m *Member) Close() { + if m.GrpcBridge != nil { + m.GrpcBridge.Close() + m.GrpcBridge = nil } - if m.serverClient != nil { - m.serverClient.Close() - m.serverClient = nil + if m.ServerClient != nil { + m.ServerClient.Close() + m.ServerClient = nil } - if m.grpcServer != nil { + if m.GrpcServer != nil { ch := make(chan struct{}) go func() { defer close(ch) // close listeners to stop accepting new connections, // will block on any existing transports - m.grpcServer.GracefulStop() + m.GrpcServer.GracefulStop() }() // wait until all pending RPCs are finished select { @@ -1066,21 +1122,21 @@ func (m *member) Close() { case <-time.After(2 * time.Second): // took too long, manually close open transports // e.g. watch streams - m.grpcServer.Stop() + m.GrpcServer.Stop() <-ch } - m.grpcServer = nil - m.grpcServerPeer.GracefulStop() - m.grpcServerPeer.Stop() - m.grpcServerPeer = nil + m.GrpcServer = nil + m.GrpcServerPeer.GracefulStop() + m.GrpcServerPeer.Stop() + m.GrpcServerPeer = nil } - if m.s != nil { - m.s.HardStop() + if m.Server != nil { + m.Server.HardStop() } - for _, f := range m.serverClosers { + for _, f := range m.ServerClosers { f() } - if !m.closed { + if !m.Closed { // Avoid verification of the same file multiple times // (that might not exist any longer) verify.MustVerifyIfEnabled(verify.Config{ @@ -1089,51 +1145,51 @@ func (m *member) Close() { ExactIndex: false, }) } - m.closed = true + m.Closed = true } // Stop stops the member, but the data dir of the member is preserved. -func (m *member) Stop(_ testutil.TB) { +func (m *Member) Stop(_ testutil.TB) { m.Logger.Info( "stopping a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - m.serverClosers = nil + m.ServerClosers = nil m.Logger.Info( "stopped a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) } -// checkLeaderTransition waits for leader transition, returning the new leader ID. -func checkLeaderTransition(m *member, oldLead uint64) uint64 { - interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond - for m.s.Lead() == 0 || (m.s.Lead() == oldLead) { +// CheckLeaderTransition waits for leader transition, returning the new leader ID. +func CheckLeaderTransition(m *Member, oldLead uint64) uint64 { + interval := time.Duration(m.Server.Cfg.TickMs) * time.Millisecond + for m.Server.Lead() == 0 || (m.Server.Lead() == oldLead) { time.Sleep(interval) } - return m.s.Lead() + return m.Server.Lead() } // StopNotify unblocks when a member stop completes -func (m *member) StopNotify() <-chan struct{} { - return m.s.StopNotify() +func (m *Member) StopNotify() <-chan struct{} { + return m.Server.StopNotify() } // Restart starts the member using the preserved data dir. 
-func (m *member) Restart(t testutil.TB) error { +func (m *Member) Restart(t testutil.TB) error { m.Logger.Info( "restarting a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) newPeerListeners := make([]net.Listener, 0) for _, ln := range m.PeerListeners { @@ -1146,7 +1202,7 @@ func (m *member) Restart(t testutil.TB) error { } m.ClientListeners = newClientListeners - if m.grpcListener != nil { + if m.GrpcListener != nil { if err := m.listenGRPC(); err != nil { t.Fatal(err) } @@ -1158,23 +1214,23 @@ func (m *member) Restart(t testutil.TB) error { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), zap.Error(err), ) return err } // Terminate stops the member and removes the data dir. -func (m *member) Terminate(t testutil.TB) { +func (m *Member) Terminate(t testutil.TB) { m.Logger.Info( "terminating a member", zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) m.Close() - if !m.keepDataDirTerminate { + if !m.KeepDataDirTerminate { if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil { t.Fatal(err) } @@ -1184,12 +1240,12 @@ func (m *member) Terminate(t testutil.TB) { zap.String("name", m.Name), zap.Strings("advertise-peer-urls", m.PeerURLs.StringSlice()), zap.Strings("listen-client-urls", m.ClientURLs.StringSlice()), - zap.String("grpc-address", m.grpcAddr), + zap.String("grpc-url", m.GrpcURL), ) } // Metric gets the metric value for a member -func (m *member) Metric(metricName string, expectLabels ...string) (string, error) { +func (m *Member) Metric(metricName string, expectLabels ...string) (string, error) { cfgtls := transport.TLSInfo{} tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second) if err != nil { @@ -1201,7 +1257,7 @@ func (m *member) Metric(metricName string, expectLabels ...string) (string, erro return "", err } defer resp.Body.Close() - b, rerr := ioutil.ReadAll(resp.Body) + b, rerr := io.ReadAll(resp.Body) if rerr != nil { return "", rerr } @@ -1226,50 +1282,28 @@ func (m *member) Metric(metricName string, expectLabels ...string) (string, erro } // InjectPartition drops connections from m to others, vice versa. -func (m *member) InjectPartition(t testutil.TB, others ...*member) { +func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.CutPeer(other.s.ID()) - other.s.CutPeer(m.s.ID()) - t.Logf("network partition injected between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.CutPeer(other.Server.ID()) + other.Server.CutPeer(m.Server.ID()) + t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } // RecoverPartition recovers connections from m to others, vice versa. 
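The matching RecoverPartition helper follows below. As a usage illustration only (not part of this diff), a hedged sketch of how an in-package test might combine InjectPartition with CheckLeaderTransition; it assumes a three-member cluster and, for brevity, that Members[0] currently leads:

```go
import "testing"

// Hypothetical in-package test: partition the assumed leader and wait
// for the remaining quorum to elect a different one.
func TestLeaderMovesAfterPartition(t *testing.T) {
	clus := NewCluster(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	oldLead := clus.Members[1].Server.Lead()
	clus.Members[0].InjectPartition(t, clus.Members[1], clus.Members[2])

	// Blocks until Members[1] reports a different, non-zero leader.
	newLead := CheckLeaderTransition(clus.Members[1], oldLead)
	t.Logf("leadership moved: %x -> %x", oldLead, newLead)

	clus.Members[0].RecoverPartition(t, clus.Members[1], clus.Members[2])
}
```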
-func (m *member) RecoverPartition(t testutil.TB, others ...*member) { +func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.s.MendPeer(other.s.ID()) - other.s.MendPeer(m.s.ID()) - t.Logf("network partition between: %v <-> %v", m.s.ID(), other.s.ID()) + m.Server.MendPeer(other.Server.ID()) + other.Server.MendPeer(m.Server.ID()) + t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID()) } } -func (m *member) ReadyNotify() <-chan struct{} { - return m.s.ReadyNotify() -} - -func MustNewHTTPClient(t testutil.TB, eps []string, tls *transport.TLSInfo) client.Client { - cfgtls := transport.TLSInfo{} - if tls != nil { - cfgtls = *tls - } - cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps} - c, err := client.New(cfg) - if err != nil { - t.Fatal(err) - } - return c +func (m *Member) ReadyNotify() <-chan struct{} { + return m.Server.ReadyNotify() } -func mustNewTransport(t testutil.TB, tlsInfo transport.TLSInfo) *http.Transport { - // tick in integration test is short, so 1s dial timeout could play well. - tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout) - if err != nil { - t.Fatal(err) - } - return tr -} - -type SortableMemberSliceByPeerURLs []client.Member +type SortableMemberSliceByPeerURLs []*pb.Member func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) } func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { @@ -1277,74 +1311,100 @@ func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool { } func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -type ClusterV3 struct { - *cluster - - mu sync.Mutex - clients []*clientv3.Client -} - -// NewClusterV3 returns a launched cluster with a grpc client connection -// for each cluster member. -func NewClusterV3(t testutil.TB, cfg *ClusterConfig) *ClusterV3 { +// NewCluster returns a launched Cluster with a grpc client connection +// for each Cluster member. 
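The NewCluster implementation follows. For orientation, a hedged sketch of the lifecycle it enables now that each launched Member carries its own Client; written as if inside this integration package, with an illustrative key/value:

```go
import (
	"context"
	"testing"
)

// Hypothetical test: launch, use a per-member client, tear down.
func TestPutGet(t *testing.T) {
	clus := NewCluster(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
	defer cancel()

	cli := clus.RandClient() // a Member's Client, created during Launch
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		t.Fatal(err)
	}
	resp, err := cli.Get(ctx, "foo")
	if err != nil {
		t.Fatal(err)
	}
	if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" {
		t.Fatalf("unexpected response: %+v", resp)
	}
}
```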
+func NewCluster(t testutil.TB, cfg *ClusterConfig) *Cluster { t.Helper() assertInTestContext(t) - cfg.UseGRPC = true + testutil.SkipTestIfShortMode(t, "Cannot start etcd Cluster in --short tests") - clus := &ClusterV3{ - cluster: NewClusterByConfig(t, cfg), + c := &Cluster{Cfg: cfg} + ms := make([]*Member, cfg.Size) + for i := 0; i < cfg.Size; i++ { + ms[i] = c.mustNewMember(t) } - clus.Launch(t) - - if !cfg.SkipCreatingClient { - for _, m := range clus.Members { - client, err := NewClientV3(m) - if err != nil { - t.Fatalf("cannot create client: %v", err) - } - clus.clients = append(clus.clients, client) - } + c.Members = ms + if err := c.fillClusterForMembers(); err != nil { + t.Fatal(err) } + c.Launch(t) - return clus + return c } -func (c *ClusterV3) TakeClient(idx int) { +func (c *Cluster) TakeClient(idx int) { c.mu.Lock() - c.clients[idx] = nil + c.Members[idx].Client = nil c.mu.Unlock() } -func (c *ClusterV3) Terminate(t testutil.TB) { +func (c *Cluster) Terminate(t testutil.TB) { c.mu.Lock() - for _, client := range c.clients { - if client == nil { - continue - } - if err := client.Close(); err != nil { + if c.clusterClient != nil { + if err := c.clusterClient.Close(); err != nil { t.Error(err) } } c.mu.Unlock() - c.cluster.Terminate(t) + for _, m := range c.Members { + if m.Client != nil { + m.Client.Close() + } + } + var wg sync.WaitGroup + wg.Add(len(c.Members)) + for _, m := range c.Members { + go func(mm *Member) { + defer wg.Done() + mm.Terminate(t) + }(m) + } + wg.Wait() } -func (c *ClusterV3) RandClient() *clientv3.Client { - return c.clients[rand.Intn(len(c.clients))] +func (c *Cluster) RandClient() *clientv3.Client { + return c.Members[rand.Intn(len(c.Members))].Client } -func (c *ClusterV3) Client(i int) *clientv3.Client { - return c.clients[i] +func (c *Cluster) Client(i int) *clientv3.Client { + return c.Members[i].Client +} + +func (c *Cluster) Endpoints() []string { + var endpoints []string + for _, m := range c.Members { + endpoints = append(endpoints, m.GrpcURL) + } + return endpoints +} + +func (c *Cluster) ClusterClient() (client *clientv3.Client, err error) { + if c.clusterClient == nil { + endpoints := []string{} + for _, m := range c.Members { + endpoints = append(endpoints, m.GrpcURL) + } + cfg := clientv3.Config{ + Endpoints: endpoints, + DialTimeout: 5 * time.Second, + DialOptions: []grpc.DialOption{grpc.WithBlock()}, + } + c.clusterClient, err = newClientV3(cfg) + if err != nil { + return nil, err + } + } + return c.clusterClient, nil } // NewClientV3 creates a new grpc client connection to the member -func (c *ClusterV3) NewClientV3(memberIndex int) (*clientv3.Client, error) { +func (c *Cluster) NewClientV3(memberIndex int) (*clientv3.Client, error) { return NewClientV3(c.Members[memberIndex]) } -func makeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client { +func makeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client, chooseMemberIndex func() int) func() *clientv3.Client { var mu sync.Mutex *clients = nil return func() *clientv3.Client { @@ -1361,13 +1421,13 @@ func makeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client, ch // MakeSingleNodeClients creates factory of clients that all connect to member 0. // All the created clients are put on the 'clients' list. The factory is thread-safe. 
-func MakeSingleNodeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client { +func MakeSingleNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { return makeClients(t, clus, clients, func() int { return 0 }) } // MakeMultiNodeClients creates factory of clients that all connect to random members. // All the created clients are put on the 'clients' list. The factory is thread-safe. -func MakeMultiNodeClients(t testutil.TB, clus *ClusterV3, clients *[]*clientv3.Client) func() *clientv3.Client { +func MakeMultiNodeClients(t testutil.TB, clus *Cluster, clients *[]*clientv3.Client) func() *clientv3.Client { return makeClients(t, clus, clients, func() int { return rand.Intn(len(clus.Members)) }) } @@ -1380,27 +1440,27 @@ func CloseClients(t testutil.TB, clients []*clientv3.Client) { } } -type grpcAPI struct { - // Cluster is the cluster API for the client's connection. +type GrpcAPI struct { + // Cluster is the Cluster API for the client's connection. Cluster pb.ClusterClient - // KV is the keyvalue API for the client's connection. + // KV is the keyvalue API for the client's connection. KV pb.KVClient - // Lease is the lease API for the client's connection. + // Lease is the lease API for the client's connection. Lease pb.LeaseClient - // Watch is the watch API for the client's connection. + // Watch is the watch API for the client's connection. Watch pb.WatchClient - // Maintenance is the maintenance API for the client's connection. + // Maintenance is the maintenance API for the client's connection. Maintenance pb.MaintenanceClient - // Auth is the authentication API for the client's connection. + // Auth is the authentication API for the client's connection. Auth pb.AuthClient - // Lock is the lock API for the client's connection. + // Lock is the lock API for the client's connection. Lock lockpb.LockClient - // Election is the election API for the client's connection. + // Election is the election API for the client's connection. Election epb.ElectionClient } -// GetLearnerMembers returns the list of learner members in cluster using MemberList API. -func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) { +// GetLearnerMembers returns the list of learner members in Cluster using MemberList API. +func (c *Cluster) GetLearnerMembers() ([]*pb.Member, error) { cli := c.Client(0) resp, err := cli.MemberList(context.Background()) if err != nil { @@ -1415,13 +1475,13 @@ func (c *ClusterV3) GetLearnerMembers() ([]*pb.Member, error) { return learners, nil } -// AddAndLaunchLearnerMember creates a leaner member, adds it to cluster +// AddAndLaunchLearnerMember creates a learner member, adds it to Cluster // via v3 MemberAdd API, and then launches the new member. 
-func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) { +func (c *Cluster) AddAndLaunchLearnerMember(t testutil.TB) { m := c.mustNewMember(t) - m.isLearner = true + m.IsLearner = true - scheme := schemeFromTLSInfo(c.cfg.PeerTLS) + scheme := SchemeFromTLSInfo(c.Cfg.PeerTLS) peerURLs := []string{scheme + "://" + m.PeerListeners[0].Addr().String()} cli := c.Client(0) @@ -1446,15 +1506,15 @@ func (c *ClusterV3) AddAndLaunchLearnerMember(t testutil.TB) { c.waitMembersMatch(t) } -// getMembers returns a list of members in cluster, in format of etcdserverpb.Member -func (c *ClusterV3) getMembers() []*pb.Member { +// getMembers returns a list of members in Cluster, in the format of etcdserverpb.Member +func (c *Cluster) getMembers() []*pb.Member { var mems []*pb.Member for _, m := range c.Members { mem := &pb.Member{ Name: m.Name, PeerURLs: m.PeerURLs.StringSlice(), ClientURLs: m.ClientURLs.StringSlice(), - IsLearner: m.isLearner, + IsLearner: m.IsLearner, } mems = append(mems, mem) } @@ -1462,29 +1522,29 @@ func (c *ClusterV3) getMembers() []*pb.Member { } // waitMembersMatch waits until v3rpc MemberList returns the 'same' members info as the -// local 'c.Members', which is the local recording of members in the testing cluster. With +// local 'c.Members', which is the local recording of members in the testing Cluster. With // the exception that the local recording c.Members does not have info on Member.ID, which -// is generated when the member is been added to cluster. +// is generated when the member has been added to the Cluster. // // Note: // A successful match means the Member.clientURLs are matched. This means member has already -// finished publishing its server attributes to cluster. Publishing attributes is a cluster-wide +// finished publishing its server attributes to the Cluster. Publishing attributes is a Cluster-wide // write request (in v2 server). Therefore, at this point, any raft log entries prior to this // would have already been applied. // -// If a new member was added to an existing cluster, at this point, it has finished publishing -// its own server attributes to the cluster. And therefore by the same argument, it has already +// If a new member was added to an existing Cluster, at this point, it has finished publishing +// its own server attributes to the Cluster. And therefore by the same argument, it has already // applied the raft log entries (especially those of type raftpb.ConfChangeType). At this point, -// the new member has the correct view of the cluster configuration. +// the new member has the correct view of the Cluster configuration. // // Special note on learner member: -// Learner member is only added to a cluster via v3rpc MemberAdd API (as of v3.4). When starting -// the learner member, its initial view of the cluster created by peerURLs map does not have info +// Learner member is only added to a Cluster via v3rpc MemberAdd API (as of v3.4). When starting +// the learner member, its initial view of the Cluster created by peerURLs map does not have info // on whether or not the new member itself is learner. But at this point, a successful match does // indicate that the new learner member has applied the raftpb.ConfChangeAddLearnerNode entry -// which was used to add the learner itself to the cluster, and therefore it has the correct info +// which was used to add the learner itself to the Cluster, and therefore it has the correct info // on learner. 
-func (c *ClusterV3) waitMembersMatch(t testutil.TB) { +func (c *Cluster) waitMembersMatch(t testutil.TB) { wMembers := c.getMembers() sort.Sort(SortableProtoMemberSliceByPeerURLs(wMembers)) cli := c.Client(0) @@ -1505,7 +1565,7 @@ func (c *ClusterV3) waitMembersMatch(t testutil.TB) { return } - time.Sleep(tickDuration) + time.Sleep(TickDuration) } } @@ -1518,9 +1578,9 @@ func (p SortableProtoMemberSliceByPeerURLs) Less(i, j int) bool { func (p SortableProtoMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // MustNewMember creates a new member instance based on the response of V3 Member Add API. -func (c *ClusterV3) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *member { +func (c *Cluster) MustNewMember(t testutil.TB, resp *clientv3.MemberAddResponse) *Member { m := c.mustNewMember(t) - m.isLearner = resp.Member.IsLearner + m.IsLearner = resp.Member.IsLearner m.NewCluster = false m.InitialPeerURLsMap = types.URLsMap{} diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster_direct.go b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_direct.go similarity index 88% rename from vendor/go.etcd.io/etcd/tests/v3/integration/cluster_direct.go rename to vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_direct.go index 67daf7caea..dad4875251 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster_direct.go +++ b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_direct.go @@ -22,13 +22,12 @@ import ( "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb" "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb" - "go.uber.org/zap" ) const ThroughProxy = false -func toGRPC(c *clientv3.Client) grpcAPI { - return grpcAPI{ +func ToGRPC(c *clientv3.Client) GrpcAPI { + return GrpcAPI{ pb.NewClusterClient(c.ActiveConnection()), pb.NewKVClient(c.ActiveConnection()), pb.NewLeaseClient(c.ActiveConnection()), @@ -40,7 +39,6 @@ func toGRPC(c *clientv3.Client) grpcAPI { } } -func newClientV3(cfg clientv3.Config, lg *zap.Logger) (*clientv3.Client, error) { - cfg.Logger = lg +func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { return clientv3.New(cfg) } diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster_proxy.go b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_proxy.go similarity index 94% rename from vendor/go.etcd.io/etcd/tests/v3/integration/cluster_proxy.go rename to vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_proxy.go index e8549eea3f..a5266d09ed 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/cluster_proxy.go +++ b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/cluster_proxy.go @@ -25,7 +25,6 @@ import ( "go.etcd.io/etcd/client/v3/namespace" "go.etcd.io/etcd/server/v3/proxy/grpcproxy" "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter" - "go.uber.org/zap" ) const ThroughProxy = true @@ -40,13 +39,13 @@ const proxyNamespace = "proxy-namespace" type grpcClientProxy struct { ctx context.Context ctxCancel func() - grpc grpcAPI + grpc GrpcAPI wdonec <-chan struct{} kvdonec <-chan struct{} lpdonec <-chan struct{} } -func toGRPC(c *clientv3.Client) grpcAPI { +func ToGRPC(c *clientv3.Client) GrpcAPI { pmu.Lock() defer pmu.Unlock() @@ -75,7 +74,7 @@ func toGRPC(c *clientv3.Client) grpcAPI { lockp := grpcproxy.NewLockProxy(c) electp := grpcproxy.NewElectionProxy(c) - grpc := grpcAPI{ + grpc := GrpcAPI{ adapter.ClusterServerToClusterClient(clp), adapter.KvServerToKvClient(kvp), adapter.LeaseServerToLeaseClient(lp), 
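Since the unexported toGRPC/grpcAPI become the exported ToGRPC/GrpcAPI in this change, external test packages can issue raw etcdserverpb RPCs against a launched cluster. A hedged sketch of that usage (pb stands for etcdserverpb; whether the call traverses the grpc-proxy depends on which of cluster_direct.go or cluster_proxy.go was compiled in):

```go
import (
	"context"
	"testing"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/tests/v3/framework/integration"
)

// Hypothetical external test driving the KV service directly.
func TestRawRange(t *testing.T) {
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	kv := integration.ToGRPC(clus.RandClient()).KV
	req := &pb.RangeRequest{Key: []byte("foo")}
	if _, err := kv.Range(context.TODO(), req); err != nil {
		t.Fatal(err)
	}
}
```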
@@ -108,13 +107,12 @@ func (pc *proxyCloser) Close() error { return err } -func newClientV3(cfg clientv3.Config, lg *zap.Logger) (*clientv3.Client, error) { - cfg.Logger = lg +func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { c, err := clientv3.New(cfg) if err != nil { return nil, err } - rpc := toGRPC(c) + rpc := ToGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV, c) pmu.Lock() lc := c.Lease diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/testing.go b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/testing.go similarity index 98% rename from vendor/go.etcd.io/etcd/tests/v3/integration/testing.go rename to vendor/go.etcd.io/etcd/tests/v3/framework/integration/testing.go index e67375180b..ca4a27f0d5 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/testing.go +++ b/vendor/go.etcd.io/etcd/tests/v3/framework/integration/testing.go @@ -129,8 +129,8 @@ func NewEmbedConfig(t testing.TB, name string) *embed.Config { } func NewClient(t testing.TB, cfg clientv3.Config) (*clientv3.Client, error) { - if cfg.Logger != nil { - cfg.Logger = zaptest.NewLogger(t) + if cfg.Logger == nil { + cfg.Logger = zaptest.NewLogger(t).Named("client") } return clientv3.New(cfg) } diff --git a/vendor/go.etcd.io/etcd/tests/v3/integration/lazy_cluster.go b/vendor/go.etcd.io/etcd/tests/v3/integration/lazy_cluster.go index 4cc7ae765d..02fc759dc7 100644 --- a/vendor/go.etcd.io/etcd/tests/v3/integration/lazy_cluster.go +++ b/vendor/go.etcd.io/etcd/tests/v3/integration/lazy_cluster.go @@ -22,6 +22,7 @@ import ( "go.etcd.io/etcd/client/pkg/v3/testutil" "go.etcd.io/etcd/client/pkg/v3/transport" + "go.etcd.io/etcd/tests/v3/framework/integration" ) // Infrastructure to provision a single shared cluster for tests - only @@ -42,7 +43,7 @@ type LazyCluster interface { EndpointsV3() []string // Cluster - calls to this method might initialize the cluster. - Cluster() *ClusterV3 + Cluster() *integration.Cluster // Transport - call to this method might initialize the cluster. 
Transport() *http.Transport @@ -53,8 +54,8 @@ type LazyCluster interface { } type lazyCluster struct { - cfg ClusterConfig - cluster *ClusterV3 + cfg integration.ClusterConfig + cluster *integration.Cluster transport *http.Transport once sync.Once tb testutil.TB @@ -64,12 +65,12 @@ type lazyCluster struct { // NewLazyCluster returns a new test cluster handler that gets created on the // first call to GetEndpoints() or GetTransport() func NewLazyCluster() LazyCluster { - return NewLazyClusterWithConfig(ClusterConfig{Size: 1}) + return NewLazyClusterWithConfig(integration.ClusterConfig{Size: 1}) } // NewLazyClusterWithConfig returns a new test cluster handler that gets created // on the first call to GetEndpoints() or GetTransport() -func NewLazyClusterWithConfig(cfg ClusterConfig) LazyCluster { +func NewLazyClusterWithConfig(cfg integration.ClusterConfig) LazyCluster { tb, closer := testutil.NewTestingTBProthesis("lazy_cluster") return &lazyCluster{cfg: cfg, tb: tb, closer: closer} } @@ -81,7 +82,7 @@ func (lc *lazyCluster) mustLazyInit() { if err != nil { log.Fatal(err) } - lc.cluster = NewClusterV3(lc.tb, &lc.cfg) + lc.cluster = integration.NewCluster(lc.tb, &lc.cfg) }) } @@ -105,7 +106,7 @@ func (lc *lazyCluster) EndpointsV3() []string { return lc.Cluster().Client(0).Endpoints() } -func (lc *lazyCluster) Cluster() *ClusterV3 { +func (lc *lazyCluster) Cluster() *integration.Cluster { lc.mustLazyInit() return lc.cluster } diff --git a/vendor/go.etcd.io/etcd/v3/.gitignore b/vendor/go.etcd.io/etcd/v3/.gitignore index 75dbff8e8a..ab1bbe4ceb 100644 --- a/vendor/go.etcd.io/etcd/v3/.gitignore +++ b/vendor/go.etcd.io/etcd/v3/.gitignore @@ -14,6 +14,7 @@ *.test hack/tls-setup/certs .idea +/contrib/mixin/manifests /contrib/raftexample/raftexample /contrib/raftexample/raftexample-* /vendor @@ -21,3 +22,5 @@ hack/tls-setup/certs *.tmp *.bak .gobincache/ +/Documentation/dev-guide/api_reference_v3.md +/Documentation/dev-guide/api_concurrency_reference_v3.md diff --git a/vendor/go.etcd.io/etcd/v3/.travis.yml b/vendor/go.etcd.io/etcd/v3/.travis.yml deleted file mode 100644 index 6cf103e809..0000000000 --- a/vendor/go.etcd.io/etcd/v3/.travis.yml +++ /dev/null @@ -1,51 +0,0 @@ -language: go -go_import_path: go.etcd.io/etcd/v3 - -sudo: required - -services: docker - -go: - - "1.16.3" - - tip - -notifications: - on_success: never - on_failure: never - -env: - matrix: - - TARGET=linux-amd64-coverage - - TARGET=linux-amd64-fmt-unit-go-tip-2-cpu - -matrix: - fast_finish: true - allow_failures: - - go: "1.16.3" - env: TARGET=linux-amd64-coverage - - go: tip - env: TARGET=linux-amd64-fmt-unit-go-tip-2-cpu - exclude: - - go: tip - env: TARGET=linux-amd64-coverage - - go: "1.16.3" - env: TARGET=linux-amd64-fmt-unit-go-tip-2-cpu - -before_install: - - if [[ $TRAVIS_GO_VERSION == 1.* ]]; then docker pull gcr.io/etcd-development/etcd-test:go${TRAVIS_GO_VERSION}; fi - -install: - - date - -script: - - date - - echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}" - - > - case "${TARGET}" in - linux-amd64-coverage) - sudo HOST_TMP_DIR=/tmp TEST_OPTS="VERBOSE='1'" make docker-test-coverage - ;; - linux-amd64-fmt-unit-go-tip-2-cpu) - GOARCH=amd64 PASSES='fmt unit' CPU='2' RACE='false' ./test.sh -p=2 - ;; - esac diff --git a/vendor/go.etcd.io/etcd/v3/.words b/vendor/go.etcd.io/etcd/v3/.words deleted file mode 100644 index da36ba44ba..0000000000 --- a/vendor/go.etcd.io/etcd/v3/.words +++ /dev/null @@ -1,116 +0,0 @@ -accessors -addrConns -args -atomics -backoff -BackoffFunc -BackoffLinearWithJitter -Balancer -BidiStreams -blackhole 
-blackholed -CallOptions -cancelable -cancelation -ccBalancerWrapper -clientURLs -clusterName -cluster_proxy -consistentIndex -ConsistentIndexGetter -DefaultMaxRequestBytes -defragment -defragmenting -deleter -dev -/dev/null -dev/null -DNS -errClientDisconnected -ErrCodeEnhanceYourCalm -ErrConnClosing -ErrRequestTooLarge -ErrTimeout -etcd -FIXME -github -GoAway -goroutine -goroutines -gRPC -grpcAddr -hasleader -healthcheck -hostname -iff -inflight -InfoLevel -jitter -jitter -jitter -keepalive -Keepalive -KeepAlive -keepalives -keyspace -lexically -lexicographically -linearizable -linearization -linearized -liveness -localhost -__lostleader -MaxRequestBytes -MiB -middleware -mutators -mutex -nils -nondeterministically -nop -OutputWALDir -parsedTarget -passthrough -PermitWithoutStream -prefetching -prometheus -protobuf -racey -rafthttp -rebalanced -reconnection -repin -ResourceExhausted -retriable -retriable -rpc -RPC -RPCs -saveWALAndSnap -serializable -ServerStreams -SHA -SRV -statusError -subConn -subconns -SubConns -teardown -TestBalancerDoNotBlockOnClose -todo -too_many_pings -transactional -transferee -transientFailure -unbuffered -uncontended -unfreed -unlisting -unprefixed -WatchProgressNotifyInterval -WAL -WithBackoff -WithDialer -WithMax -WithRequireLeader diff --git a/vendor/go.etcd.io/etcd/v3/ADOPTERS.md b/vendor/go.etcd.io/etcd/v3/ADOPTERS.md new file mode 100644 index 0000000000..c6c294637d --- /dev/null +++ b/vendor/go.etcd.io/etcd/v3/ADOPTERS.md @@ -0,0 +1,250 @@ +--- +title: Production users +--- + +This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on how etcd is working in the field and update this list. + +## All Kubernetes Users + +- *Application*: https://kubernetes.io/ +- *Environments*: AWS, OpenStack, Azure, Google Cloud, Huawei Cloud, Bare Metal, etc + +**This is a meta user; please feel free to document specific Kubernetes clusters!** + +All Kubernetes clusters use etcd as their primary data store. This means etcd's users include such companies as [Niantic, Inc Pokemon Go](https://cloudplatform.googleblog.com/2016/09/bringing-Pokemon-GO-to-life-on-Google-Cloud.html), [Box](https://blog.box.com/blog/kubernetes-box-microservices-maximum-velocity/), [CoreOS](https://coreos.com/tectonic), [Ticketmaster](https://www.youtube.com/watch?v=wqXVKneP0Hg), [Salesforce](https://www.salesforce.com) and many many more. + +## discovery.etcd.io + +- *Application*: https://github.com/coreos/discovery.etcd.io +- *Launched*: Feb. 2014 +- *Cluster Size*: 5 members, 5 discovery proxies +- *Order of Data Size*: 100s of Megabytes +- *Operator*: CoreOS, brandon.philips@coreos.com +- *Environment*: AWS +- *Backups*: Periodic async to S3 + +discovery.etcd.io is the longest continuously running etcd backed service that we know about. It is the basis of automatic cluster bootstrap and was launched in Feb. 2014: https://coreos.com/blog/etcd-0.3.0-released/. 
+ +## OpenTable + +- *Application*: OpenTable internal service discovery and cluster configuration management +- *Launched*: May 2014 +- *Cluster Size*: 3 members each in 6 independent clusters; approximately 50 nodes reading / writing +- *Order of Data Size*: 10s of MB +- *Operator*: OpenTable, Inc; sschlansker@opentable.com +- *Environment*: AWS, VMWare +- *Backups*: None, all data can be re-created if necessary. + +## cycoresys.com + +- *Application*: multiple +- *Launched*: Jul. 2014 +- *Cluster Size*: 3 members, _n_ proxies +- *Order of Data Size*: 100s of kilobytes +- *Operator*: CyCore Systems, Inc, sys@cycoresys.com +- *Environment*: Baremetal +- *Backups*: Periodic sync to Ceph RadosGW and DigitalOcean VM + +CyCore Systems provides architecture and engineering for computing systems. This cluster provides microservices, virtual machines, databases, storage clusters to a number of clients. It is built on CoreOS machines, with each machine in the cluster running etcd as a peer or proxy. + +## Radius Intelligence + +- *Application*: multiple internal tools, Kubernetes clusters, bootstrappable system configs +- *Launched*: June 2015 +- *Cluster Size*: 2 clusters of 5 and 3 members; approximately a dozen nodes read/write +- *Order of Data Size*: 100s of kilobytes +- *Operator*: Radius Intelligence; jcderr@radius.com +- *Environment*: AWS, CoreOS, Kubernetes +- *Backups*: None, all data can be recreated if necessary. + +Radius Intelligence uses Kubernetes running CoreOS to containerize and scale internal toolsets. Examples include running [JetBrains TeamCity][teamcity] and internal AWS security and cost reporting tools. etcd clusters back these clusters as well as provide some basic environment bootstrapping configuration keys. + +## Vonage + +- *Application*: kubernetes, vault backend, system configuration for microservices, scheduling, locks (future - service discovery) +- *Launched*: August 2015 +- *Cluster Size*: 2 clusters of 5 members in 2 DCs, n local proxies 1-to-1 with microservice, (ssl and SRV look up) +- *Order of Data Size*: kilobytes +- *Operator*: Vonage [devAdmin][raoofm] +- *Environment*: VMWare, AWS +- *Backups*: Daily snapshots on VMs. Backups done for upgrades. + +## PD + +- *Application*: embed etcd +- *Launched*: Mar 2016 +- *Cluster Size*: 3 or 5 members +- *Order of Data Size*: megabytes +- *Operator*: PingCAP, Inc. +- *Environment*: Bare Metal, AWS, etc. +- *Backups*: None. + +PD(Placement Driver) is the central controller in the TiDB cluster. It saves the cluster meta information, schedule the data, allocate the global unique timestamp for the distributed transaction, etc. It embeds etcd to supply high availability and auto failover. + +## Huawei + +- *Application*: System configuration for overlay network (Canal) +- *Launched*: June 2016 +- *Cluster Size*: 3 members for each cluster +- *Order of Data Size*: kilobytes +- *Operator*: Huawei Euler Department +- *Environment*: [Huawei Cloud](http://www.hwclouds.com/product/cce.html) +- *Backups*: None, all data can be recreated if necessary. + +[teamcity]: https://www.jetbrains.com/teamcity/ +[raoofm]:https://github.com/raoofm + +## Qiniu Cloud + +- *Application*: system configuration for microservices, distributed locks +- *Launched*: Jan. 
2016 +- *Cluster Size*: 3 members each with several clusters +- *Order of Data Size*: kilobytes +- *Operator*: Pandora, chenchao@qiniu.com +- *Environment*: Baremetal +- *Backups*: None, all data can be recreated if necessary + +## QingCloud + +- *Application*: [QingCloud][qingcloud] appcenter cluster for service discovery as [metad][metad] backend. +- *Launched*: December 2016 +- *Cluster Size*: 1 cluster of 3 members per user. +- *Order of Data Size*: kilobytes +- *Operator*: [yunify][yunify] +- *Environment*: QingCloud IaaS +- *Backups*: None, all data can be recreated if necessary. + +[metad]:https://github.com/yunify/metad +[yunify]:https://github.com/yunify +[qingcloud]:https://qingcloud.com/ + + +## Yandex + +- *Application*: system configuration for services, service discovery +- *Launched*: March 2016 +- *Cluster Size*: 3 clusters of 5 members +- *Order of Data Size*: several gigabytes +- *Operator*: Yandex; [nekto0n][nekto0n] +- *Environment*: Bare Metal +- *Backups*: None + +[nekto0n]:https://github.com/nekto0n + +## Tencent Games + +- *Application*: Meta data and configuration data for service discovery, Kubernetes, etc. +- *Launched*: Jan. 2015 +- *Cluster Size*: 3 members each with 10s of clusters +- *Order of Data Size*: 10s of Megabytes +- *Operator*: Tencent Game Operations Department +- *Environment*: Baremetal +- *Backups*: Periodic sync to backup server + +In Tencent games, we use Docker and Kubernetes to deploy and run our applications, and use etcd to save meta data for service discovery, Kubernetes, etc. + +## Hyper.sh + +- *Application*: Kubernetes, distributed locks, etc. +- *Launched*: April 2016 +- *Cluster Size*: 1 cluster of 3 members +- *Order of Data Size*: 10s of MB +- *Operator*: Hyper.sh +- *Environment*: Baremetal +- *Backups*: None, all data can be recreated if necessary. + +In [hyper.sh][hyper.sh], the container service is backed by [hypernetes][hypernetes], a multi-tenant kubernetes distro. Moreover, we use etcd to coordinate the multiple manage services and store global meta data. + +[hypernetes]:https://github.com/hyperhq/hypernetes +[Hyper.sh]:https://www.hyper.sh + +## Meitu +- *Application*: system configuration for services, service discovery, kubernetes in test environment +- *Launched*: October 2015 +- *Cluster Size*: 1 cluster of 3 members +- *Order of Data Size*: megabytes +- *Operator*: Meitu, hxj@meitu.com, [shafreeck][shafreeck] +- *Environment*: Bare Metal +- *Backups*: None, all data can be recreated if necessary. + +[shafreeck]:https://github.com/shafreeck + +## Grab +- *Application*: system configuration for services, service discovery +- *Launched*: June 2016 +- *Cluster Size*: 1 cluster of 7 members +- *Order of Data Size*: megabytes +- *Operator*: Grab, [taxitan][taxitan], [reterVision][reterVision] +- *Environment*: AWS +- *Backups*: None, all data can be recreated if necessary. + +[taxitan]:https://github.com/taxitan +[reterVision]:https://github.com/reterVision + +## DaoCloud.io + +- *Application*: container management +- *Launched*: Sep. 2015 +- *Cluster Size*: 1000+ deployments, each deployment contains a 3 node cluster. +- *Order of Data Size*: 100s of Megabytes +- *Operator*: daocloud.io +- *Environment*: Baremetal and virtual machines +- *Backups*: None, all data can be recreated if necessary. + +In [DaoCloud][DaoCloud], we use Docker and Swarm to deploy and run our applications, and we use etcd to save metadata for service discovery. 
+ [DaoCloud]:https://www.daocloud.io + +## Branch.io + +- *Application*: Kubernetes +- *Launched*: April 2016 +- *Cluster Size*: Multiple clusters, multiple sizes +- *Order of Data Size*: 100s of Megabytes +- *Operator*: branch.io +- *Environment*: AWS, Kubernetes +- *Backups*: EBS volume backups + +At [Branch][branch], we use kubernetes heavily as our core microservice platform for staging and production. + +[branch]: https://branch.io + +## Baidu Waimai + +- *Application*: SkyDNS, Kubernetes, UDC, CMDB and other distributed systems +- *Launched*: April 2016 +- *Cluster Size*: 3 clusters of 5 members +- *Order of Data Size*: several gigabytes +- *Operator*: Baidu Waimai Operations Department +- *Environment*: CentOS 6.5 +- *Backups*: backup scripts + +## Salesforce.com + +- *Application*: Kubernetes +- *Launched*: Jan 2017 +- *Cluster Size*: Multiple clusters of 3 members +- *Order of Data Size*: 100s of Megabytes +- *Operator*: Salesforce.com (krmayankk@github) +- *Environment*: BareMetal +- *Backups*: None, all data can be recreated + +## Hosted Graphite + +- *Application*: Service discovery, locking, ephemeral application data +- *Launched*: January 2017 +- *Cluster Size*: 2 clusters of 7 members +- *Order of Data Size*: Megabytes +- *Operator*: Hosted Graphite (sre@hostedgraphite.com) +- *Environment*: Bare Metal +- *Backups*: None, all data is considered ephemeral. + +## Transwarp + +- *Application*: Transwarp Data Cloud, Transwarp Operating System, Transwarp Data Hub, Sophon +- *Launched*: January 2016 +- *Cluster Size*: Multiple clusters, multiple sizes +- *Order of Data Size*: Megabytes +- *Operator*: Transwarp Operating System +- *Environment*: Bare Metal, Container +- *Backups*: backup scripts diff --git a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.amd64 b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.amd64 index 9bd425887c..67400b6968 100644 --- a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.amd64 +++ b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.amd64 @@ -1,4 +1,8 @@ -FROM k8s.gcr.io/build-image/debian-base:buster-v1.4.0 +FROM --platform=linux/amd64 busybox:1.34.1 as source +FROM --platform=linux/amd64 gcr.io/distroless/base-debian11 + +COPY --from=source /bin/sh /bin/sh +COPY --from=source /bin/mkdir /bin/mkdir ADD etcd /usr/local/bin/ ADD etcdctl /usr/local/bin/ diff --git a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.arm64 b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.arm64 index d04d79041a..b8ce477afd 100644 --- a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.arm64 +++ b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.arm64 @@ -1,4 +1,8 @@ -FROM k8s.gcr.io/build-image/debian-base-arm64:buster-v1.4.0 +FROM --platform=linux/arm64 busybox:1.34.1 as source +FROM --platform=linux/arm64 gcr.io/distroless/base-debian11 + +COPY --from=source /bin/sh /bin/sh +COPY --from=source /bin/mkdir /bin/mkdir ADD etcd /usr/local/bin/ ADD etcdctl /usr/local/bin/ diff --git a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.ppc64le b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.ppc64le index 51adb7ae3a..9cfe5d4333 100644 --- a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.ppc64le +++ b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.ppc64le @@ -1,4 +1,8 @@ -FROM k8s.gcr.io/build-image/debian-base-ppc64le:buster-v1.4.0 +FROM --platform=linux/ppc64le busybox:1.34.1 as source +FROM --platform=linux/ppc64le gcr.io/distroless/base-debian11 + +COPY --from=source /bin/sh /bin/sh +COPY --from=source /bin/mkdir /bin/mkdir ADD etcd /usr/local/bin/ ADD etcdctl /usr/local/bin/ diff --git 
a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.s390x b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.s390x index a96d45534c..d901b410c9 100644 --- a/vendor/go.etcd.io/etcd/v3/Dockerfile-release.s390x +++ b/vendor/go.etcd.io/etcd/v3/Dockerfile-release.s390x @@ -1,4 +1,9 @@ -FROM k8s.gcr.io/build-image/debian-base-s390x:buster-v1.4.0 +FROM --platform=linux/s390x busybox:1.34.1 as source +FROM --platform=linux/s390x gcr.io/distroless/base-debian11 + +COPY --from=source /bin/sh /bin/sh +COPY --from=source /bin/mkdir /bin/mkdir + ADD etcd /usr/local/bin/ ADD etcdctl /usr/local/bin/ diff --git a/vendor/go.etcd.io/etcd/v3/MAINTAINERS b/vendor/go.etcd.io/etcd/v3/MAINTAINERS index ecf320ad39..c91a77f403 100644 --- a/vendor/go.etcd.io/etcd/v3/MAINTAINERS +++ b/vendor/go.etcd.io/etcd/v3/MAINTAINERS @@ -9,11 +9,11 @@ # Please keep the list sorted. # MAINTAINERS -Brandon Philips (@philips) pkg:* -Gyuho Lee (@gyuho) pkg:* +Gyuho Lee (@gyuho) pkg:* Hitoshi Mitake (@mitake) pkg:* Jingyi Hu (@jingyih) pkg:* Joe Betz (@jpbetz) pkg:* +Marek Siarkowicz (@serathius) pkg:* Piotr Tabor (@ptabor) pkg:* Sahdev Zala (@spzala) pkg:* Sam Batschelet (@hexfusion) pkg:* @@ -22,3 +22,8 @@ Xiang Li (@xiang90) pkg:* Ben Darnell (@bdarnell) pkg:go.etcd.io/etcd/raft Tobias Grieger (@tbg) pkg:go.etcd.io/etcd/raft + +# REVIEWERS +Lili Cosic (lilic@) pkg:* +Wilson Wang (wilsonwang371@) pkg:* +Benjamin Wang (ahrtr@) pkg:* diff --git a/vendor/go.etcd.io/etcd/v3/Makefile b/vendor/go.etcd.io/etcd/v3/Makefile index bfd6741c60..07f183c74b 100644 --- a/vendor/go.etcd.io/etcd/v3/Makefile +++ b/vendor/go.etcd.io/etcd/v3/Makefile @@ -22,7 +22,7 @@ XARGS += rm -r .PHONY: build build: - GO_BUILD_FLAGS="-v" ./build.sh + GO_BUILD_FLAGS="-v" ./scripts/build.sh ./bin/etcd --version ./bin/etcdctl version ./bin/etcdutl version @@ -39,23 +39,7 @@ clean: rm -rf ./tests/e2e/default.proxy find ./ -name "127.0.0.1:*" -o -name "localhost:*" -o -name "*.log" -o -name "agent-*" -o -name "*.coverprofile" -o -name "testname-proxy-*" | $(XARGS) -docker-clean: - docker images - docker image prune --force - -docker-start: - service docker restart - -docker-kill: - docker kill `docker ps -q` || true - -docker-remove: - docker rm --force `docker ps -a -q` || true - docker rmi --force `docker images -q` || true - - - -GO_VERSION ?= 1.16.3 +GO_VERSION ?= 1.17.6 ETCD_VERSION ?= $(shell git rev-parse --short HEAD || echo "GitNotFound") TEST_SUFFIX = $(shell date +%s | base64 | head -c 15) @@ -96,31 +80,6 @@ pull-docker-test: $(info GO_VERSION: $(GO_VERSION)) docker pull gcr.io/etcd-development/etcd-test:go$(GO_VERSION) - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make compile-setup-gopath-with-docker-test - -compile-with-docker-test: - $(info GO_VERSION: $(GO_VERSION)) - docker run \ - --rm \ - --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \ - gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \ - /bin/bash -c "GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build.sh && ./bin/etcd --version" - -compile-setup-gopath-with-docker-test: - $(info GO_VERSION: $(GO_VERSION)) - docker run \ - --rm \ - --mount type=bind,source=`pwd`,destination=/etcd \ - gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && ETCD_SETUP_GOPATH=1 GO_BUILD_FLAGS=-v GOOS=linux GOARCH=amd64 ./build.sh && ./bin/etcd --version && rm -rf ./gopath" - - - # Example: # # Local machine: @@ -129,39 +88,29 @@ compile-setup-gopath-with-docker-test: # TEST_OPTS="PASSES='build unit release integration_e2e functional'" make test # 
TEST_OPTS="PASSES='build grpcproxy'" make test # -# Example (test with docker): -# make pull-docker-test -# TEST_OPTS="PASSES='fmt'" make docker-test -# TEST_OPTS="VERBOSE=2 PASSES='unit'" make docker-test -# -# Travis CI (test with docker): -# TEST_OPTS="PASSES='fmt bom dep build unit'" make docker-test -# -# Semaphore CI (test with docker): -# TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test -# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build unit release integration_e2e functional'" make docker-test -# TEST_OPTS="GOARCH=386 PASSES='build unit integration_e2e'" make docker-test -# -# grpc-proxy tests (test with docker): -# TEST_OPTS="PASSES='build grpcproxy'" make docker-test -# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make docker-test +# grpc-proxy tests: +# TEST_OPTS="PASSES='build grpcproxy'" make test +# HOST_TMP_DIR=/tmp TEST_OPTS="PASSES='build grpcproxy'" make test .PHONY: test test: $(info TEST_OPTS: $(TEST_OPTS)) $(info log-file: test-$(TEST_SUFFIX).log) - $(TEST_OPTS) ./test.sh 2>&1 | tee test-$(TEST_SUFFIX).log - ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log + $(TEST_OPTS) ./scripts/test.sh 2>&1 | tee test-$(TEST_SUFFIX).log + ! egrep "(--- FAIL:|FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log test-smoke: $(info log-file: test-$(TEST_SUFFIX).log) - PASSES="fmt build unit" ./test.sh 2<&1 | tee test-$(TEST_SUFFIX).log + PASSES="fmt build unit" ./scripts/test.sh 2<&1 | tee test-$(TEST_SUFFIX).log test-full: $(info log-file: test-$(TEST_SUFFIX).log) - PASSES="fmt build release unit integration functional e2e grpcproxy" ./test.sh 2<&1 | tee test-$(TEST_SUFFIX).log + PASSES="fmt build release unit integration functional e2e grpcproxy" ./scripts/test.sh 2<&1 | tee test-$(TEST_SUFFIX).log + +ensure-docker-test-image-exists: + make push-docker-test || echo "WARNING: Container Image not found in registry, building locally"; make build-docker-test -docker-test: +docker-test: ensure-docker-test-image-exists $(info GO_VERSION: $(GO_VERSION)) $(info ETCD_VERSION: $(ETCD_VERSION)) $(info TEST_OPTS: $(TEST_OPTS)) @@ -173,8 +122,8 @@ docker-test: $(TMP_DIR_MOUNT_FLAG) \ --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \ gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \ - /bin/bash -c "$(TEST_OPTS) ./test.sh 2>&1 | tee test-$(TEST_SUFFIX).log" - ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log + /bin/bash -c "$(TEST_OPTS) ./scripts/test.sh 2>&1 | tee test-$(TEST_SUFFIX).log" + ! egrep "(--- FAIL:|FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 test-$(TEST_SUFFIX).log docker-test-coverage: $(info GO_VERSION: $(GO_VERSION)) @@ -188,12 +137,10 @@ docker-test-coverage: --mount type=bind,source=`pwd`,destination=/go/src/go.etcd.io/etcd \ gcr.io/etcd-development/etcd-test:go$(GO_VERSION) \ /bin/bash ./scripts/codecov_upload.sh docker-test-coverage-$(TEST_SUFFIX).log \ - ! egrep "(--- FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log - + ! 
egrep "(--- FAIL:|FAIL:|DATA RACE|panic: test timed out|appears to have leaked)" -B50 -A10 docker-test-coverage-$(TEST_SUFFIX).log # Example: -# make compile-with-docker-test # ETCD_VERSION=v3-test make build-docker-release-main # ETCD_VERSION=v3-test make push-docker-release-main # gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com @@ -216,333 +163,3 @@ build-docker-release-main: push-docker-release-main: $(info ETCD_VERSION: $(ETCD_VERSION)) docker push gcr.io/etcd-development/etcd:$(ETCD_VERSION) - - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-static-ip-test -# -# gcloud auth configure-docker -# make push-docker-static-ip-test -# -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-static-ip-test -# -# make docker-static-ip-test-certs-run -# make docker-static-ip-test-certs-metrics-proxy-run - -build-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-static-ip/Dockerfile > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - --file ./tests/docker-static-ip/Dockerfile \ - $(TMP_DOCKERFILE) - -push-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) - -pull-docker-static-ip-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) - -docker-static-ip-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-static-ip/certs,destination=/certs \ - gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-static-ip-test-certs-metrics-proxy-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-static-ip/certs-metrics-proxy,destination=/certs-metrics-proxy \ - gcr.io/etcd-development/etcd-static-ip-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-metrics-proxy/run.sh && rm -rf m*.etcd" - - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-dns-test -# -# gcloud auth configure-docker -# make push-docker-dns-test -# -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-dns-test -# -# make docker-dns-test-insecure-run -# make docker-dns-test-certs-run -# make docker-dns-test-certs-gateway-run -# make docker-dns-test-certs-wildcard-run -# make docker-dns-test-certs-common-name-auth-run -# make docker-dns-test-certs-common-name-multi-run -# make docker-dns-test-certs-san-dns-run - -build-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' ./tests/docker-dns/Dockerfile > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - --file ./tests/docker-dns/Dockerfile \ - $(TMP_DOCKERFILE) - - docker run \ - --rm \ - --dns 127.0.0.1 \ - 
gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig etcd.local" - -push-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) - -pull-docker-dns-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) - -docker-dns-test-insecure-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/insecure,destination=/insecure \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /insecure/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs,destination=/certs \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-gateway-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-gateway,destination=/certs-gateway \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-wildcard-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-wildcard,destination=/certs-wildcard \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-common-name-auth-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-auth,destination=/certs-common-name-auth \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-common-name-auth/run.sh && rm -rf m*.etcd" - -docker-dns-test-certs-common-name-multi-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-common-name-multi,destination=/certs-common-name-multi \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-common-name-multi/run.sh && rm 
-rf m*.etcd" - -docker-dns-test-certs-san-dns-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns/certs-san-dns,destination=/certs-san-dns \ - gcr.io/etcd-development/etcd-dns-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-san-dns/run.sh && rm -rf m*.etcd" - - -# Example: -# make build-docker-test -# make compile-with-docker-test -# make build-docker-dns-srv-test -# gcloud auth configure-docker -# make push-docker-dns-srv-test -# gsutil -m acl ch -u allUsers:R -r gs://artifacts.etcd-development.appspot.com -# make pull-docker-dns-srv-test -# make docker-dns-srv-test-certs-run -# make docker-dns-srv-test-certs-gateway-run -# make docker-dns-srv-test-certs-wildcard-run - -build-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - --file ./tests/docker-dns-srv/Dockerfile \ - $(TMP_DOCKERFILE) - - docker run \ - --rm \ - --dns 127.0.0.1 \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "/etc/init.d/bind9 start && cat /dev/null >/etc/hosts && dig +noall +answer SRV _etcd-client-ssl._tcp.etcd.local && dig +noall +answer SRV _etcd-server-ssl._tcp.etcd.local && dig +noall +answer m1.etcd.local m2.etcd.local m3.etcd.local" - -push-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - docker push gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) - -pull-docker-dns-srv-test: - $(info GO_VERSION: $(GO_VERSION)) - docker pull gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) - -docker-dns-srv-test-certs-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs,destination=/certs \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs/run.sh && rm -rf m*.etcd" - -docker-dns-srv-test-certs-gateway-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-gateway,destination=/certs-gateway \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-gateway/run.sh && rm -rf m*.etcd" - -docker-dns-srv-test-certs-wildcard-run: - $(info GO_VERSION: $(GO_VERSION)) - $(info HOST_TMP_DIR: $(HOST_TMP_DIR)) - $(info TMP_DIR_MOUNT_FLAG: $(TMP_DIR_MOUNT_FLAG)) - docker run \ - --rm \ - --tty \ - --dns 127.0.0.1 \ - $(TMP_DIR_MOUNT_FLAG) \ - --mount type=bind,source=`pwd`/bin,destination=/etcd \ - --mount type=bind,source=`pwd`/tests/docker-dns-srv/certs-wildcard,destination=/certs-wildcard \ - gcr.io/etcd-development/etcd-dns-srv-test:go$(GO_VERSION) \ - /bin/bash -c "cd /etcd && /certs-wildcard/run.sh && rm -rf m*.etcd" - - - -# Example: -# make build-functional -# make build-docker-functional -# make 
push-docker-functional -# make pull-docker-functional - -build-functional: - $(info GO_VERSION: $(GO_VERSION)) - $(info ETCD_VERSION: $(ETCD_VERSION)) - ./tests/functional/build - ./bin/etcd-agent -help || true && \ - ./bin/etcd-proxy -help || true && \ - ./bin/etcd-runner --help || true && \ - ./bin/etcd-tester -help || true - -build-docker-functional: - $(info GO_VERSION: $(GO_VERSION)) - $(info ETCD_VERSION: $(ETCD_VERSION)) - @sed 's|REPLACE_ME_GO_VERSION|$(GO_VERSION)|g' > $(TMP_DOCKERFILE) - docker build \ - --network=host \ - --tag gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \ - --file ./tests/functional/Dockerfile \ - . - @mv ./tests/functional/Dockerfile.bak ./tests/functional/Dockerfile - - docker run \ - --rm \ - gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) \ - /bin/bash -c "./bin/etcd --version && \ - ./bin/etcd-failpoints --version && \ - ./bin/etcdctl version && \ - ./bin/etcdutl version && \ - ./bin/etcd-agent -help || true && \ - ./bin/etcd-proxy -help || true && \ - ./bin/etcd-runner --help || true && \ - ./bin/etcd-tester -help || true && \ - ./bin/benchmark --help || true" - -push-docker-functional: - $(info GO_VERSION: $(GO_VERSION)) - $(info ETCD_VERSION: $(ETCD_VERSION)) - docker push gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) - -pull-docker-functional: - $(info GO_VERSION: $(GO_VERSION)) - $(info ETCD_VERSION: $(ETCD_VERSION)) - docker pull gcr.io/etcd-development/etcd-functional:go$(GO_VERSION) diff --git a/vendor/go.etcd.io/etcd/v3/README.md b/vendor/go.etcd.io/etcd/v3/README.md index fc438ae732..a4ae182c88 100644 --- a/vendor/go.etcd.io/etcd/v3/README.md +++ b/vendor/go.etcd.io/etcd/v3/README.md @@ -1,11 +1,10 @@ # etcd [![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/etcd?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/etcd) -[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd) +[![Coverage](https://codecov.io/gh/etcd-io/etcd/branch/main/graph/badge.svg)](https://codecov.io/gh/etcd-io/etcd) [![Tests](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/tests.yaml) [![asset-transparency](https://github.com/etcd-io/etcd/actions/workflows/asset-transparency.yaml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/asset-transparency.yaml) [![codeql-analysis](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/codeql-analysis.yml) -[![self-hosted-linux-arm64-graviton2-tests](https://github.com/etcd-io/etcd/actions/workflows/self-hosted-linux-arm64-graviton2-tests.yml/badge.svg)](https://github.com/etcd-io/etcd/actions/workflows/self-hosted-linux-arm64-graviton2-tests.yml) [![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) [![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/etcd) [![Releases](https://img.shields.io/github/release/etcd-io/etcd/all.svg?style=flat-square)](https://github.com/etcd-io/etcd/releases) @@ -41,40 +40,18 @@ etcd contributors and maintainers have monthly (every four weeks) meetings at 11 An initial agenda will be posted to the [shared Google docs][shared-meeting-notes] a day before each meeting, and everyone is welcome to suggest additional topics or other agendas. 
-[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit
-
-
-Time:
-- [Jan 10th, 2019 11:00 AM video](https://www.youtube.com/watch?v=0Cphtbd1OSc&feature=youtu.be)
-- [Feb 7th, 2019 11:00 AM video](https://youtu.be/U80b--oAlYM)
-- [Mar 7th, 2019 11:00 AM video](https://youtu.be/w9TI5B7D1zg)
-- [Apr 4th, 2019 11:00 AM video](https://youtu.be/oqQR2XH1L_A)
-- [May 2nd, 2019 11:00 AM video](https://youtu.be/wFwQePuDWVw)
-- [May 30th, 2019 11:00 AM video](https://youtu.be/2t1R5NATYG4)
-- [Jul 11th, 2019 11:00 AM video](https://youtu.be/k_FZEipWD6Y)
-- [Jul 25, 2019 11:00 AM video](https://youtu.be/VSUJTACO93I)
-- [Aug 22, 2019 11:00 AM video](https://youtu.be/6IBQ-VxQmuM)
-- [Sep 19, 2019 11:00 AM video](https://youtu.be/SqfxU9DhBOc)
-- Nov 14, 2019 11:00 AM
-- Dec 12, 2019 11:00 AM
-- Jan 09, 2020 11:00 AM
-- Feb 06, 2020 11:00 AM
-- Mar 05, 2020 11:00 AM
-- Apr 02, 2020 11:00 AM
-- Apr 30, 2020 11:00 AM
-- May 28, 2020 11:00 AM
-- Jun 25, 2020 11:00 AM
-- Jul 23, 2020 11:00 AM
-- Aug 20, 2020 11:00 AM
-- Sep 17, 2020 11:00 AM
-- Oct 15, 2020 11:00 AM
-- Nov 12, 2020 11:00 AM
-- Dec 10, 2020 11:00 AM
+Meeting recordings are uploaded to [Etcd YouTube channel].
 
 Join Hangouts Meet: [meet.google.com/umg-nrxn-qvs](https://meet.google.com/umg-nrxn-qvs)
 
 Join by phone: +1 405-792-0633 PIN: 299 906#
 
+[shared-meeting-notes]: https://docs.google.com/document/d/16XEGyPBisZvmmoIHSZzv__LoyOeluC5a4x353CX0SIM/edit
+[Etcd YouTube channel]: https://www.youtube.com/channel/UC7tUWR24I5AR9NMsG-NYBlg
+
+## Maintainers
+
+[MAINTAINERS](MAINTAINERS) strive to shape an inclusive open source project culture where users are heard and contributors feel respected and empowered. MAINTAINERS maintain productive relationships across different companies and disciplines. Read more about [MAINTAINERS role and responsibilities](GOVERNANCE.md#maintainers).
 
 ## Getting started
 
@@ -84,11 +61,11 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic
 
 For more installation guides, please check out [play.etcd.io](http://play.etcd.io) and [operating etcd](https://etcd.io/docs/latest/op-guide).
 
-For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `main` branch. This first needs [*Go*](https://golang.org/) installed ([version 1.16+](/go.mod#L3) is required). All development occurs on `main`, including new features and bug fixes. Bug fixes are first targeted at `main` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
+For those wanting to try the very latest version, [build the latest version of etcd][dl-build] from the `main` branch. This first needs [*Go*](https://golang.org/) installed ([version 1.17+](/go.mod#L3) is required). All development occurs on `main`, including new features and bug fixes. Bug fixes are first targeted at `main` and subsequently ported to release branches, as described in the [branch management][branch-management] guide.
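To complement the `Install etcd client v3` snippet later in this README, a minimal client usage sketch; it assumes a local member serving the default `127.0.0.1:2379` client URL, with error handling kept terse:

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Connect to a single-member cluster started via `goreman start`
	// or `./bin/etcd` (default client address assumed).
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if _, err := cli.Put(ctx, "greeting", "hello etcd"); err != nil {
		panic(err)
	}
	resp, err := cli.Get(ctx, "greeting")
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
```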
[github-release]: https://github.com/etcd-io/etcd/releases [branch-management]: https://etcd.io/docs/latest/branch_management -[dl-build]: https://etcd.io/docs/latest/dl-build#build-the-latest-version +[dl-build]: https://etcd.io/docs/latest/install/#build-from-source ### Running etcd @@ -153,6 +130,12 @@ Follow the steps in [Procfile.learner](./Procfile.learner) to add a learner node goreman -f ./Procfile.learner start ``` +### Install etcd client v3 + +```bash +go get go.etcd.io/etcd/client/v3 +``` + ### Next steps Now it's time to dig into the full etcd API and other guides. @@ -186,7 +169,7 @@ See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches and the co ## Reporting bugs -See [reporting bugs](https://etcd.io/docs/latest/reporting-bugs) for details about reporting any issues. +See [reporting bugs](https://etcd.io/docs/latest/reporting_bugs/) for details about reporting any issues. ## Reporting a security vulnerability @@ -204,6 +187,7 @@ These emeritus maintainers dedicated a part of their career to etcd and reviewed * Fanmin Shi * Anthony Romano +* Brandon Philips ### License diff --git a/vendor/go.etcd.io/etcd/v3/bill-of-materials.json b/vendor/go.etcd.io/etcd/v3/bill-of-materials.json index 7317583b4d..82254a5aa7 100644 --- a/vendor/go.etcd.io/etcd/v3/bill-of-materials.json +++ b/vendor/go.etcd.io/etcd/v3/bill-of-materials.json @@ -17,6 +17,15 @@ } ] }, + { + "project": "github.com/cenkalti/backoff/v4", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, { "project": "github.com/certifi/gocertifi", "licenses": [ @@ -117,38 +126,29 @@ ] }, { - "project": "github.com/etcd-io/gofail/runtime", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/form3tech-oss/jwt-go", + "project": "github.com/getsentry/raven-go", "licenses": [ { - "type": "MIT License", - "confidence": 0.9891304347826086 + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 } ] }, { - "project": "github.com/getsentry/raven-go", + "project": "github.com/gogo/protobuf", "licenses": [ { "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9663865546218487 + "confidence": 0.9163346613545816 } ] }, { - "project": "github.com/gogo/protobuf", + "project": "github.com/golang-jwt/jwt", "licenses": [ { - "type": "BSD 3-clause \"New\" or \"Revised\" License", - "confidence": 0.9163346613545816 + "type": "MIT License", + "confidence": 0.9891304347826086 } ] }, @@ -179,6 +179,15 @@ } ] }, + { + "project": "github.com/google/go-cmp/cmp", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, { "project": "github.com/gorilla/websocket", "licenses": [ @@ -233,15 +242,6 @@ } ] }, - { - "project": "github.com/json-iterator/go", - "licenses": [ - { - "type": "MIT License", - "confidence": 1 - } - ] - }, { "project": "github.com/mattn/go-runewidth", "licenses": [ @@ -260,24 +260,6 @@ } ] }, - { - "project": "github.com/modern-go/concurrent", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "github.com/modern-go/reflect2", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, { "project": "github.com/olekukonko/tablewriter", "licenses": [ @@ -539,15 +521,6 @@ } ] }, - { - "project": "go.opentelemetry.io/contrib", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, { "project": 
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", "licenses": [ @@ -567,7 +540,7 @@ ] }, { - "project": "go.opentelemetry.io/otel/exporters/otlp", + "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace", "licenses": [ { "type": "Apache License 2.0", @@ -576,7 +549,7 @@ ] }, { - "project": "go.opentelemetry.io/otel/metric", + "project": "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", "licenses": [ { "type": "Apache License 2.0", @@ -593,24 +566,6 @@ } ] }, - { - "project": "go.opentelemetry.io/otel/sdk/export/metric", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, - { - "project": "go.opentelemetry.io/otel/sdk/metric", - "licenses": [ - { - "type": "Apache License 2.0", - "confidence": 1 - } - ] - }, { "project": "go.opentelemetry.io/otel/trace", "licenses": [ @@ -768,6 +723,15 @@ } ] }, + { + "project": "sigs.k8s.io/json", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 0.9617021276595744 + } + ] + }, { "project": "sigs.k8s.io/yaml", "licenses": [ diff --git a/vendor/go.etcd.io/etcd/v3/build b/vendor/go.etcd.io/etcd/v3/build deleted file mode 100644 index 60aa15d768..0000000000 --- a/vendor/go.etcd.io/etcd/v3/build +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -echo -e "\\e[91mDEPRECATED!!! Use build.sh script instead.\\e[0m\\n" -sleep 1 - -source ./build.sh diff --git a/vendor/go.etcd.io/etcd/v3/build.bat b/vendor/go.etcd.io/etcd/v3/build.bat deleted file mode 100644 index ff9b209a70..0000000000 --- a/vendor/go.etcd.io/etcd/v3/build.bat +++ /dev/null @@ -1 +0,0 @@ -powershell -ExecutionPolicy Bypass -File build.ps1 diff --git a/vendor/go.etcd.io/etcd/v3/build.ps1 b/vendor/go.etcd.io/etcd/v3/build.ps1 deleted file mode 100644 index d1c36ee643..0000000000 --- a/vendor/go.etcd.io/etcd/v3/build.ps1 +++ /dev/null @@ -1,81 +0,0 @@ -$ORG_PATH="go.etcd.io" -$REPO_PATH="$ORG_PATH/etcd" -$PWD = $((Get-Item -Path ".\" -Verbose).FullName) -$FSROOT = $((Get-Location).Drive.Name+":") -$FSYS = $((Get-WMIObject win32_logicaldisk -filter "DeviceID = '$FSROOT'").filesystem) - -if ($FSYS.StartsWith("FAT","CurrentCultureIgnoreCase")) { - echo "Error: Cannot build etcd using the $FSYS filesystem (use NTFS instead)" - exit 1 -} - -# Set $Env:GO_LDFLAGS="-s" for building without symbols. 
-$GO_LDFLAGS="$Env:GO_LDFLAGS -X $REPO_PATH/version.GitSHA=$GIT_SHA" - -# rebuild symlinks -git ls-files -s cmd | select-string -pattern 120000 | ForEach { - $l = $_.ToString() - $lnkname = $l.Split(' ')[1] - $target = "$(git log -p HEAD -- $lnkname | select -last 2 | select -first 1)" - $target = $target.SubString(1,$target.Length-1).Replace("/","\") - $lnkname = $lnkname.Replace("/","\") - - $terms = $lnkname.Split("\") - $dirname = $terms[0..($terms.length-2)] -join "\" - $lnkname = "$PWD\$lnkname" - $targetAbs = "$((Get-Item -Path "$dirname\$target").FullName)" - $targetAbs = $targetAbs.Replace("/", "\") - - if (test-path -pathtype container "$targetAbs") { - if (Test-Path "$lnkname") { - if ((Get-Item "$lnkname") -is [System.IO.DirectoryInfo]) { - # rd so deleting junction doesn't take files with it - cmd /c rd "$lnkname" - } - } - if (Test-Path "$lnkname") { - if (!((Get-Item "$lnkname") -is [System.IO.DirectoryInfo])) { - cmd /c del /A /F "$lnkname" - } - } - cmd /c mklink /J "$lnkname" "$targetAbs" ">NUL" - } else { - # Remove file with symlink data (first run) - if (Test-Path "$lnkname") { - cmd /c del /A /F "$lnkname" - } - cmd /c mklink /H "$lnkname" "$targetAbs" ">NUL" - } -} - -if (-not $env:GOPATH) { - $orgpath="$PWD\gopath\src\" + $ORG_PATH.Replace("/", "\") - if (Test-Path "$orgpath\etcd") { - if ((Get-Item "$orgpath\etcd") -is [System.IO.DirectoryInfo]) { - # rd so deleting junction doesn't take files with it - cmd /c rd "$orgpath\etcd" - } - } - if (Test-Path "$orgpath") { - if ((Get-Item "$orgpath") -is [System.IO.DirectoryInfo]) { - # rd so deleting junction doesn't take files with it - cmd /c rd "$orgpath" - } - } - if (Test-Path "$orgpath") { - if (!((Get-Item "$orgpath") -is [System.IO.DirectoryInfo])) { - # Remove file with symlink data (first run) - cmd /c del /A /F "$orgpath" - } - } - cmd /c mkdir "$orgpath" - cmd /c mklink /J "$orgpath\etcd" "$PWD" ">NUL" - $env:GOPATH = "$PWD\gopath" -} - -# Static compilation is useful when etcd is run in a container -$env:CGO_ENABLED = 0 -$env:GO15VENDOREXPERIMENT = 1 -$GIT_SHA="$(git rev-parse --short HEAD)" -go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcd.exe "$REPO_PATH" -go build -a -installsuffix cgo -ldflags $GO_LDFLAGS -o bin\etcdctl.exe "$REPO_PATH\etcdctl" diff --git a/vendor/go.etcd.io/etcd/v3/build.sh b/vendor/go.etcd.io/etcd/v3/build.sh deleted file mode 100644 index 7a4f8670e6..0000000000 --- a/vendor/go.etcd.io/etcd/v3/build.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/usr/bin/env bash - -source ./scripts/test_lib.sh - -GIT_SHA=$(git rev-parse --short HEAD || echo "GitNotFound") -if [[ -n "$FAILPOINTS" ]]; then - GIT_SHA="$GIT_SHA"-FAILPOINTS -fi - -VERSION_SYMBOL="${ROOT_MODULE}/api/v3/version.GitSHA" - -# Set GO_LDFLAGS="-s" for building without symbols for debugging. 
-# shellcheck disable=SC2206 -GO_LDFLAGS=(${GO_LDFLAGS} "-X=${VERSION_SYMBOL}=${GIT_SHA}") -GO_BUILD_ENV=("CGO_ENABLED=0" "GO_BUILD_FLAGS=${GO_BUILD_FLAGS}" "GOOS=${GOOS}" "GOARCH=${GOARCH}") - -# enable/disable failpoints -toggle_failpoints() { - mode="$1" - if command -v gofail >/dev/null 2>&1; then - run gofail "$mode" server/etcdserver/ server/mvcc/backend/ - elif [[ "$mode" != "disable" ]]; then - log_error "FAILPOINTS set but gofail not found" - exit 1 - fi -} - -toggle_failpoints_default() { - mode="disable" - if [[ -n "$FAILPOINTS" ]]; then mode="enable"; fi - toggle_failpoints "$mode" -} - -etcd_build() { - out="bin" - if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi - toggle_failpoints_default - - run rm -f "${out}/etcd" - ( - cd ./server - # Static compilation is useful when etcd is run in a container. $GO_BUILD_FLAGS is OK - # shellcheck disable=SC2086 - run env "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcd" . || return 2 - ) || return 2 - - run rm -f "${out}/etcdutl" - # shellcheck disable=SC2086 - ( - cd ./etcdutl - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcdutl" . || return 2 - ) || return 2 - - run rm -f "${out}/etcdctl" - # shellcheck disable=SC2086 - ( - cd ./etcdctl - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" "${GO_BUILD_ENV[@]}" go build $GO_BUILD_FLAGS \ - -installsuffix=cgo \ - "-ldflags=${GO_LDFLAGS[*]}" \ - -o="../${out}/etcdctl" . || return 2 - ) || return 2 - # Verify whether symbol we overriden exists - # For cross-compiling we cannot run: ${out}/etcd --version | grep -q "Git SHA: ${GIT_SHA}" - - # We need symbols to do this check: - if [[ "${GO_LDFLAGS[*]}" != *"-s"* ]]; then - go tool nm "${out}/etcd" | grep "${VERSION_SYMBOL}" > /dev/null - if [[ "${PIPESTATUS[*]}" != "0 0" ]]; then - log_error "FAIL: Symbol ${VERSION_SYMBOL} not found in binary: ${out}/etcd" - return 2 - fi - fi -} - -tools_build() { - out="bin" - if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi - tools_path="tools/benchmark - tools/etcd-dump-db - tools/etcd-dump-logs - tools/local-tester/bridge" - for tool in ${tools_path} - do - echo "Building" "'${tool}'"... - run rm -f "${out}/${tool}" - # shellcheck disable=SC2086 - run env GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} \ - -installsuffix=cgo \ - "-ldflags='${GO_LDFLAGS[*]}'" \ - -o="${out}/${tool}" "./${tool}" || return 2 - done - tests_build "${@}" -} - -tests_build() { - out="bin" - if [[ -n "${BINDIR}" ]]; then out="${BINDIR}"; fi - tools_path=" - functional/cmd/etcd-agent - functional/cmd/etcd-proxy - functional/cmd/etcd-runner - functional/cmd/etcd-tester" - ( - cd tests || exit 2 - for tool in ${tools_path}; do - echo "Building" "'${tool}'"... 
- run rm -f "../${out}/${tool}" - - # shellcheck disable=SC2086 - run env CGO_ENABLED=0 GO_BUILD_FLAGS="${GO_BUILD_FLAGS}" go build ${GO_BUILD_FLAGS} \ - -installsuffix=cgo \ - "-ldflags='${GO_LDFLAGS[*]}'" \ - -o="../${out}/${tool}" "./${tool}" || return 2 - done - ) || return 2 -} - -toggle_failpoints_default - -# only build when called directly, not sourced -if echo "$0" | grep -E "build(.sh)?$" >/dev/null; then - if etcd_build; then - log_success "SUCCESS: etcd_build (GOARCH=${GOARCH})" - else - log_error "FAIL: etcd_build (GOARCH=${GOARCH})" - exit 2 - fi -fi diff --git a/vendor/go.etcd.io/etcd/v3/etcd.conf.yml.sample b/vendor/go.etcd.io/etcd/v3/etcd.conf.yml.sample index 0d7a2c6b3d..38d74bcb79 100644 --- a/vendor/go.etcd.io/etcd/v3/etcd.conf.yml.sample +++ b/vendor/go.etcd.io/etcd/v3/etcd.conf.yml.sample @@ -69,9 +69,6 @@ initial-cluster-state: 'new' # Reject reconfiguration requests that would cause quorum loss. strict-reconfig-check: false -# Accept etcd V2 client requests -enable-v2: true - # Enable runtime profiling data via HTTP server enable-pprof: true @@ -125,6 +122,9 @@ peer-transport-security: # Peer TLS using generated certificates. auto-tls: false +# The validity period of the self-signed certificate, the unit is year. +self-signed-cert-validity: 1 + # Enable debug-level logging for etcd. log-level: debug diff --git a/vendor/go.etcd.io/etcd/v3/test b/vendor/go.etcd.io/etcd/v3/test deleted file mode 100644 index a14782bc3c..0000000000 --- a/vendor/go.etcd.io/etcd/v3/test +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -echo -e "\\e[91mDEPRECATED!!! Use test.sh script instead.\\e[0m\\n" -sleep 1 - -source ./test.sh diff --git a/vendor/go.etcd.io/etcd/v3/test.sh b/vendor/go.etcd.io/etcd/v3/test.sh deleted file mode 100644 index 308cde4021..0000000000 --- a/vendor/go.etcd.io/etcd/v3/test.sh +++ /dev/null @@ -1,696 +0,0 @@ -#!/usr/bin/env bash -# -# Run all etcd tests -# ./test -# ./test -v -# -# -# Run specified test pass -# -# $ PASSES=unit ./test -# $ PASSES=integration ./test -# -# -# Run tests for one package -# Each pass has different default timeout, if you just run tests in one package or 1 test case then you can set TIMEOUT -# flag for different expectation -# -# $ PASSES=unit PKG=./wal TIMEOUT=1m ./test -# $ PASSES=integration PKG=./clientv3 TIMEOUT=1m ./test -# -# Run specified unit tests in one package -# To run all the tests with prefix of "TestNew", set "TESTCASE=TestNew "; -# to run only "TestNew", set "TESTCASE="\bTestNew\b"" -# -# $ PASSES=unit PKG=./wal TESTCASE=TestNew TIMEOUT=1m ./test -# $ PASSES=unit PKG=./wal TESTCASE="\bTestNew\b" TIMEOUT=1m ./test -# $ PASSES=integration PKG=./client/integration TESTCASE="\bTestV2NoRetryEOF\b" TIMEOUT=1m ./test -# -# -# Run code coverage -# COVERDIR must either be a absolute path or a relative path to the etcd root -# $ COVERDIR=coverage PASSES="build build_cov cov" ./test -# $ go tool cover -html ./coverage/cover.out -set -e -set -o pipefail - - -# Consider command as failed when any component of the pipe fails: -# https://stackoverflow.com/questions/1221833/pipe-output-and-capture-exit-status-in-bash -set -o pipefail - -# The test script is not supposed to make any changes to the files -# e.g. add/update missing dependencies. Such divergences should be -# detected and trigger a failure that needs explicit developer's action. 
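The deleted build.sh above stamps the git SHA into the binary via `-X=${ROOT_MODULE}/api/v3/version.GitSHA=${GIT_SHA}` and later confirms the symbol survived with `go tool nm`. A generic sketch of that link-time stamping pattern, using a hypothetical `main` package instead of etcd's real version package:

```go
package main

import "fmt"

// GitSHA is overwritten at link time, e.g.:
//
//	go build -ldflags "-X main.GitSHA=$(git rev-parse --short HEAD)" .
//
// The target must be a plain package-level string variable; the linker
// cannot set constants, struct fields, or non-string values.
var GitSHA = "GitNotFound"

func main() {
	fmt.Println("Git SHA:", GitSHA)
}
```

The `go tool nm <binary> | grep <symbol>` verification only works while symbols are kept, which is why the script skips it when `-s` appears in `GO_LDFLAGS`.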
-export GOFLAGS=-mod=readonly - -source ./scripts/test_lib.sh -source ./build.sh - -PASSES=${PASSES:-"fmt bom dep build unit"} -PKG=${PKG:-} - -if [ -z "$GOARCH" ]; then - GOARCH=$(go env GOARCH); -fi - -# determine whether target supports race detection -if [ -z "${RACE}" ] ; then - if [ "$GOARCH" == "amd64" ]; then - RACE="--race" - else - RACE="--race=false" - fi -else - RACE="--race=${RACE:-true}" -fi - -# This options make sense for cases where SUT (System Under Test) is compiled by test. -COMMON_TEST_FLAGS=("${RACE}") -if [[ -n "${CPU}" ]]; then - COMMON_TEST_FLAGS+=("--cpu=${CPU}") -fi - -log_callout "Running with ${COMMON_TEST_FLAGS[*]}" - -RUN_ARG=() -if [ -n "${TESTCASE}" ]; then - RUN_ARG=("-run=${TESTCASE}") -fi - -function build_pass { - log_callout "Building etcd" - run_for_modules run go build "${@}" || return 2 - GO_BUILD_FLAGS="-v" etcd_build "${@}" - GO_BUILD_FLAGS="-v" tools_build "${@}" -} - -################# REGULAR TESTS ################################################ - -# run_unit_tests [pkgs] runs unit tests for a current module and givesn set of [pkgs] -function run_unit_tests { - local pkgs="${1:-./...}" - shift 1 - # shellcheck disable=SC2086 - GOLANG_TEST_SHORT=true go_test "${pkgs}" "parallel" : -short -timeout="${TIMEOUT:-3m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" "$@" -} - -function unit_pass { - run_for_modules run_unit_tests "$@" -} - -function integration_extra { - if [ -z "${PKG}" ] ; then - run_for_module "." go_test "./contrib/raftexample" "keep_going" : -timeout="${TIMEOUT:-5m}" "${RUN_ARG[@]}" "${COMMON_TEST_FLAGS[@]}" "$@" || return $? - run_for_module "tests" go_test "./integration/v2store/..." "keep_going" : -tags v2v3 -timeout="${TIMEOUT:-5m}" "${RUN_ARG[@]}" "${COMMON_TEST_FLAGS[@]}" "$@" || return $? - else - log_warning "integration_extra ignored when PKG is specified" - fi -} - -function integration_pass { - local pkgs=${USERPKG:-"./integration/..."} - run_for_module "tests" go_test "${pkgs}" "parallel" : -timeout="${TIMEOUT:-15m}" "${COMMON_TEST_FLAGS[@]}" "${RUN_ARG[@]}" "$@" || return $? - integration_extra "$@" -} - -function e2e_pass { - # e2e tests are running pre-build binary. Settings like --race,-cover,-cpu does not have any impact. - run_for_module "tests" go_test "./e2e/..." "keep_going" : -timeout="${TIMEOUT:-30m}" "${RUN_ARG[@]}" "$@" -} - -function integration_e2e_pass { - run_pass "integration" "${@}" - run_pass "e2e" "${@}" -} - -# generic_checker [cmd...] -# executes given command in the current module, and clearly fails if it -# failed or returned output. -function generic_checker { - local cmd=("$@") - if ! output=$("${cmd[@]}"); then - echo "${output}" - log_error -e "FAIL: '${cmd[*]}' checking failed (!=0 return code)" - return 255 - fi - if [ -n "${output}" ]; then - echo "${output}" - log_error -e "FAIL: '${cmd[*]}' checking failed (printed output)" - return 255 - fi -} - -function functional_pass { - run ./tests/functional/build - - # Clean up any data and logs from previous runs - rm -rf /tmp/etcd-functional-* /tmp/etcd-functional-*.backup - - # TODO: These ports should be dynamically allocated instead of hard-coded. - for a in 1 2 3; do - ./bin/etcd-agent --network tcp --address 127.0.0.1:${a}9027 < /dev/null & - pid="$!" - agent_pids="${agent_pids} $pid" - done - - for a in 1 2 3; do - log_callout "Waiting for 'etcd-agent' on ${a}9027..." - while ! nc -z localhost ${a}9027; do - sleep 1 - done - done - - log_callout "functional test START!" 
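run_unit_tests above runs the unit pass with `-short` (and exports `GOLANG_TEST_SHORT=true`), so long-running tests are expected to opt out themselves. A minimal sketch of a test honoring that contract, a hypothetical test rather than one from the repository:

```go
package example

import (
	"testing"
	"time"
)

// TestSlowRecovery demonstrates the -short contract used by run_unit_tests:
// expensive tests skip themselves when the unit pass runs with -short.
func TestSlowRecovery(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping slow recovery test in -short mode")
	}
	time.Sleep(2 * time.Second) // stand-in for an expensive scenario
}
```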
- run ./bin/etcd-tester --config ./tests/functional/functional.yaml && log_success "'etcd-tester' succeeded" - local etcd_tester_exit_code=$? - - if [[ "${etcd_tester_exit_code}" -ne "0" ]]; then - log_error "ETCD_TESTER_EXIT_CODE:" ${etcd_tester_exit_code} - fi - - # shellcheck disable=SC2206 - agent_pids=($agent_pids) - kill -s TERM "${agent_pids[@]}" || true - - if [[ "${etcd_tester_exit_code}" -ne "0" ]]; then - log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-1/etcd.log'" - tail -1000 /tmp/etcd-functional-1/etcd.log - - log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-2/etcd.log'" - tail -1000 /tmp/etcd-functional-2/etcd.log - - log_error -e "\\nFAILED! 'tail -1000 /tmp/etcd-functional-3/etcd.log'" - tail -1000 /tmp/etcd-functional-3/etcd.log - - log_error "--- FAIL: exit code" ${etcd_tester_exit_code} - return ${etcd_tester_exit_code} - fi - log_success "functional test PASS!" -} - -function grpcproxy_pass { - run_for_module "tests" go_test "./integration/... ./e2e" "fail_fast" : \ - -timeout=30m -tags cluster_proxy "${COMMON_TEST_FLAGS[@]}" "$@" -} - -################# COVERAGE ##################################################### - -# Builds artifacts used by tests/e2e in coverage mode. -function build_cov_pass { - run_for_module "server" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcd_test" - run_for_module "etcdctl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdctl_test" - run_for_module "etcdutl" run go test -tags cov -c -covermode=set -coverpkg="./..." -o "../bin/etcdutl_test" -} - -# pkg_to_coverflag [prefix] [pkgs] -# produces name of .coverprofile file to be used for tests of this package -function pkg_to_coverprofileflag { - local prefix="${1}" - local pkgs="${2}" - local pkgs_normalized - prefix_normalized=$(echo "${prefix}" | tr "./ " "__+") - if [ "${pkgs}" == "./..." ]; then - pkgs_normalized="all" - else - pkgs_normalized=$(echo "${pkgs}" | tr "./ " "__+") - fi - mkdir -p "${coverdir}/${prefix_normalized}" - echo -n "-coverprofile=${coverdir}/${prefix_normalized}/${pkgs_normalized}.coverprofile" -} - -function not_test_packages { - for m in $(modules); do - if [[ $m =~ .*/etcd/tests/v3 ]]; then continue; fi - if [[ $m =~ .*/etcd/v3 ]]; then continue; fi - echo "${m}/..." - done -} - -# split_dir [dir] [num] -function split_dir { - local d="${1}" - local num="${2}" - local i=0 - for f in "${d}/"*; do - local g=$(( "${i}" % "${num}" )) - mkdir -p "${d}_${g}" - mv "${f}" "${d}_${g}/" - (( i++ )) - done -} - -function split_dir_pass { - split_dir ./covdir/integration 4 -} - - -# merge_cov_files [coverdir] [outfile] -# merges all coverprofile files into a single file in the given directory. -function merge_cov_files { - local coverdir="${1}" - local cover_out_file="${2}" - log_callout "Merging coverage results in: ${coverdir}" - # gocovmerge requires not-empty test to start with: - echo "mode: set" > "${cover_out_file}" - - local i=0 - local count - count=$(find "${coverdir}"/*.coverprofile | wc -l) - for f in "${coverdir}"/*.coverprofile; do - # print once per 20 files - if ! (( "${i}" % 20 )); then - log_callout "${i} of ${count}: Merging file: ${f}" - fi - run_go_tool "github.com/gyuho/gocovmerge" "${f}" "${cover_out_file}" > "${coverdir}/cover.tmp" 2>/dev/null - if [ -s "${coverdir}"/cover.tmp ]; then - mv "${coverdir}/cover.tmp" "${cover_out_file}" - fi - (( i++ )) - done -} - -# merge_cov [coverdir] -function merge_cov { - log_callout "[$(date)] Merging coverage files ..." 
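build_cov_pass above produces coverage-instrumented binaries via `go test -c -tags cov -covermode=set -coverpkg=./...`. The usual mechanism behind such binaries is a build-tagged test wrapper that forwards into `main()`, so the process writes a coverprofile on exit; a schematic sketch under that assumption (this is not etcd's actual wrapper):

```go
//go:build cov

package main

import (
	"os"
	"testing"
)

// TestMainCoverage runs the real main() inside `go test`. Flags after "--"
// are forwarded to the program; flags before it (-test.coverprofile,
// -test.run, ...) belong to the test binary built with `go test -c -tags cov`.
func TestMainCoverage(t *testing.T) {
	for i, arg := range os.Args {
		if arg == "--" {
			os.Args = append(os.Args[:1], os.Args[i+1:]...)
			break
		}
	}
	main()
}
```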
- coverdir="${1}" - for d in "${coverdir}"/*/; do - d=${d%*/} # remove the trailing "/" - merge_cov_files "${d}" "${d}.coverprofile" & - done - wait - merge_cov_files "${coverdir}" "${coverdir}/all.coverprofile" -} - -function cov_pass { - # shellcheck disable=SC2153 - if [ -z "$COVERDIR" ]; then - log_error "COVERDIR undeclared" - return 255 - fi - - if [ ! -f "bin/etcd_test" ]; then - log_error "etcd_test binary not found. Call: PASSES='build_cov' ./test" - return 255 - fi - - local coverdir - coverdir=$(readlink -f "${COVERDIR}") - mkdir -p "${coverdir}" - find "${coverdir}" -print0 -name '*.coverprofile' | xargs -0 rm - - local covpkgs - covpkgs=$(not_test_packages) - local coverpkg_comma - coverpkg_comma=$(echo "${covpkgs[@]}" | xargs | tr ' ' ',') - local gocov_build_flags=("-covermode=set" "-coverpkg=$coverpkg_comma") - - local failed="" - - log_callout "[$(date)] Collecting coverage from unit tests ..." - for m in $(module_dirs); do - GOLANG_TEST_SHORT=true run_for_module "${m}" go_test "./..." "parallel" "pkg_to_coverprofileflag unit_${m}" -short -timeout=30m \ - "${gocov_build_flags[@]}" "$@" || failed="$failed unit" - done - - log_callout "[$(date)] Collecting coverage from integration tests ..." - run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration" \ - -timeout=30m "${gocov_build_flags[@]}" "$@" || failed="$failed integration" - # integration-store-v2 - run_for_module "tests" go_test "./integration/v2store/..." "keep_going" "pkg_to_coverprofileflag store_v2" \ - -tags v2v3 -timeout=5m "${gocov_build_flags[@]}" "$@" || failed="$failed integration_v2v3" - # integration_cluster_proxy - run_for_module "tests" go_test "./integration/..." "parallel" "pkg_to_coverprofileflag integration_cluster_proxy" \ - -tags cluster_proxy -timeout=5m "${gocov_build_flags[@]}" || failed="$failed integration_cluster_proxy" - - log_callout "[$(date)] Collecting coverage from e2e tests ..." - # We don't pass 'gocov_build_flags' nor 'pkg_to_coverprofileflag' here, - # as the coverage is collected from the ./bin/etcd_test & ./bin/etcdctl_test internally spawned. - mkdir -p "${coverdir}/e2e" - COVERDIR="${coverdir}/e2e" run_for_module "tests" go_test "./e2e/..." "keep_going" : -tags=cov -timeout 30m "$@" || failed="$failed tests_e2e" - split_dir "${coverdir}/e2e" 10 - - log_callout "[$(date)] Collecting coverage from e2e tests with proxy ..." - mkdir -p "${coverdir}/e2e_proxy" - COVERDIR="${coverdir}/e2e_proxy" run_for_module "tests" go_test "./e2e/..." 
"keep_going" : -tags="cov cluster_proxy" -timeout 30m "$@" || failed="$failed tests_e2e_proxy" - split_dir "${coverdir}/e2e_proxy" 10 - - local cover_out_file="${coverdir}/all.coverprofile" - merge_cov "${coverdir}" - - # strip out generated files (using GNU-style sed) - sed --in-place -E "/[.]pb[.](gw[.])?go/d" "${cover_out_file}" || true - - sed --in-place -E "s|go.etcd.io/etcd/api/v3/|api/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/v3/|client/v3/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/v2/|client/v2/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/client/pkg/v3|client/pkg/v3/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/etcdctl/v3/|etcdctl/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/etcdutl/v3/|etcdutl/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/pkg/v3/|pkg/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/raft/v3/|raft/|g" "${cover_out_file}" || true - sed --in-place -E "s|go.etcd.io/etcd/server/v3/|server/|g" "${cover_out_file}" || true - - # held failures to generate the full coverage file, now fail - if [ -n "$failed" ]; then - for f in $failed; do - log_error "--- FAIL:" "$f" - done - log_warning "Despite failures, you can see partial report:" - log_warning " go tool cover -html ${cover_out_file}" - return 255 - fi - - log_success "done :) [see report: go tool cover -html ${cover_out_file}]" -} - -######### Code formatting checkers ############################################# - -function fmt_pass { - toggle_failpoints disable - - # TODO: add "unparam","staticcheck", "unconvert", "ineffasign","nakedret" - # after resolving ore-existing errors. - # markdown_you - too sensitive check was temporarilly disbled. - for p in shellcheck \ - goword \ - gofmt \ - govet \ - revive \ - license_header \ - receiver_name \ - mod_tidy \ - dep \ - shellcheck \ - shellws \ - ; do - run_pass "${p}" "${@}" - done -} - -function shellcheck_pass { - if tool_exists "shellcheck" "https://github.com/koalaman/shellcheck#installing"; then - generic_checker run shellcheck -fgcc build test scripts/*.sh ./*.sh - fi -} - -function shellws_pass { - TAB=$'\t' - log_callout "Ensuring no tab-based indention in shell scripts" - local files - files=$(find ./ -name '*.sh' -print0 | xargs -0 ) - # shellcheck disable=SC2206 - files=( ${files[@]} "./scripts/build-binary" "./scripts/build-docker" "./scripts/release" ) - log_cmd "grep -E -n $'^ *${TAB}' ${files[*]}" - # shellcheck disable=SC2086 - if grep -E -n $'^ *${TAB}' "${files[@]}" | sed $'s|${TAB}|[\\\\tab]|g'; then - log_error "FAIL: found tab-based indention in bash scripts. Use ' ' (double space)." - local files_with_tabs - files_with_tabs=$(grep -E -l $'^ *\\t' "${files[@]}") - log_warning "Try: sed -i 's|\\t| |g' $files_with_tabs" - return 1 - else - log_success "SUCCESS: no tabulators found." - return 0 - fi -} - -function markdown_you_find_eschew_you { - local find_you_cmd="find . -name \\*.md ! -path '*/vendor/*' ! -path './Documentation/*' ! -path './gopath.proto/*' ! 
-path './release/*' -exec grep -E --color '[Yy]ou[r]?[ '\\''.,;]' {} + || true" - run eval "${find_you_cmd}" -} - -function markdown_you_pass { - generic_checker markdown_you_find_eschew_you -} - -function markdown_marker_pass { - # TODO: check other markdown files when marker handles headers with '[]' - if tool_exists "marker" "https://crates.io/crates/marker"; then - generic_checker run marker --skip-http --root ./Documentation 2>&1 - fi -} - -function govet_pass { - run_for_modules generic_checker run go vet -} - -function govet_shadow_pass { - local shadow - shadow=$(tool_get_bin "golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow") - run_for_modules generic_checker run go vet -all -vettool="${shadow}" -} - -function unparam_pass { - run_for_modules generic_checker run_go_tool "mvdan.cc/unparam" -} - -function staticcheck_pass { - run_for_modules generic_checker run_go_tool "honnef.co/go/tools/cmd/staticcheck" -} - -function revive_pass { - run_for_modules generic_checker run_go_tool "github.com/mgechev/revive" -config "${ETCD_ROOT_DIR}/tests/revive.toml" -exclude "vendor/..." -} - -function unconvert_pass { - run_for_modules generic_checker run_go_tool "github.com/mdempsky/unconvert" unconvert -v -} - -function ineffassign_per_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - run_go_tool github.com/gordonklaus/ineffassign "${gofiles[@]}" -} - -function ineffassign_pass { - run_for_modules generic_checker ineffassign_per_package -} - -function nakedret_pass { - run_for_modules generic_checker run_go_tool "github.com/alexkohler/nakedret" -} - -function license_header_pass { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - - for file in "${gofiles[@]}"; do - if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then - licRes="${licRes}"$(echo -e " ${file}") - fi - done - if [ -n "${licRes}" ]; then - log_error -e "license header checking failed:\\n${licRes}" - return 255 - fi -} - -function receiver_name_for_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - - recvs=$(grep 'func ([^*]' "${gofiles[@]}" | tr ':' ' ' | \ - awk ' { print $2" "$3" "$4" "$1 }' | sed "s/[a-zA-Z\\.]*go//g" | sort | uniq | \ - grep -Ev "(Descriptor|Proto|_)" | awk ' { print $3" "$4 } ' | sort | uniq -c | grep -v ' 1 ' | awk ' { print $2 } ') - if [ -n "${recvs}" ]; then - # shellcheck disable=SC2206 - recvs=($recvs) - for recv in "${recvs[@]}"; do - log_error "Mismatched receiver for $recv..." 
- grep "$recv" "${gofiles[@]}" | grep 'func (' - done - return 255 - fi -} - -function receiver_name_pass { - run_for_modules receiver_name_for_package -} - -# goword_for_package package -# checks spelling and comments in the 'package' in the current module -# -function goword_for_package { - # bash 3.x compatible replacement of: mapfile -t gofiles < <(go_srcs_in_module "$1") - local gofiles=() - while IFS= read -r line; do gofiles+=("$line"); done < <(go_srcs_in_module "$1") - - local gowordRes - - # spellchecking can be enabled with GOBINARGS="--tags=spell" - # but it requires heavy dependencies installation, like: - # apt-get install libaspell-dev libhunspell-dev hunspell-en-us aspell-en - - # only check for broke exported godocs - if gowordRes=$(run_go_tool "github.com/chzchzchz/goword" -use-spell=false "${gofiles[@]}" | grep godoc-export | sort); then - log_error -e "goword checking failed:\\n${gowordRes}" - return 255 - fi - if [ -n "$gowordRes" ]; then - log_error -e "goword checking returned output:\\n${gowordRes}" - return 255 - fi -} - - -function goword_pass { - run_for_modules goword_for_package || return 255 -} - -function go_fmt_for_package { - # We utilize 'go fmt' to find all files suitable for formatting, - # but reuse full power gofmt to perform just RO check. - go fmt -n "$1" | sed 's| -w | -d |g' | sh -} - -function gofmt_pass { - run_for_modules generic_checker go_fmt_for_package -} - -function bom_pass { - log_callout "Checking bill of materials..." - # https://github.com/golang/go/commit/7c388cc89c76bc7167287fb488afcaf5a4aa12bf - # shellcheck disable=SC2207 - modules=($(modules_exp)) - - # Internally license-bill-of-materials tends to modify go.sum - run cp go.sum go.sum.tmp || return 2 - run cp go.mod go.mod.tmp || return 2 - - output=$(GOFLAGS=-mod=mod run_go_tool github.com/coreos/license-bill-of-materials \ - --override-file ./bill-of-materials.override.json \ - "${modules[@]}") - code="$?" - - run cp go.sum.tmp go.sum || return 2 - run cp go.mod.tmp go.mod || return 2 - - if [ "${code}" -ne 0 ] ; then - log_error -e "license-bill-of-materials (code: ${code}) failed with:\\n${output}" - return 255 - else - echo "${output}" > "bom-now.json.tmp" - fi - if ! diff ./bill-of-materials.json bom-now.json.tmp; then - log_error "modularized licenses do not match given bill of materials" - return 255 - fi - rm bom-now.json.tmp -} - -######## VARIOUS CHECKERS ###################################################### - -function dump_deps_of_module() { - local module - if ! 
module=$(run go list -m); then - return 255 - fi - run go list -f "{{if not .Indirect}}{{if .Version}}{{.Path}},{{.Version}},${module}{{end}}{{end}}" -m all -} - -# Checks whether dependencies are consistent across modules -function dep_pass { - local all_dependencies - all_dependencies=$(run_for_modules dump_deps_of_module | sort) || return 2 - - local duplicates - duplicates=$(echo "${all_dependencies}" | cut -d ',' -f 1,2 | sort | uniq | cut -d ',' -f 1 | sort | uniq -d) || return 2 - - for dup in ${duplicates}; do - log_error "FAIL: inconsistent versions for depencency: ${dup}" - echo "${all_dependencies}" | grep "${dup}" | sed "s|\\([^,]*\\),\\([^,]*\\),\\([^,]*\\)| - \\1@\\2 from: \\3|g" - done - if [[ -n "${duplicates}" ]]; then - log_error "FAIL: inconsistent dependencies" - return 2 - else - log_success "SUCCESS: dependencies are consistent across modules" - fi -} - -function release_pass { - rm -f ./bin/etcd-last-release - # to grab latest patch release; bump this up for every minor release - UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.4.*" | head -1) - if [ -n "$MANUAL_VER" ]; then - # in case, we need to test against different version - UPGRADE_VER=$MANUAL_VER - fi - if [[ -z ${UPGRADE_VER} ]]; then - UPGRADE_VER="v3.3.0" - log_warning "fallback to" ${UPGRADE_VER} - fi - - local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz" - log_callout "Downloading $file" - - set +e - curl --fail -L "https://github.com/etcd-io/etcd/releases/download/$UPGRADE_VER/$file" -o "/tmp/$file" - local result=$? - set -e - case $result in - 0) ;; - *) log_error "--- FAIL:" ${result} - return $result - ;; - esac - - tar xzvf "/tmp/$file" -C /tmp/ --strip-components=1 - mkdir -p ./bin - mv /tmp/etcd ./bin/etcd-last-release -} - -function mod_tidy_for_module { - # Watch for upstream solution: https://github.com/golang/go/issues/27005 - local tmpModDir - tmpModDir=$(mktemp -d -t 'tmpModDir.XXXXXX') - run cp "./go.mod" "${tmpModDir}" || return 2 - - # Guarantees keeping go.sum minimal - # If this is causing too much problems, we should - # stop controlling go.sum at all. - rm go.sum - run go mod tidy || return 2 - - set +e - local tmpFileGoModInSync - diff -C 5 "${tmpModDir}/go.mod" "./go.mod" - tmpFileGoModInSync="$?" 
-
-  # Bring back initial state
-  mv "${tmpModDir}/go.mod" "./go.mod"
-
-  if [ "${tmpFileGoModInSync}" -ne 0 ]; then
-    log_error "${PWD}/go.mod is not in sync with 'go mod tidy'"
-    return 255
-  fi
-}
-
-function mod_tidy_pass {
-  run_for_modules mod_tidy_for_module
-}
-
-########### MAIN ###############################################################
-
-function run_pass {
-  local pass="${1}"
-  shift 1
-  log_callout -e "\\n'${pass}' started at $(date)"
-  if "${pass}_pass" "$@" ; then
-    log_success "'${pass}' completed at $(date)"
-  else
-    log_error "FAIL: '${pass}' failed at $(date)"
-    exit 255
-  fi
-}
-
-log_callout "Starting at: $(date)"
-for pass in $PASSES; do
-  run_pass "${pass}" "${@}"
-done
-
-log_success "SUCCESS"
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
index 2c861b5cd3..098ed69f98 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go
@@ -13,6 +13,7 @@ import (
 
 	"go.mongodb.org/mongo-driver/bson/bsonrw"
 	"go.mongodb.org/mongo-driver/bson/bsontype"
+	"go.mongodb.org/mongo-driver/bson/primitive"
 )
 
 var (
@@ -43,7 +44,7 @@ type Unmarshaler interface {
 }
 
 // ValueUnmarshaler is an interface implemented by types that can unmarshal a
-// BSON value representaiton of themselves. The BSON bytes and type can be
+// BSON value representation of themselves. The BSON bytes and type can be
 // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
 // wishes to retain the data after returning.
 type ValueUnmarshaler interface {
@@ -118,11 +119,32 @@ type EncodeContext struct {
 type DecodeContext struct {
 	*Registry
 	Truncate bool
+
 	// Ancestor is the type of a containing document. This is mainly used to determine what type
 	// should be used when decoding an embedded document into an empty interface. For example, if
 	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
 	// will be decoded into a bson.M.
+	//
+	// Deprecated: Use DefaultDocumentM or DefaultDocumentD instead.
 	Ancestor reflect.Type
+
+	// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
+	// usage for this field is restricted to data typed as "interface{}" or "map[string]interface{}". If DocumentType is
+	// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
+	// error. DocumentType overrides the Ancestor field.
+	defaultDocumentType reflect.Type
+}
+
+// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (dc *DecodeContext) DefaultDocumentM() {
+	dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
+}
+
+// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
+// "interface{}" or "map[string]interface{}".
+func (dc *DecodeContext) DefaultDocumentD() { + dc.defaultDocumentType = reflect.TypeOf(primitive.D{}) } // ValueCodec is the interface that groups the methods to encode and decode diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 32fd142787..e95cab585f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -53,7 +53,7 @@ type DefaultValueDecoders struct{} // RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with // the provided RegistryBuilder. // -// There is no support for decoding map[string]interface{} becuase there is no decoder for +// There is no support for decoding map[string]interface{} because there is no decoder for // interface{}, so users must either register this decoder themselves or use the // EmptyInterfaceDecoder available in the bson package. func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) { @@ -1463,7 +1463,7 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr if !val.CanAddr() { return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) @@ -1492,16 +1492,28 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson val.Set(reflect.New(val.Type().Elem())) } + _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) + if err != nil { + return err + } + + // If the target Go value is a pointer and the BSON field value is empty, set the value to the + // zero value of the pointer (nil) and don't call UnmarshalBSON. UnmarshalBSON has no way to + // change the pointer value from within the function (only the value at the pointer address), + // so it can't set the pointer to "nil" itself. Since the most common Go value for an empty BSON + // field value is "nil", we set "nil" here and don't call UnmarshalBSON. This behavior matches + // the behavior of the Go "encoding/json" unmarshaler when the target Go value is a pointer and + // the JSON field value is "null". + if val.Kind() == reflect.Ptr && len(src) == 0 { + val.Set(reflect.Zero(val.Type())) + return nil + } + if !val.Type().Implements(tUnmarshaler) { if !val.CanAddr() { return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val} } - val = val.Addr() // If they type doesn't implement the interface, a pointer to it must. - } - - _, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) - if err != nil { - return err + val = val.Addr() // If the type doesn't implement the interface, a pointer to it must. } fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON") diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index c1e20f9489..b0ae0e23ff 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package bsoncodec provides a system for encoding values to BSON representations and decoding // values from BSON representations. This package considers both binary BSON and ExtendedJSON as // BSON representations. The types in this package enable a flexible system for handling this diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index a15636d0a8..eda417cff8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -57,11 +57,18 @@ func (eic EmptyInterfaceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWrit func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, valueType bsontype.Type) (reflect.Type, error) { isDocument := valueType == bsontype.Type(0) || valueType == bsontype.EmbeddedDocument - if isDocument && dc.Ancestor != nil { - // Using ancestor information rather than looking up the type map entry forces consistent decoding. - // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry - // has been registered. - return dc.Ancestor, nil + if isDocument { + if dc.defaultDocumentType != nil { + // If the bsontype is an embedded document and the DocumentType is set on the DecodeContext, then return + // that type. + return dc.defaultDocumentType, nil + } + if dc.Ancestor != nil { + // Using ancestor information rather than looking up the type map entry forces consistent decoding. + // If we're decoding into a bson.D, subdocuments should also be decoded as bson.D, even if a type map entry + // has been registered. + return dc.Ancestor, nil + } } rtype, err := dc.LookupTypeMapEntry(valueType) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 1f7acbcf16..e1fbef9c6c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "encoding" "fmt" "reflect" "strconv" @@ -230,6 +231,19 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } return "", err } + // keys implement encoding.TextMarshaler are marshaled. + if km, ok := val.Interface().(encoding.TextMarshaler); ok { + if val.Kind() == reflect.Ptr && val.IsNil() { + return "", nil + } + + buf, err := km.MarshalText() + if err != nil { + return "", err + } + + return string(buf), nil + } switch val.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -241,6 +255,7 @@ func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) { } var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) { keyVal := reflect.ValueOf(key) @@ -252,6 +267,12 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, v := keyVal.Interface().(KeyUnmarshaler) err = v.UnmarshalKey(key) keyVal = keyVal.Elem() + // Try to decode encoding.TextUnmarshalers. 
+ case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + keyVal = reflect.New(keyType) + v := keyVal.Interface().(encoding.TextUnmarshaler) + err = v.UnmarshalText([]byte(key)) + keyVal = keyVal.Elem() // Otherwise, go to type specific behavior default: switch keyType.Kind() { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index 02b9341ffe..f6f3800d40 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -298,7 +298,7 @@ func (rb *RegistryBuilder) Build() *Registry { return registry } -// LookupEncoder inspects the registry for an encoder for the given type. The lookup precendence works as follows: +// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows: // // 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using // RegisterTypeEncoder for the interface will be selected. @@ -376,7 +376,7 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value return nil, false } -// LookupDecoder inspects the registry for an decoder for the given type. The lookup precendence works as follows: +// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows: // // 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using // RegisterTypeDecoder for the interface will be selected. diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 8a690e37ce..54c76bf746 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -423,7 +423,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { if ejp.canonical { return nil, invalidJSONErrorForType("object", t) } - return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t) + return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as described in RFC-3339", t) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index 5e147373bc..ef5d837c2f 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -86,12 +86,11 @@ type valueReader struct { // NewBSONDocumentReader returns a ValueReader using b for the underlying BSON // representation. Parameter b must be a BSON Document. -// -// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes -// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes -// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a -// thing that can return the finished []byte since it might be reallocated when appended to. func NewBSONDocumentReader(b []byte) ValueReader { + // TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes a []byte while the + // TODO writer takes an io.Writer. 
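The MapCodec hunks above teach map key encoding and decoding to honor `encoding.TextMarshaler` and `encoding.TextUnmarshaler`. A small round-trip sketch with a hypothetical `YearMonth` key type, assuming a driver version that contains this change:

```go
package main

import (
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson"
)

// YearMonth is a hypothetical map key type; its TextMarshaler and
// TextUnmarshaler implementations are what the MapCodec change honors.
type YearMonth struct{ Y, M int }

func (ym YearMonth) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%04d-%02d", ym.Y, ym.M)), nil
}

func (ym *YearMonth) UnmarshalText(b []byte) error {
	t, err := time.Parse("2006-01", string(b))
	if err != nil {
		return err
	}
	ym.Y, ym.M = t.Year(), int(t.Month())
	return nil
}

func main() {
	in := map[YearMonth]int{{2022, 9}: 42}
	raw, err := bson.Marshal(in) // document key becomes "2022-09"
	if err != nil {
		panic(err)
	}
	var out map[YearMonth]int
	if err := bson.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[{2022 9}:42]
}
```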
We should have two versions of each, one that takes a []byte and one that takes an + // TODO io.Reader or io.Writer. The []byte version will need to return a thing that can return the finished []byte since + // TODO it might be reallocated when appended to. return newValueReader(b) } @@ -384,9 +383,13 @@ func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) { if err != nil { return nil, 0, err } + // Make a copy of the returned byte slice because it's just a subslice from the valueReader's + // buffer and is not safe to return in the unmarshaled value. + cp := make([]byte, len(b)) + copy(cp, b) vr.pop() - return b, btype, nil + return cp, btype, nil } func (vr *valueReader) ReadBoolean() (bool, error) { @@ -737,6 +740,9 @@ func (vr *valueReader) ReadValue() (ValueReader, error) { return vr, nil } +// readBytes reads length bytes from the valueReader starting at the current offset. Note that the +// returned byte slice is a subslice from the valueReader buffer and must be converted or copied +// before returning in an unmarshaled value. func (vr *valueReader) readBytes(length int32) ([]byte, error) { if length < 0 { return nil, fmt.Errorf("invalid length: %d", length) @@ -748,6 +754,7 @@ func (vr *valueReader) readBytes(length int32) ([]byte, error) { start := vr.offset vr.offset += int64(length) + return vr.d[start : start+int64(length)], nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go index a39c4ea4cb..f95a08afd5 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go @@ -529,7 +529,7 @@ func (vw *valueWriter) WriteDocumentEnd() error { vw.pop() if vw.stack[vw.frame].mode == mCodeWithScope { - // We ignore the error here because of the gaurantee of writeLength. + // We ignore the error here because of the guarantee of writeLength. // See the docs for writeLength for more info. _ = vw.writeLength() vw.pop() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go index 7f6b7694f9..6e189fa589 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/decoder.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/decoder.go @@ -33,6 +33,11 @@ var decPool = sync.Pool{ type Decoder struct { dc bsoncodec.DecodeContext vr bsonrw.ValueReader + + // We persist defaultDocumentM and defaultDocumentD on the Decoder to prevent overwriting from + // (*Decoder).SetContext. + defaultDocumentM bool + defaultDocumentD bool } // NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr. @@ -95,6 +100,12 @@ func (d *Decoder) Decode(val interface{}) error { if err != nil { return err } + if d.defaultDocumentM { + d.dc.DefaultDocumentM() + } + if d.defaultDocumentD { + d.dc.DefaultDocumentD() + } return decoder.DecodeValue(d.dc, d.vr, rval) } @@ -116,3 +127,15 @@ func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error { d.dc = dc return nil } + +// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". +func (d *Decoder) DefaultDocumentM() { + d.defaultDocumentM = true +} + +// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as +// "interface{}" or "map[string]interface{}". 
+func (d *Decoder) DefaultDocumentD() {
+	d.defaultDocumentD = true
+}
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
index 094be934f0..5e3825a231 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go
@@ -118,7 +118,7 @@
 // types, this tag is ignored.
 //
 // 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled
-// into that field will be trucated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int,
+// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int,
 // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be
 // decoded without losing precision. For float64 or non-numeric types, this tag is ignored.
 //
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
index 79f038581e..db8d8ee92b 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/marshal.go
@@ -225,10 +225,13 @@ func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val
 	return *sw, nil
 }
 
+// IndentExtJSON will prefix and indent the provided extended JSON src and append it to dst.
 func IndentExtJSON(dst *bytes.Buffer, src []byte, prefix, indent string) error {
 	return json.Indent(dst, src, prefix, indent)
 }
 
+// MarshalExtJSONIndent returns the extended JSON encoding of val, with each line prefixed
+// and indented.
 func MarshalExtJSONIndent(val interface{}, canonical, escapeHTML bool, prefix, indent string) ([]byte, error) {
 	marshaled, err := MarshalExtJSON(val, canonical, escapeHTML)
 	if err != nil {
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go
index 09062d2085..16d7573e75 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go
@@ -13,7 +13,7 @@ import "go.mongodb.org/mongo-driver/bson/bsoncodec"
 var DefaultRegistry = NewRegistryBuilder().Build()
 
 // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
-// deocders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
 // PrimitiveCodecs type in this package.
 func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
 	rb := bsoncodec.NewRegistryBuilder()
diff --git a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
index 6f9ca04d3c..f936ba1836 100644
--- a/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
+++ b/vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go
@@ -23,7 +23,7 @@ type Unmarshaler interface {
 }
 
 // ValueUnmarshaler is an interface implemented by types that can unmarshal a
-// BSON value representaiton of themselves. The BSON bytes and type can be
+// BSON value representation of themselves. The BSON bytes and type can be
 // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
 // wishes to retain the data after returning.
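A minimal usage sketch for the `DefaultDocumentM` knob added to `Decoder` above (`DefaultDocumentD` is symmetric); the document construction here is illustrative only:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	raw, err := bson.Marshal(bson.M{"outer": bson.M{"inner": int32(1)}})
	if err != nil {
		panic(err)
	}

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	// Without this call, documents decoded into interface{} come back as
	// the registry default (primitive.D); with it, as primitive.M.
	dec.DefaultDocumentM()

	var v interface{}
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v) // primitive.M
}
```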
type ValueUnmarshaler interface { diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index d95deef01e..ac05e401cc 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -70,19 +70,22 @@ const ( ReasonStale = "stale" ReasonConnectionErrored = "connectionError" ReasonTimedOut = "timeout" + ReasonError = "error" ) // strings for pool command monitoring types const ( - ConnectionClosed = "ConnectionClosed" PoolCreated = "ConnectionPoolCreated" + PoolReady = "ConnectionPoolReady" + PoolCleared = "ConnectionPoolCleared" + PoolClosedEvent = "ConnectionPoolClosed" ConnectionCreated = "ConnectionCreated" ConnectionReady = "ConnectionReady" + ConnectionClosed = "ConnectionClosed" + GetStarted = "ConnectionCheckOutStarted" GetFailed = "ConnectionCheckOutFailed" GetSucceeded = "ConnectionCheckedOut" ConnectionReturned = "ConnectionCheckedIn" - PoolCleared = "ConnectionPoolCleared" - PoolClosedEvent = "ConnectionPoolClosed" ) // MonitorPoolOptions contains pool options as formatted in pool events diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go new file mode 100644 index 0000000000..635d8e3538 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle_util.go @@ -0,0 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "fmt" + + "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" +) + +const ( + EncryptedCacheCollection = "ecc" + EncryptedStateCollection = "esc" + EncryptedCompactionCollection = "ecoc" +) + +// GetEncryptedStateCollectionName returns the encrypted state collection name associated with dataCollectionName. +func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionName string, stateCollection string) (string, error) { + fieldName := stateCollection + "Collection" + val, err := efBSON.LookupErr(fieldName) + if err != nil { + if err != bsoncore.ErrElementNotFound { + return "", err + } + // Return default name. + defaultName := "enxcol_." + dataCollectionName + "." + stateCollection + return defaultName, nil + } + + stateCollectionName, ok := val.StringValueOK() + if !ok { + return "", fmt.Errorf("expected string for '%v', got: %v", fieldName, val.Type) + } + return stateCollectionName, nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go new file mode 100644 index 0000000000..ea07637bc5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot_util.go @@ -0,0 +1,34 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package internal + +import ( + "context" + "time" +) + +type timeoutKey struct{} + +// MakeTimeoutContext returns a new context with Client-Side Operation Timeout (CSOT) feature-gated behavior +// and a Timeout set to the passed in Duration. Setting a Timeout on a single operation is not supported in +// public API. 
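+//
+// An illustrative sketch of the intended internal call pattern:
+//
+//	ctx, cancel := internal.MakeTimeoutContext(context.Background(), 10*time.Second)
+//	defer cancel()
+//	_ = internal.IsTimeoutContext(ctx) // true for the derived context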
+// +// TODO(GODRIVER-2348) We may be able to remove this function once CSOT feature-gated behavior becomes the +// TODO default behavior. +func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) { + // Only use the passed in Duration as a timeout on the Context if it + // is non-zero. + cancelFunc := func() {} + if to != 0 { + ctx, cancelFunc = context.WithTimeout(ctx, to) + } + return context.WithValue(ctx, timeoutKey{}, true), cancelFunc +} + +func IsTimeoutContext(ctx context.Context) bool { + return ctx.Value(timeoutKey{}) != nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/error.go b/vendor/go.mongodb.org/mongo-driver/internal/error.go index 6a105af4ff..1fec3f1835 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/error.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/error.go @@ -117,3 +117,7 @@ func (e *wrappedError) Error() string { func (e *wrappedError) Inner() error { return e.inner } + +func (e *wrappedError) Unwrap() error { + return e.inner +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go new file mode 100644 index 0000000000..4479009144 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/bits.go @@ -0,0 +1,38 @@ +// Copied from https://cs.opensource.google/go/go/+/946b4baaf6521d521928500b2b57429c149854e7:src/math/bits.go + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +// Add64 returns the sum with carry of x, y and carry: sum = x + y + carry. +// The carry input must be 0 or 1; otherwise the behavior is undefined. +// The carryOut output is guaranteed to be 0 or 1. +func Add64(x, y, carry uint64) (sum, carryOut uint64) { + yc := y + carry + sum = x + yc + if sum < x || yc < y { + carryOut = 1 + } + return +} + +// Mul64 returns the 128-bit product of x and y: (hi, lo) = x * y +// with the product bits' upper half returned in hi and the lower +// half returned in lo. +func Mul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go new file mode 100644 index 0000000000..859e4e0e42 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/exp.go @@ -0,0 +1,223 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/exp.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Exponential distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + re = 7.69711747013104972 +) + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1). 
+// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func (r *Rand) ExpFloat64() float64 { + for { + j := r.Uint32() + i := j & 0xFF + x := float64(j) * float64(we[i]) + if j < ke[i] { + return x + } + if i == 0 { + return re - math.Log(r.Float64()) + } + if fe[i]+float32(r.Float64())*(fe[i-1]-fe[i]) < float32(math.Exp(-x)) { + return x + } + } +} + +var ke = [256]uint32{ + 0xe290a139, 0x0, 0x9beadebc, 0xc377ac71, 0xd4ddb990, + 0xde893fb8, 0xe4a8e87c, 0xe8dff16a, 0xebf2deab, 0xee49a6e8, + 0xf0204efd, 0xf19bdb8e, 0xf2d458bb, 0xf3da104b, 0xf4b86d78, + 0xf577ad8a, 0xf61de83d, 0xf6afb784, 0xf730a573, 0xf7a37651, + 0xf80a5bb6, 0xf867189d, 0xf8bb1b4f, 0xf9079062, 0xf94d70ca, + 0xf98d8c7d, 0xf9c8928a, 0xf9ff175b, 0xfa319996, 0xfa6085f8, + 0xfa8c3a62, 0xfab5084e, 0xfadb36c8, 0xfaff0410, 0xfb20a6ea, + 0xfb404fb4, 0xfb5e2951, 0xfb7a59e9, 0xfb95038c, 0xfbae44ba, + 0xfbc638d8, 0xfbdcf892, 0xfbf29a30, 0xfc0731df, 0xfc1ad1ed, + 0xfc2d8b02, 0xfc3f6c4d, 0xfc5083ac, 0xfc60ddd1, 0xfc708662, + 0xfc7f8810, 0xfc8decb4, 0xfc9bbd62, 0xfca9027c, 0xfcb5c3c3, + 0xfcc20864, 0xfccdd70a, 0xfcd935e3, 0xfce42ab0, 0xfceebace, + 0xfcf8eb3b, 0xfd02c0a0, 0xfd0c3f59, 0xfd156b7b, 0xfd1e48d6, + 0xfd26daff, 0xfd2f2552, 0xfd372af7, 0xfd3eeee5, 0xfd4673e7, + 0xfd4dbc9e, 0xfd54cb85, 0xfd5ba2f2, 0xfd62451b, 0xfd68b415, + 0xfd6ef1da, 0xfd750047, 0xfd7ae120, 0xfd809612, 0xfd8620b4, + 0xfd8b8285, 0xfd90bcf5, 0xfd95d15e, 0xfd9ac10b, 0xfd9f8d36, + 0xfda43708, 0xfda8bf9e, 0xfdad2806, 0xfdb17141, 0xfdb59c46, + 0xfdb9a9fd, 0xfdbd9b46, 0xfdc170f6, 0xfdc52bd8, 0xfdc8ccac, + 0xfdcc542d, 0xfdcfc30b, 0xfdd319ef, 0xfdd6597a, 0xfdd98245, + 0xfddc94e5, 0xfddf91e6, 0xfde279ce, 0xfde54d1f, 0xfde80c52, + 0xfdeab7de, 0xfded5034, 0xfdefd5be, 0xfdf248e3, 0xfdf4aa06, + 0xfdf6f984, 0xfdf937b6, 0xfdfb64f4, 0xfdfd818d, 0xfdff8dd0, + 0xfe018a08, 0xfe03767a, 0xfe05536c, 0xfe07211c, 0xfe08dfc9, + 0xfe0a8fab, 0xfe0c30fb, 0xfe0dc3ec, 0xfe0f48b1, 0xfe10bf76, + 0xfe122869, 0xfe1383b4, 0xfe14d17c, 0xfe1611e7, 0xfe174516, + 0xfe186b2a, 0xfe19843e, 0xfe1a9070, 0xfe1b8fd6, 0xfe1c8289, + 0xfe1d689b, 0xfe1e4220, 0xfe1f0f26, 0xfe1fcfbc, 0xfe2083ed, + 0xfe212bc3, 0xfe21c745, 0xfe225678, 0xfe22d95f, 0xfe234ffb, + 0xfe23ba4a, 0xfe241849, 0xfe2469f2, 0xfe24af3c, 0xfe24e81e, + 0xfe25148b, 0xfe253474, 0xfe2547c7, 0xfe254e70, 0xfe25485a, + 0xfe25356a, 0xfe251586, 0xfe24e88f, 0xfe24ae64, 0xfe2466e1, + 0xfe2411df, 0xfe23af34, 0xfe233eb4, 0xfe22c02c, 0xfe22336b, + 0xfe219838, 0xfe20ee58, 0xfe20358c, 0xfe1f6d92, 0xfe1e9621, + 0xfe1daef0, 0xfe1cb7ac, 0xfe1bb002, 0xfe1a9798, 0xfe196e0d, + 0xfe1832fd, 0xfe16e5fe, 0xfe15869d, 0xfe141464, 0xfe128ed3, + 0xfe10f565, 0xfe0f478c, 0xfe0d84b1, 0xfe0bac36, 0xfe09bd73, + 0xfe07b7b5, 0xfe059a40, 0xfe03644c, 0xfe011504, 0xfdfeab88, + 0xfdfc26e9, 0xfdf98629, 0xfdf6c83b, 0xfdf3ec01, 0xfdf0f04a, + 0xfdedd3d1, 0xfdea953d, 0xfde7331e, 0xfde3abe9, 0xfddffdfb, + 0xfddc2791, 0xfdd826cd, 0xfdd3f9a8, 0xfdcf9dfc, 0xfdcb1176, + 0xfdc65198, 0xfdc15bb3, 0xfdbc2ce2, 0xfdb6c206, 0xfdb117be, + 0xfdab2a63, 0xfda4f5fd, 0xfd9e7640, 0xfd97a67a, 0xfd908192, + 0xfd8901f2, 0xfd812182, 0xfd78d98e, 0xfd7022bb, 0xfd66f4ed, + 0xfd5d4732, 0xfd530f9c, 0xfd48432b, 0xfd3cd59a, 0xfd30b936, + 0xfd23dea4, 0xfd16349e, 0xfd07a7a3, 0xfcf8219b, 0xfce7895b, + 0xfcd5c220, 0xfcc2aadb, 0xfcae1d5e, 0xfc97ed4e, 0xfc7fe6d4, + 0xfc65ccf3, 0xfc495762, 0xfc2a2fc8, 0xfc07ee19, 0xfbe213c1, + 0xfbb8051a, 0xfb890078, 0xfb5411a5, 0xfb180005, 0xfad33482, + 0xfa839276, 0xfa263b32, 0xf9b72d1c, 
0xf930a1a2, 0xf889f023, + 0xf7b577d2, 0xf69c650c, 0xf51530f0, 0xf2cb0e3c, 0xeeefb15d, + 0xe6da6ecf, +} +var we = [256]float32{ + 2.0249555e-09, 1.486674e-11, 2.4409617e-11, 3.1968806e-11, + 3.844677e-11, 4.4228204e-11, 4.9516443e-11, 5.443359e-11, + 5.905944e-11, 6.344942e-11, 6.7643814e-11, 7.1672945e-11, + 7.556032e-11, 7.932458e-11, 8.298079e-11, 8.654132e-11, + 9.0016515e-11, 9.3415074e-11, 9.674443e-11, 1.0001099e-10, + 1.03220314e-10, 1.06377254e-10, 1.09486115e-10, 1.1255068e-10, + 1.1557435e-10, 1.1856015e-10, 1.2151083e-10, 1.2442886e-10, + 1.2731648e-10, 1.3017575e-10, 1.3300853e-10, 1.3581657e-10, + 1.3860142e-10, 1.4136457e-10, 1.4410738e-10, 1.4683108e-10, + 1.4953687e-10, 1.5222583e-10, 1.54899e-10, 1.5755733e-10, + 1.6020171e-10, 1.6283301e-10, 1.6545203e-10, 1.6805951e-10, + 1.7065617e-10, 1.732427e-10, 1.7581973e-10, 1.7838787e-10, + 1.8094774e-10, 1.8349985e-10, 1.8604476e-10, 1.8858298e-10, + 1.9111498e-10, 1.9364126e-10, 1.9616223e-10, 1.9867835e-10, + 2.0119004e-10, 2.0369768e-10, 2.0620168e-10, 2.087024e-10, + 2.1120022e-10, 2.136955e-10, 2.1618855e-10, 2.1867974e-10, + 2.2116936e-10, 2.2365775e-10, 2.261452e-10, 2.2863202e-10, + 2.311185e-10, 2.3360494e-10, 2.360916e-10, 2.3857874e-10, + 2.4106667e-10, 2.4355562e-10, 2.4604588e-10, 2.485377e-10, + 2.5103128e-10, 2.5352695e-10, 2.560249e-10, 2.585254e-10, + 2.6102867e-10, 2.6353494e-10, 2.6604446e-10, 2.6855745e-10, + 2.7107416e-10, 2.7359479e-10, 2.761196e-10, 2.7864877e-10, + 2.8118255e-10, 2.8372119e-10, 2.8626485e-10, 2.888138e-10, + 2.9136826e-10, 2.939284e-10, 2.9649452e-10, 2.9906677e-10, + 3.016454e-10, 3.0423064e-10, 3.0682268e-10, 3.0942177e-10, + 3.1202813e-10, 3.1464195e-10, 3.1726352e-10, 3.19893e-10, + 3.2253064e-10, 3.251767e-10, 3.2783135e-10, 3.3049485e-10, + 3.3316744e-10, 3.3584938e-10, 3.3854083e-10, 3.4124212e-10, + 3.4395342e-10, 3.46675e-10, 3.4940711e-10, 3.5215003e-10, + 3.5490397e-10, 3.5766917e-10, 3.6044595e-10, 3.6323455e-10, + 3.660352e-10, 3.6884823e-10, 3.7167386e-10, 3.745124e-10, + 3.773641e-10, 3.802293e-10, 3.8310827e-10, 3.860013e-10, + 3.8890866e-10, 3.918307e-10, 3.9476775e-10, 3.9772008e-10, + 4.0068804e-10, 4.0367196e-10, 4.0667217e-10, 4.09689e-10, + 4.1272286e-10, 4.1577405e-10, 4.1884296e-10, 4.2192994e-10, + 4.250354e-10, 4.281597e-10, 4.313033e-10, 4.3446652e-10, + 4.3764986e-10, 4.408537e-10, 4.4407847e-10, 4.4732465e-10, + 4.5059267e-10, 4.5388301e-10, 4.571962e-10, 4.6053267e-10, + 4.6389292e-10, 4.6727755e-10, 4.70687e-10, 4.741219e-10, + 4.7758275e-10, 4.810702e-10, 4.845848e-10, 4.8812715e-10, + 4.9169796e-10, 4.9529775e-10, 4.989273e-10, 5.0258725e-10, + 5.0627835e-10, 5.100013e-10, 5.1375687e-10, 5.1754584e-10, + 5.21369e-10, 5.2522725e-10, 5.2912136e-10, 5.330522e-10, + 5.370208e-10, 5.4102806e-10, 5.45075e-10, 5.491625e-10, + 5.532918e-10, 5.5746385e-10, 5.616799e-10, 5.6594107e-10, + 5.7024857e-10, 5.746037e-10, 5.7900773e-10, 5.834621e-10, + 5.8796823e-10, 5.925276e-10, 5.971417e-10, 6.018122e-10, + 6.065408e-10, 6.113292e-10, 6.1617933e-10, 6.2109295e-10, + 6.260722e-10, 6.3111916e-10, 6.3623595e-10, 6.4142497e-10, + 6.4668854e-10, 6.5202926e-10, 6.5744976e-10, 6.6295286e-10, + 6.6854156e-10, 6.742188e-10, 6.79988e-10, 6.858526e-10, + 6.9181616e-10, 6.978826e-10, 7.04056e-10, 7.103407e-10, + 7.167412e-10, 7.2326256e-10, 7.2990985e-10, 7.366886e-10, + 7.4360473e-10, 7.5066453e-10, 7.5787476e-10, 7.6524265e-10, + 7.7277595e-10, 7.80483e-10, 7.883728e-10, 7.9645507e-10, + 8.047402e-10, 8.1323964e-10, 8.219657e-10, 8.309319e-10, + 8.401528e-10, 8.496445e-10, 
8.594247e-10, 8.6951274e-10, + 8.799301e-10, 8.9070046e-10, 9.018503e-10, 9.134092e-10, + 9.254101e-10, 9.378904e-10, 9.508923e-10, 9.644638e-10, + 9.786603e-10, 9.935448e-10, 1.0091913e-09, 1.025686e-09, + 1.0431306e-09, 1.0616465e-09, 1.08138e-09, 1.1025096e-09, + 1.1252564e-09, 1.1498986e-09, 1.1767932e-09, 1.206409e-09, + 1.2393786e-09, 1.276585e-09, 1.3193139e-09, 1.3695435e-09, + 1.4305498e-09, 1.508365e-09, 1.6160854e-09, 1.7921248e-09, +} +var fe = [256]float32{ + 1, 0.9381437, 0.90046996, 0.87170434, 0.8477855, 0.8269933, + 0.8084217, 0.7915276, 0.77595687, 0.7614634, 0.7478686, + 0.7350381, 0.72286767, 0.71127474, 0.70019263, 0.6895665, + 0.67935055, 0.6695063, 0.66000086, 0.65080583, 0.6418967, + 0.63325197, 0.6248527, 0.6166822, 0.60872537, 0.60096896, + 0.5934009, 0.58601034, 0.5787874, 0.57172304, 0.5648092, + 0.5580383, 0.5514034, 0.5448982, 0.5385169, 0.53225386, + 0.5261042, 0.52006316, 0.5141264, 0.50828975, 0.5025495, + 0.496902, 0.49134386, 0.485872, 0.48048335, 0.4751752, + 0.46994483, 0.46478975, 0.45970762, 0.45469615, 0.44975325, + 0.44487688, 0.44006512, 0.43531612, 0.43062815, 0.42599955, + 0.42142874, 0.4169142, 0.41245446, 0.40804818, 0.403694, + 0.3993907, 0.39513698, 0.39093173, 0.38677382, 0.38266218, + 0.37859577, 0.37457356, 0.37059465, 0.3666581, 0.362763, + 0.35890847, 0.35509375, 0.351318, 0.3475805, 0.34388044, + 0.34021714, 0.3365899, 0.33299807, 0.32944095, 0.32591796, + 0.3224285, 0.3189719, 0.31554767, 0.31215525, 0.30879408, + 0.3054636, 0.3021634, 0.29889292, 0.2956517, 0.29243928, + 0.28925523, 0.28609908, 0.28297043, 0.27986884, 0.27679393, + 0.2737453, 0.2707226, 0.2677254, 0.26475343, 0.26180625, + 0.25888354, 0.25598502, 0.2531103, 0.25025907, 0.24743107, + 0.24462597, 0.24184346, 0.23908329, 0.23634516, 0.23362878, + 0.23093392, 0.2282603, 0.22560766, 0.22297576, 0.22036438, + 0.21777324, 0.21520215, 0.21265087, 0.21011916, 0.20760682, + 0.20511365, 0.20263945, 0.20018397, 0.19774707, 0.19532852, + 0.19292815, 0.19054577, 0.1881812, 0.18583426, 0.18350479, + 0.1811926, 0.17889754, 0.17661946, 0.17435817, 0.17211354, + 0.1698854, 0.16767362, 0.16547804, 0.16329853, 0.16113494, + 0.15898713, 0.15685499, 0.15473837, 0.15263714, 0.15055119, + 0.14848037, 0.14642459, 0.14438373, 0.14235765, 0.14034624, + 0.13834943, 0.13636707, 0.13439907, 0.13244532, 0.13050574, + 0.1285802, 0.12666863, 0.12477092, 0.12288698, 0.12101672, + 0.119160056, 0.1173169, 0.115487166, 0.11367077, 0.11186763, + 0.11007768, 0.10830083, 0.10653701, 0.10478614, 0.10304816, + 0.101323, 0.09961058, 0.09791085, 0.09622374, 0.09454919, + 0.09288713, 0.091237515, 0.08960028, 0.087975375, 0.08636274, + 0.08476233, 0.083174095, 0.081597984, 0.08003395, 0.07848195, + 0.076941945, 0.07541389, 0.07389775, 0.072393484, 0.07090106, + 0.069420435, 0.06795159, 0.066494495, 0.06504912, 0.063615434, + 0.062193416, 0.060783047, 0.059384305, 0.057997175, + 0.05662164, 0.05525769, 0.053905312, 0.052564494, 0.051235236, + 0.049917534, 0.048611384, 0.047316793, 0.046033762, 0.0447623, + 0.043502413, 0.042254124, 0.041017443, 0.039792392, + 0.038578995, 0.037377283, 0.036187284, 0.035009038, + 0.033842582, 0.032687962, 0.031545233, 0.030414443, 0.02929566, + 0.02818895, 0.027094385, 0.026012046, 0.024942026, 0.023884421, + 0.022839336, 0.021806888, 0.020787204, 0.019780423, 0.0187867, + 0.0178062, 0.016839107, 0.015885621, 0.014945968, 0.014020392, + 0.013109165, 0.012212592, 0.011331013, 0.01046481, 0.009614414, + 0.008780315, 0.007963077, 0.0071633533, 0.006381906, + 0.0056196423, 
0.0048776558, 0.004157295, 0.0034602648, + 0.0027887989, 0.0021459677, 0.0015362998, 0.0009672693, + 0.00045413437, +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go new file mode 100644 index 0000000000..8c74a358de --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/normal.go @@ -0,0 +1,158 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/normal.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "math" +) + +/* + * Normal distribution + * + * See "The Ziggurat Method for Generating Random Variables" + * (Marsaglia & Tsang, 2000) + * http://www.jstatsoft.org/v05/i08/paper [pdf] + */ + +const ( + rn = 3.442619855899 +) + +func absInt32(i int32) uint32 { + if i < 0 { + return uint32(-i) + } + return uint32(i) +} + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1). +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func (r *Rand) NormFloat64() float64 { + for { + j := int32(r.Uint32()) // Possibly negative + i := j & 0x7F + x := float64(j) * float64(wn[i]) + if absInt32(j) < kn[i] { + // This case should be hit better than 99% of the time. + return x + } + + if i == 0 { + // This extra work is only required for the base strip. + for { + x = -math.Log(r.Float64()) * (1.0 / rn) + y := -math.Log(r.Float64()) + if y+y >= x*x { + break + } + } + if j > 0 { + return rn + x + } + return -rn - x + } + if fn[i]+float32(r.Float64())*(fn[i-1]-fn[i]) < float32(math.Exp(-.5*x*x)) { + return x + } + } +} + +var kn = [128]uint32{ + 0x76ad2212, 0x0, 0x600f1b53, 0x6ce447a6, 0x725b46a2, + 0x7560051d, 0x774921eb, 0x789a25bd, 0x799045c3, 0x7a4bce5d, + 0x7adf629f, 0x7b5682a6, 0x7bb8a8c6, 0x7c0ae722, 0x7c50cce7, + 0x7c8cec5b, 0x7cc12cd6, 0x7ceefed2, 0x7d177e0b, 0x7d3b8883, + 0x7d5bce6c, 0x7d78dd64, 0x7d932886, 0x7dab0e57, 0x7dc0dd30, + 0x7dd4d688, 0x7de73185, 0x7df81cea, 0x7e07c0a3, 0x7e163efa, + 0x7e23b587, 0x7e303dfd, 0x7e3beec2, 0x7e46db77, 0x7e51155d, + 0x7e5aabb3, 0x7e63abf7, 0x7e6c222c, 0x7e741906, 0x7e7b9a18, + 0x7e82adfa, 0x7e895c63, 0x7e8fac4b, 0x7e95a3fb, 0x7e9b4924, + 0x7ea0a0ef, 0x7ea5b00d, 0x7eaa7ac3, 0x7eaf04f3, 0x7eb3522a, + 0x7eb765a5, 0x7ebb4259, 0x7ebeeafd, 0x7ec2620a, 0x7ec5a9c4, + 0x7ec8c441, 0x7ecbb365, 0x7ece78ed, 0x7ed11671, 0x7ed38d62, + 0x7ed5df12, 0x7ed80cb4, 0x7eda175c, 0x7edc0005, 0x7eddc78e, + 0x7edf6ebf, 0x7ee0f647, 0x7ee25ebe, 0x7ee3a8a9, 0x7ee4d473, + 0x7ee5e276, 0x7ee6d2f5, 0x7ee7a620, 0x7ee85c10, 0x7ee8f4cd, + 0x7ee97047, 0x7ee9ce59, 0x7eea0eca, 0x7eea3147, 0x7eea3568, + 0x7eea1aab, 0x7ee9e071, 0x7ee98602, 0x7ee90a88, 0x7ee86d08, + 0x7ee7ac6a, 0x7ee6c769, 0x7ee5bc9c, 0x7ee48a67, 0x7ee32efc, + 0x7ee1a857, 0x7edff42f, 0x7ede0ffa, 0x7edbf8d9, 0x7ed9ab94, + 0x7ed7248d, 0x7ed45fae, 0x7ed1585c, 0x7ece095f, 0x7eca6ccb, + 0x7ec67be2, 0x7ec22eee, 0x7ebd7d1a, 0x7eb85c35, 0x7eb2c075, + 0x7eac9c20, 0x7ea5df27, 0x7e9e769f, 0x7e964c16, 0x7e8d44ba, + 0x7e834033, 0x7e781728, 0x7e6b9933, 0x7e5d8a1a, 0x7e4d9ded, + 0x7e3b737a, 0x7e268c2f, 0x7e0e3ff5, 0x7df1aa5d, 0x7dcf8c72, + 0x7da61a1e, 0x7d72a0fb, 0x7d30e097, 0x7cd9b4ab, 0x7c600f1a, + 0x7ba90bdc, 
0x7a722176, 0x77d664e5, +} +var wn = [128]float32{ + 1.7290405e-09, 1.2680929e-10, 1.6897518e-10, 1.9862688e-10, + 2.2232431e-10, 2.4244937e-10, 2.601613e-10, 2.7611988e-10, + 2.9073963e-10, 3.042997e-10, 3.1699796e-10, 3.289802e-10, + 3.4035738e-10, 3.5121603e-10, 3.616251e-10, 3.7164058e-10, + 3.8130857e-10, 3.9066758e-10, 3.9975012e-10, 4.08584e-10, + 4.1719309e-10, 4.2559822e-10, 4.338176e-10, 4.418672e-10, + 4.497613e-10, 4.5751258e-10, 4.651324e-10, 4.7263105e-10, + 4.8001775e-10, 4.87301e-10, 4.944885e-10, 5.015873e-10, + 5.0860405e-10, 5.155446e-10, 5.2241467e-10, 5.2921934e-10, + 5.359635e-10, 5.426517e-10, 5.4928817e-10, 5.5587696e-10, + 5.624219e-10, 5.6892646e-10, 5.753941e-10, 5.818282e-10, + 5.882317e-10, 5.946077e-10, 6.00959e-10, 6.072884e-10, + 6.135985e-10, 6.19892e-10, 6.2617134e-10, 6.3243905e-10, + 6.386974e-10, 6.449488e-10, 6.511956e-10, 6.5744005e-10, + 6.6368433e-10, 6.699307e-10, 6.7618144e-10, 6.824387e-10, + 6.8870465e-10, 6.949815e-10, 7.012715e-10, 7.075768e-10, + 7.1389966e-10, 7.202424e-10, 7.266073e-10, 7.329966e-10, + 7.394128e-10, 7.4585826e-10, 7.5233547e-10, 7.58847e-10, + 7.653954e-10, 7.719835e-10, 7.7861395e-10, 7.852897e-10, + 7.920138e-10, 7.987892e-10, 8.0561924e-10, 8.125073e-10, + 8.194569e-10, 8.2647167e-10, 8.3355556e-10, 8.407127e-10, + 8.479473e-10, 8.55264e-10, 8.6266755e-10, 8.7016316e-10, + 8.777562e-10, 8.8545243e-10, 8.932582e-10, 9.0117996e-10, + 9.09225e-10, 9.174008e-10, 9.2571584e-10, 9.341788e-10, + 9.427997e-10, 9.515889e-10, 9.605579e-10, 9.697193e-10, + 9.790869e-10, 9.88676e-10, 9.985036e-10, 1.0085882e-09, + 1.0189509e-09, 1.0296151e-09, 1.0406069e-09, 1.0519566e-09, + 1.063698e-09, 1.0758702e-09, 1.0885183e-09, 1.1016947e-09, + 1.1154611e-09, 1.1298902e-09, 1.1450696e-09, 1.1611052e-09, + 1.1781276e-09, 1.1962995e-09, 1.2158287e-09, 1.2369856e-09, + 1.2601323e-09, 1.2857697e-09, 1.3146202e-09, 1.347784e-09, + 1.3870636e-09, 1.4357403e-09, 1.5008659e-09, 1.6030948e-09, +} +var fn = [128]float32{ + 1, 0.9635997, 0.9362827, 0.9130436, 0.89228165, 0.87324303, + 0.8555006, 0.8387836, 0.8229072, 0.8077383, 0.793177, + 0.7791461, 0.7655842, 0.7524416, 0.73967725, 0.7272569, + 0.7151515, 0.7033361, 0.69178915, 0.68049186, 0.6694277, + 0.658582, 0.6479418, 0.63749546, 0.6272325, 0.6171434, + 0.6072195, 0.5974532, 0.58783704, 0.5783647, 0.56903, + 0.5598274, 0.5507518, 0.54179835, 0.5329627, 0.52424055, + 0.5156282, 0.50712204, 0.49871865, 0.49041483, 0.48220766, + 0.4740943, 0.46607214, 0.4581387, 0.45029163, 0.44252872, + 0.43484783, 0.427247, 0.41972435, 0.41227803, 0.40490642, + 0.39760786, 0.3903808, 0.3832238, 0.37613547, 0.36911446, + 0.3621595, 0.35526937, 0.34844297, 0.34167916, 0.33497685, + 0.3283351, 0.3217529, 0.3152294, 0.30876362, 0.30235484, + 0.29600215, 0.28970486, 0.2834622, 0.2772735, 0.27113807, + 0.2650553, 0.25902456, 0.2530453, 0.24711695, 0.241239, + 0.23541094, 0.22963232, 0.2239027, 0.21822165, 0.21258877, + 0.20700371, 0.20146611, 0.19597565, 0.19053204, 0.18513499, + 0.17978427, 0.17447963, 0.1692209, 0.16400786, 0.15884037, + 0.15371831, 0.14864157, 0.14361008, 0.13862377, 0.13368265, + 0.12878671, 0.12393598, 0.119130544, 0.11437051, 0.10965602, + 0.104987256, 0.10036444, 0.095787846, 0.0912578, 0.08677467, + 0.0823389, 0.077950984, 0.073611505, 0.06932112, 0.06508058, + 0.06089077, 0.056752663, 0.0526674, 0.048636295, 0.044660863, + 0.040742867, 0.03688439, 0.033087887, 0.029356318, + 0.025693292, 0.022103304, 0.018592102, 0.015167298, + 0.011839478, 0.008624485, 0.005548995, 0.0026696292, +} diff --git 
a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go new file mode 100644 index 0000000000..ffd0509bd5 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rand.go @@ -0,0 +1,374 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rand.go + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rand implements pseudo-random number generators. +// +// Random numbers are generated by a Source. Top-level functions, such as +// Float64 and Int, use a default shared Source that produces a deterministic +// sequence of values each time a program is run. Use the Seed function to +// initialize the default Source if different behavior is required for each run. +// The default Source, a LockedSource, is safe for concurrent use by multiple +// goroutines, but Sources created by NewSource are not. However, Sources are small +// and it is reasonable to have a separate Source for each goroutine, seeded +// differently, to avoid locking. +// +// For random numbers suitable for security-sensitive work, see the crypto/rand +// package. package rand + +import "sync" + +// A Source represents a source of uniformly-distributed +// pseudo-random uint64 values in the range [0, 1<<64). +type Source interface { + Uint64() uint64 + Seed(seed uint64) +} + +// NewSource returns a new pseudo-random Source seeded with the given value. +func NewSource(seed uint64) Source { + var rng PCGSource + rng.Seed(seed) + return &rng +} + +// A Rand is a source of random numbers. +type Rand struct { + src Source + + // readVal contains remainder of 64-bit integer used for bytes + // generation during most recent Read call. + // It is saved so next Read call can start where the previous + // one finished. + readVal uint64 + // readPos indicates the number of low-order bytes of readVal + // that are still valid. + readPos int8 +} + +// New returns a new Rand that uses random values from src +// to generate other random values. +func New(src Source) *Rand { + return &Rand{src: src} +} + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +// Seed should not be called concurrently with any other Rand method. +func (r *Rand) Seed(seed uint64) { + if lk, ok := r.src.(*LockedSource); ok { + lk.seedPos(seed, &r.readPos) + return + } + + r.src.Seed(seed) + r.readPos = 0 +} + +// Uint64 returns a pseudo-random 64-bit integer as a uint64. +func (r *Rand) Uint64() uint64 { return r.src.Uint64() } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *Rand) Int63() int64 { return int64(r.src.Uint64() &^ (1 << 63)) } + +// Uint32 returns a pseudo-random 32-bit value as a uint32. +func (r *Rand) Uint32() uint32 { return uint32(r.Uint64() >> 32) } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32. +func (r *Rand) Int31() int32 { return int32(r.Uint64() >> 33) } + +// Int returns a non-negative pseudo-random int. +func (r *Rand) Int() int { + u := uint(r.Uint64()) + return int(u << 1 >> 1) // clear sign bit. +} + +const maxUint64 = (1 << 64) - 1 + +// Uint64n returns, as a uint64, a pseudo-random number in [0,n). +// It is guaranteed more uniform than taking a Source value mod n +// for any n that is not a power of 2.
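+//
+// The uniformity comes from rejection sampling: when n is not a power of 2,
+// the top maxUint64%n + 1 values of the uint64 range are redrawn, leaving a
+// number of equally-likely outcomes that is an exact multiple of n. For
+// example (illustrative), with n = 10 the six values 0xFFFFFFFFFFFFFFFA
+// through 0xFFFFFFFFFFFFFFFF are resampled so each residue 0..9 is equally
+// likely.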
+func (r *Rand) Uint64n(n uint64) uint64 { + if n&(n-1) == 0 { // n is power of two, can mask + if n == 0 { + panic("invalid argument to Uint64n") + } + return r.Uint64() & (n - 1) + } + // If n does not divide v, to avoid bias we must not use + // a v that is within maxUint64%n of the top of the range. + v := r.Uint64() + if v > maxUint64-n { // Fast check. + ceiling := maxUint64 - maxUint64%n + for v >= ceiling { + v = r.Uint64() + } + } + + return v % n +} + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + return int64(r.Uint64n(uint64(n))) +} + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Int31n(n int32) int32 { + if n <= 0 { + panic("invalid argument to Int31n") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int32(r.Uint64n(uint64(n))) +} + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + if n <= 0 { + panic("invalid argument to Intn") + } + // TODO: Avoid some 64-bit ops to make it more efficient on 32-bit machines. + return int(r.Uint64n(uint64(n))) +} + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0). +func (r *Rand) Float64() float64 { + // There is one bug in the value stream: r.Int63() may be so close + // to 1<<63 that the division rounds up to 1.0, and we've guaranteed + // that the result is always less than 1.0. + // + // We tried to fix this by mapping 1.0 back to 0.0, but since float64 + // values near 0 are much denser than near 1, mapping 1 to 0 caused + // a theoretically significant overshoot in the probability of returning 0. + // Instead of that, if we round up to 1, just try again. + // Getting 1 only happens 1/2⁵³ of the time, so most clients + // will not observe it anyway. +again: + f := float64(r.Uint64n(1<<53)) / (1 << 53) + if f == 1.0 { + goto again // resample; this branch is taken O(never) + } + return f +} + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0). +func (r *Rand) Float32() float32 { + // We do not want to return 1.0. + // This only happens 1/2²⁴ of the time (plus the 1/2⁵³ of the time in Float64). +again: + f := float32(r.Float64()) + if f == 1 { + goto again // resample; this branch is taken O(very rarely) + } + return f +} + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). +func (r *Rand) Perm(n int) []int { + m := make([]int, n) + // In the following loop, the iteration when i=0 always swaps m[0] with m[0]. + // A change to remove this useless iteration is to assign 1 to i in the init + // statement. But Perm also affects r. Making this change will affect + // the final state of r. So this change can't be made for compatibility + // reasons for Go 1. + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + m[i] = m[j] + m[j] = i + } + return m +} + +// Shuffle pseudo-randomizes the order of elements. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. +func (r *Rand) Shuffle(n int, swap func(i, j int)) { + if n < 0 { + panic("invalid argument to Shuffle") + } + + // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle + // Shuffle really ought not be called with n that doesn't fit in 32 bits.
+ // Not only will it take a very long time, but with 2³¹! possible permutations, + // there's no way that any PRNG can have a big enough internal state to + // generate even a minuscule percentage of the possible permutations. + // Nevertheless, the right API signature accepts an int n, so handle it as best we can. + i := n - 1 + for ; i > 1<<31-1-1; i-- { + j := int(r.Int63n(int64(i + 1))) + swap(i, j) + } + for ; i > 0; i-- { + j := int(r.Int31n(int32(i + 1))) + swap(i, j) + } +} + +// Read generates len(p) random bytes and writes them into p. It +// always returns len(p) and a nil error. +// Read should not be called concurrently with any other Rand method unless +// the underlying source is a LockedSource. +func (r *Rand) Read(p []byte) (n int, err error) { + if lk, ok := r.src.(*LockedSource); ok { + return lk.Read(p, &r.readVal, &r.readPos) + } + return read(p, r.src, &r.readVal, &r.readPos) +} + +func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err error) { + pos := *readPos + val := *readVal + rng, _ := src.(*PCGSource) + for n = 0; n < len(p); n++ { + if pos == 0 { + if rng != nil { + val = rng.Uint64() + } else { + val = src.Uint64() + } + pos = 8 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + *readPos = pos + *readVal = val + return +} + +/* + * Top-level convenience functions + */ + +var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) + +// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. +var _ PCGSource = globalRand.src.(*LockedSource).src + +// Seed uses the provided seed value to initialize the default Source to a +// deterministic state. If Seed is not called, the generator behaves as +// if seeded by Seed(1). +// Seed, unlike the Rand.Seed method, is safe for concurrent use. +func Seed(seed uint64) { globalRand.Seed(seed) } + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64 +// from the default Source. +func Int63() int64 { return globalRand.Int63() } + +// Uint32 returns a pseudo-random 32-bit value as a uint32 +// from the default Source. +func Uint32() uint32 { return globalRand.Uint32() } + +// Uint64 returns a pseudo-random 64-bit value as a uint64 +// from the default Source. +func Uint64() uint64 { return globalRand.Uint64() } + +// Int31 returns a non-negative pseudo-random 31-bit integer as an int32 +// from the default Source. +func Int31() int32 { return globalRand.Int31() } + +// Int returns a non-negative pseudo-random int from the default Source. +func Int() int { return globalRand.Int() } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int63n(n int64) int64 { return globalRand.Int63n(n) } + +// Int31n returns, as an int32, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Int31n(n int32) int32 { return globalRand.Int31n(n) } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n) +// from the default Source. +// It panics if n <= 0. +func Intn(n int) int { return globalRand.Intn(n) } + +// Float64 returns, as a float64, a pseudo-random number in [0.0,1.0) +// from the default Source. +func Float64() float64 { return globalRand.Float64() } + +// Float32 returns, as a float32, a pseudo-random number in [0.0,1.0) +// from the default Source. 
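+//
+// For example (illustrative), callers that do not need their own Source can use
+// these package-level helpers, which share the concurrency-safe global
+// generator:
+//
+//	f := Float32() // in [0.0, 1.0)
+//	k := Intn(10)  // in [0, 10)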
+func Float32() float32 { return globalRand.Float32() } + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n) +// from the default Source. +func Perm(n int) []int { return globalRand.Perm(n) } + +// Shuffle pseudo-randomizes the order of elements using the default Source. +// n is the number of elements. Shuffle panics if n < 0. +// swap swaps the elements with indexes i and j. +func Shuffle(n int, swap func(i, j int)) { globalRand.Shuffle(n, swap) } + +// Read generates len(p) random bytes from the default Source and +// writes them into p. It always returns len(p) and a nil error. +// Read, unlike the Rand.Read method, is safe for concurrent use. +func Read(p []byte) (n int, err error) { return globalRand.Read(p) } + +// NormFloat64 returns a normally distributed float64 in the range +// [-math.MaxFloat64, +math.MaxFloat64] with +// standard normal distribution (mean = 0, stddev = 1) +// from the default Source. +// To produce a different normal distribution, callers can +// adjust the output using: +// +// sample = NormFloat64() * desiredStdDev + desiredMean +func NormFloat64() float64 { return globalRand.NormFloat64() } + +// ExpFloat64 returns an exponentially distributed float64 in the range +// (0, +math.MaxFloat64] with an exponential distribution whose rate parameter +// (lambda) is 1 and whose mean is 1/lambda (1) from the default Source. +// To produce a distribution with a different rate parameter, +// callers can adjust the output using: +// +// sample = ExpFloat64() / desiredRateParameter +func ExpFloat64() float64 { return globalRand.ExpFloat64() } + +// LockedSource is an implementation of Source that is concurrency-safe. +// A Rand using a LockedSource is safe for concurrent use. +// +// The zero value of LockedSource is valid, but should be seeded before use. +type LockedSource struct { + lk sync.Mutex + src PCGSource +} + +func (s *LockedSource) Uint64() (n uint64) { + s.lk.Lock() + n = s.src.Uint64() + s.lk.Unlock() + return +} + +func (s *LockedSource) Seed(seed uint64) { + s.lk.Lock() + s.src.Seed(seed) + s.lk.Unlock() +} + +// seedPos implements Seed for a LockedSource without a race condition. +func (s *LockedSource) seedPos(seed uint64, readPos *int8) { + s.lk.Lock() + s.src.Seed(seed) + *readPos = 0 + s.lk.Unlock() +} + +// Read implements Read for a LockedSource. +func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) { + s.lk.Lock() + n, err = read(p, &s.src, readVal, readPos) + s.lk.Unlock() + return +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go new file mode 100644 index 0000000000..f04f987989 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/rand/rng.go @@ -0,0 +1,93 @@ +// Copied from https://cs.opensource.google/go/x/exp/+/24438e51023af3bfc1db8aed43c1342817e8cfcd:rand/rng.go + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import ( + "encoding/binary" + "io" + "math/bits" +) + +// PCGSource is an implementation of a 64-bit permuted congruential +// generator as defined in +// +// PCG: A Family of Simple Fast Space-Efficient Statistically Good +// Algorithms for Random Number Generation +// Melissa E.
O’Neill, Harvey Mudd College +// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf +// +// The generator here is the congruential generator PCG XSL RR 128/64 (LCG) +// as found in the software available at http://www.pcg-random.org/. +// It has period 2^128 with 128 bits of state, producing 64-bit values. +// Its state is represented by two uint64 words. +type PCGSource struct { + low uint64 + high uint64 +} + +const ( + maxUint32 = (1 << 32) - 1 + + multiplier = 47026247687942121848144207491837523525 + mulHigh = multiplier >> 64 + mulLow = multiplier & maxUint64 + + increment = 117397592171526113268558934119004209487 + incHigh = increment >> 64 + incLow = increment & maxUint64 + + // TODO: Use these? + initializer = 245720598905631564143578724636268694099 + initHigh = initializer >> 64 + initLow = initializer & maxUint64 +) + +// Seed uses the provided seed value to initialize the generator to a deterministic state. +func (pcg *PCGSource) Seed(seed uint64) { + pcg.low = seed + pcg.high = seed // TODO: What is right? +} + +// Uint64 returns a pseudo-random 64-bit unsigned integer as a uint64. +func (pcg *PCGSource) Uint64() uint64 { + pcg.multiply() + pcg.add() + // XOR high and low 64 bits together and rotate right by high 6 bits of state. + return bits.RotateLeft64(pcg.high^pcg.low, -int(pcg.high>>58)) +} + +func (pcg *PCGSource) add() { + var carry uint64 + pcg.low, carry = Add64(pcg.low, incLow, 0) + pcg.high, _ = Add64(pcg.high, incHigh, carry) +} + +func (pcg *PCGSource) multiply() { + hi, lo := Mul64(pcg.low, mulLow) + hi += pcg.high * mulLow + hi += pcg.low * mulHigh + pcg.low = lo + pcg.high = hi +} + +// MarshalBinary returns the binary representation of the current state of the generator. +func (pcg *PCGSource) MarshalBinary() ([]byte, error) { + var buf [16]byte + binary.BigEndian.PutUint64(buf[:8], pcg.high) + binary.BigEndian.PutUint64(buf[8:], pcg.low) + return buf[:], nil +} + +// UnmarshalBinary sets the state of the generator to the state represented in data. +func (pcg *PCGSource) UnmarshalBinary(data []byte) error { + if len(data) < 16 { + return io.ErrUnexpectedEOF + } + pcg.low = binary.BigEndian.Uint64(data[8:]) + pcg.high = binary.BigEndian.Uint64(data[:8]) + return nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go index 631f95320e..9616074321 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/randutil/randutil.go @@ -1,54 +1,39 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + // Package randutil provides common random number utilities. package randutil import ( - "math/rand" - "sync" + crand "crypto/rand" + "fmt" + "io" + + xrand "go.mongodb.org/mongo-driver/internal/randutil/rand" ) -// A LockedRand wraps a "math/rand".Rand and is safe to use from multiple goroutines. -type LockedRand struct { - mu sync.Mutex - r *rand.Rand +// NewLockedRand returns a new "x/exp/rand" pseudo-random number generator seeded with a +// cryptographically-secure random number. +// It is safe to use from multiple goroutines.
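+//
+// An illustrative sketch of typical use:
+//
+//	r := randutil.NewLockedRand()
+//	n := r.Intn(100) // safe to call from multiple goroutines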
+func NewLockedRand() *xrand.Rand { + var randSrc = new(xrand.LockedSource) + randSrc.Seed(cryptoSeed()) + return xrand.New(randSrc) } -// NewLockedRand returns a new LockedRand that uses random values from src to generate other random -// values. It is safe to use from multiple goroutines. -func NewLockedRand(src rand.Source) *LockedRand { - return &LockedRand{ - // Ignore gosec warning "Use of weak random number generator (math/rand instead of - // crypto/rand)". We intentionally use a pseudo-random number generator. - /* #nosec G404 */ - r: rand.New(src), +// cryptoSeed returns a random uint64 read from the "crypto/rand" random number generator. It is +// intended to be used to seed pseudorandom number generators at package initialization. It panics +// if it encounters any errors. +func cryptoSeed() uint64 { + var b [8]byte + _, err := io.ReadFull(crand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("failed to read 8 bytes from a \"crypto/rand\".Reader: %v", err)) } -} -// Read generates len(p) random bytes and writes them into p. It always returns len(p) and a nil -// error. -func (lr *LockedRand) Read(p []byte) (int, error) { - lr.mu.Lock() - n, err := lr.r.Read(p) - lr.mu.Unlock() - return n, err -} - -// Intn returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). It -// panics if n <= 0. -func (lr *LockedRand) Intn(n int) int { - lr.mu.Lock() - x := lr.r.Intn(n) - lr.mu.Unlock() - return x -} - -// Shuffle pseudo-randomizes the order of elements. n is the number of elements. Shuffle panics if -// n < 0. swap swaps the elements with indexes i and j. -// -// Note that Shuffle locks the LockedRand, so shuffling large collections may adversely affect other -// concurrent calls. If many concurrent Shuffle and random value calls are required, consider using -// the global "math/rand".Shuffle instead because it uses much more granular locking. -func (lr *LockedRand) Shuffle(n int, swap func(i, j int)) { - lr.mu.Lock() - lr.r.Shuffle(n, swap) - lr.mu.Unlock() + return (uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) | + (uint64(b[4]) << 32) | (uint64(b[5]) << 40) | (uint64(b[6]) << 48) | (uint64(b[7]) << 56) } diff --git a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go index db1e1890e5..6cafa791db 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/string_util.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/string_util.go @@ -33,7 +33,7 @@ func StringSliceFromRawValue(name string, val bson.RawValue) ([]string, error) { return nil, err } - var strs []string + strs := make([]string, 0, len(arrayValues)) for _, arrayVal := range arrayValues { str, ok := arrayVal.StringValueOK() if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go new file mode 100644 index 0000000000..78f16645d7 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/internal/uuid/uuid.go @@ -0,0 +1,53 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +package uuid + +import ( + "io" + + "go.mongodb.org/mongo-driver/internal/randutil" +) + +// UUID represents a UUID. 
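+//
+// For a version-4 value produced by this package, the high nibble of byte 6
+// holds the version (0x4) and the top two bits of byte 8 hold the variant
+// (0b10), as set in new() below.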
+type UUID [16]byte + +// A source is a UUID generator that reads random values from an io.Reader. +// It should be safe to use from multiple goroutines. +type source struct { + random io.Reader +} + +// new returns a random UUIDv4 with bytes read from the source's random number generator. +func (s *source) new() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(s.random, uuid[:]) + if err != nil { + return UUID{}, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +// newSource returns a source that uses a pseudo-random number generator in the randutil package. +// It is intended to be used to initialize the package-global UUID generator. +func newSource() *source { + return &source{ + random: randutil.NewLockedRand(), + } +} + +// globalSource is a package-global pseudo-random UUID generator. +var globalSource = newSource() + +// New returns a random UUIDv4. It uses the package-global pseudo-random number generator from +// randutil, which is seeded at package initialization. +// +// New should not be used to generate cryptographically-secure random UUIDs. +func New() (UUID, error) { + return globalSource.new() +} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go index 0b7432f408..966e43cdaf 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/batch_cursor.go @@ -1,3 +1,9 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + package mongo import ( diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go index fb5c91a126..2c58f22294 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go @@ -27,6 +27,7 @@ type bulkWriteBatch struct { // bulkWrite performs a bulk write operation type bulkWrite struct { + comment interface{} ordered *bool bypassDocumentValidation *bool models []WriteModel @@ -35,6 +36,7 @@ type bulkWrite struct { selector description.ServerSelector writeConcern *writeconcern.WriteConcern result BulkWriteResult + let interface{} } func (bw *bulkWrite) execute(ctx context.Context) error { @@ -113,7 +115,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.InsertedCount = int64(res.N) + batchRes.InsertedCount = res.N case *DeleteOneModel, *DeleteManyModel: res, err := bw.runDelete(ctx, batch) if err != nil { @@ -125,7 +127,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.DeletedCount = int64(res.N) + batchRes.DeletedCount = res.N case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel: res, err := bw.runUpdate(ctx, batch) if err != nil { @@ -137,8 +139,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr batchErr.Labels = writeErr.Labels batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError) } - batchRes.MatchedCount =
int64(res.N) - batchRes.ModifiedCount = int64(res.NModified) + batchRes.MatchedCount = res.N + batchRes.ModifiedCount = res.NModified batchRes.UpsertedCount = int64(len(res.Upserted)) for _, upsert := range res.Upserted { batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID @@ -177,7 +179,14 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE). - ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } if bw.bypassDocumentValidation != nil && *bw.bypassDocumentValidation { op = op.BypassDocumentValidation(*bw.bypassDocumentValidation) } @@ -227,7 +236,21 @@ func (bw *bulkWrite) runDelete(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ServerAPI(bw.collection.client.serverAPI) + ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.DeleteResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -308,7 +331,21 @@ func (bw *bulkWrite) runUpdate(ctx context.Context, batch bulkWriteBatch) (opera ServerSelector(bw.selector).ClusterClock(bw.collection.client.clock). Database(bw.collection.db.name).Collection(bw.collection.name). Deployment(bw.collection.client.deployment).Crypt(bw.collection.client.cryptFLE).Hint(hasHint). - ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI) + ArrayFilters(hasArrayFilters).ServerAPI(bw.collection.client.serverAPI).Timeout(bw.collection.client.timeout) + if bw.comment != nil { + comment, err := transformValue(bw.collection.registry, bw.comment, true, "comment") + if err != nil { + return op.Result(), err + } + op.Comment(comment) + } + if bw.let != nil { + let, err := transformBsoncoreDocument(bw.collection.registry, bw.let, true, "let") + if err != nil { + return operation.UpdateResult{}, err + } + op = op.Let(let) + } if bw.ordered != nil { op = op.Ordered(*bw.ordered) } @@ -380,7 +417,6 @@ func createUpdateDoc( } updateDoc, _ = bsoncore.AppendDocumentEnd(updateDoc, uidx) - return updateDoc, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go index b4b8e3ef8c..64f4589189 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write_models.go @@ -152,7 +152,7 @@ func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel { } // SetReplacement specifies a document that will be used to replace the selected document. 
It cannot be nil and cannot -// contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel { rom.Replacement = rep return rom @@ -210,7 +210,7 @@ func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel { } // SetUpdate specifies the modifications to be made to the selected document. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel { uom.Update = update return uom @@ -274,7 +274,7 @@ func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel { } // SetUpdate specifies the modifications to be made to the selected documents. The value must be a document containing -// update operators (https://docs.mongodb.com/manual/reference/operator/update/). It cannot be nil or empty. +// update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). It cannot be nil or empty. func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel { umm.Update = update return umm diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index f2a194d775..c809002abc 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -63,27 +64,28 @@ var ( // ChangeStream is used to iterate over a stream of events. Each event can be decoded into a Go type via the Decode // method or accessed as raw BSON via the Current field. This type is not goroutine safe and must not be used // concurrently by multiple goroutines. For more information about change streams, see -// https://docs.mongodb.com/manual/changeStreams/. +// https://www.mongodb.com/docs/manual/changeStreams/. type ChangeStream struct { // Current is the BSON bytes of the current event. This property is only valid until the next call to Next or // TryNext. If continued access is required, a copy must be made. 
Current bson.Raw - aggregate *operation.Aggregate - pipelineSlice []bsoncore.Document - cursor changeStreamCursor - cursorOptions driver.CursorOptions - batch []bsoncore.Document - resumeToken bson.Raw - err error - sess *session.Client - client *Client - registry *bsoncodec.Registry - streamType StreamType - options *options.ChangeStreamOptions - selector description.ServerSelector - operationTime *primitive.Timestamp - wireVersion *description.VersionRange + aggregate *operation.Aggregate + pipelineSlice []bsoncore.Document + pipelineOptions map[string]bsoncore.Value + cursor changeStreamCursor + cursorOptions driver.CursorOptions + batch []bsoncore.Document + resumeToken bson.Raw + err error + sess *session.Client + client *Client + registry *bsoncodec.Registry + streamType StreamType + options *options.ChangeStreamOptions + selector description.ServerSelector + operationTime *primitive.Timestamp + wireVersion *description.VersionRange } type changeStreamConfig struct { @@ -131,11 +133,20 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in ReadPreference(config.readPreference).ReadConcern(config.readConcern). Deployment(cs.client.deployment).ClusterClock(cs.client.clock). CommandMonitor(cs.client.monitor).Session(cs.sess).ServerSelector(cs.selector).Retry(driver.RetryNone). - ServerAPI(cs.client.serverAPI).Crypt(config.crypt) + ServerAPI(cs.client.serverAPI).Crypt(config.crypt).Timeout(cs.client.timeout) if cs.options.Collation != nil { cs.aggregate.Collation(bsoncore.Document(cs.options.Collation.ToDocument())) } + if comment := cs.options.Comment; comment != nil { + cs.aggregate.Comment(*comment) + + commentVal, err := transformValue(cs.registry, comment, true, "comment") + if err != nil { + return nil, err + } + cs.cursorOptions.Comment = commentVal + } if cs.options.BatchSize != nil { cs.aggregate.BatchSize(*cs.options.BatchSize) cs.cursorOptions.BatchSize = *cs.options.BatchSize @@ -143,6 +154,37 @@ func newChangeStream(ctx context.Context, config changeStreamConfig, pipeline in if cs.options.MaxAwaitTime != nil { cs.cursorOptions.MaxTimeMS = int64(*cs.options.MaxAwaitTime / time.Millisecond) } + if cs.options.Custom != nil { + // Marshal all custom options before passing to the initial aggregate. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + cs.aggregate.CustomOptions(customOptions) + } + if cs.options.CustomPipeline != nil { + // Marshal all custom pipeline options before building pipeline slice. Return + // any errors from Marshaling. 
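+ // For illustration (hypothetical caller code, not part of this hunk): a value
+ // supplied through the new CustomPipeline option, e.g.
+ // options.ChangeStream().SetCustomPipeline(bson.M{"allChangesForCluster": true}),
+ // is marshaled once here and later appended to the $changeStream stage by
+ // createPipelineOptionsDoc.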
+ cs.pipelineOptions = make(map[string]bsoncore.Value) + for optionName, optionValue := range cs.options.CustomPipeline { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(cs.registry, optionValue) + if err != nil { + cs.err = err + closeImplicitSession(cs.sess) + return nil, cs.Err() + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + cs.pipelineOptions[optionName] = optionValueBSON + } + } switch cs.streamType { case ClientStream: @@ -212,7 +254,7 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Deployment(cs.createOperationDeployment(server, conn)) if resuming { - cs.replaceOptions(ctx, cs.wireVersion) + cs.replaceOptions(cs.wireVersion) csOptDoc := cs.createPipelineOptionsDoc() pipIdx, pipDoc := bsoncore.AppendDocumentStart(nil) @@ -229,6 +271,16 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Pipeline(plArr) } + // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already + // a Timeout context, honor cs.client.timeout in new Timeout context for change stream operation execution + // and potential retry. + if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !internal.IsTimeoutContext(ctx) { + newCtx, cancelFunc := internal.MakeTimeoutContext(ctx, *cs.client.timeout) + // Redefine ctx to be the new timeout-derived context. + ctx = newCtx + // Cancel the timeout-derived context at the end of executeOperation to avoid a context leak. + defer cancelFunc() + } if original := cs.aggregate.Execute(ctx); original != nil { retryableRead := cs.client.retryReads && cs.wireVersion != nil && cs.wireVersion.Max >= 6 if !retryableRead { @@ -363,7 +415,16 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { } if cs.options.FullDocument != nil { - plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + // Only append a default "fullDocument" field if wire version is less than 6 (3.6). Otherwise, + // the server will assume users want the default behavior, and "fullDocument" does not need to be + // specified. + if *cs.options.FullDocument != options.Default || (cs.wireVersion != nil && cs.wireVersion.Max < 6) { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocument", string(*cs.options.FullDocument)) + } + } + + if cs.options.FullDocumentBeforeChange != nil { + plDoc = bsoncore.AppendStringElement(plDoc, "fullDocumentBeforeChange", string(*cs.options.FullDocumentBeforeChange)) } if cs.options.ResumeAfter != nil { @@ -376,6 +437,10 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendDocumentElement(plDoc, "resumeAfter", raDoc) } + if cs.options.ShowExpandedEvents != nil { + plDoc = bsoncore.AppendBooleanElement(plDoc, "showExpandedEvents", *cs.options.ShowExpandedEvents) + } + if cs.options.StartAfter != nil { var saDoc bsoncore.Document saDoc, cs.err = transformBsoncoreDocument(cs.registry, cs.options.StartAfter, true, "startAfter") @@ -390,6 +455,11 @@ func (cs *ChangeStream) createPipelineOptionsDoc() bsoncore.Document { plDoc = bsoncore.AppendTimestampElement(plDoc, "startAtOperationTime", cs.options.StartAtOperationTime.T, cs.options.StartAtOperationTime.I) } + // Append custom pipeline options. 
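> Reviewer note: the deadline guard added in `executeOperation` above reduces to a small, general pattern. A sketch of the same logic with plain `context` (the driver additionally consults `internal.IsTimeoutContext` to avoid double-wrapping, which has no public equivalent):

```go
package example

import (
	"context"
	"time"
)

// withDefaultTimeout applies a client-wide default timeout only when the
// caller has not already set a deadline, so per-call deadlines always win.
func withDefaultTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
	if _, ok := ctx.Deadline(); ok {
		return ctx, func() {} // caller's deadline wins; nothing to cancel
	}
	return context.WithTimeout(ctx, d)
}
```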
+ for optionName, optionValue := range cs.pipelineOptions { + plDoc = bsoncore.AppendValueElement(plDoc, optionName, optionValue) + } + if plDoc, cs.err = bsoncore.AppendDocumentEnd(plDoc, plDocIdx); cs.err != nil { return nil } @@ -408,7 +478,7 @@ func (cs *ChangeStream) pipelineToBSON() (bsoncore.Document, error) { return pipelineArr, cs.err } -func (cs *ChangeStream) replaceOptions(ctx context.Context, wireVersion *description.VersionRange) { +func (cs *ChangeStream) replaceOptions(wireVersion *description.VersionRange) { // Cached resume token: use the resume token as the resumeAfter option and set no other resume options if cs.resumeToken != nil { cs.options.SetResumeAfter(cs.resumeToken) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go index 36c6e2547a..9c61123c36 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream_deployment.go @@ -40,6 +40,10 @@ func (c *changeStreamDeployment) MinRTT() time.Duration { return c.server.MinRTT() } +func (c *changeStreamDeployment) RTT90() time.Duration { + return c.server.RTT90() +} + func (c *changeStreamDeployment) ProcessError(err error, conn driver.Connection) driver.ProcessErrorResult { ep, ok := c.server.(driver.ErrorProcessor) if !ok { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 63630ebe2d..d409135a77 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -17,6 +17,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/event" + "go.mongodb.org/mongo-driver/internal/uuid" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -25,14 +26,18 @@ import ( "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/auth" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" "go.mongodb.org/mongo-driver/x/mongo/driver/ocsp" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" "go.mongodb.org/mongo-driver/x/mongo/driver/session" "go.mongodb.org/mongo-driver/x/mongo/driver/topology" - "go.mongodb.org/mongo-driver/x/mongo/driver/uuid" ) -const defaultLocalThreshold = 15 * time.Millisecond +const ( + defaultLocalThreshold = 15 * time.Millisecond + defaultMaxPoolSize uint64 = 100 +) var ( // keyVaultCollOpts specifies options used to communicate with the key vault collection @@ -63,14 +68,16 @@ type Client struct { serverAPI *driver.ServerAPIOptions serverMonitor *event.ServerMonitor sessionPool *session.Pool + timeout *time.Duration // client-side encryption fields - keyVaultClientFLE *Client - keyVaultCollFLE *Collection - mongocryptdFLE *mcryptClient - cryptFLE driver.Crypt - metadataClientFLE *Client - internalClientFLE *Client + keyVaultClientFLE *Client + keyVaultCollFLE *Collection + mongocryptdFLE *mongocryptdClient + cryptFLE driver.Crypt + metadataClientFLE *Client + internalClientFLE *Client + encryptedFieldsMap map[string]interface{} } // Connect creates a new Client and then initializes it using the Connect method. 
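> Reviewer note: the new `defaultMaxPoolSize` constant above makes the connection pool default to 100 per server when the option is unset; an explicit setting still wins. A sketch (host is hypothetical; `"mongodb://localhost:27017/?maxPoolSize=200"` in the URI would have the same effect):

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// connectWithPoolSize overrides the implicit default of 100 connections.
func connectWithPoolSize(ctx context.Context) (*mongo.Client, error) {
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetMaxPoolSize(200)
	return mongo.Connect(ctx, opts)
}
```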
This is equivalent to calling @@ -271,6 +278,9 @@ func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error { // StartSession does not actually communicate with the server and will not error if the client is // disconnected. // +// StartSession is safe to call from multiple goroutines concurrently. However, Sessions returned by StartSession are +// not safe for concurrent use by multiple goroutines. +// // If the DefaultReadConcern, DefaultWriteConcern, or DefaultReadPreference options are not set, the client's read // concern, write concern, or read preference will be used, respectively. func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) { @@ -348,6 +358,12 @@ func (c *Client) endSessions(ctx context.Context) { } func (c *Client) configure(opts *options.ClientOptions) error { + var defaultOptions int + // Set default options + if opts.MaxPoolSize == nil { + defaultOptions++ + opts.SetMaxPoolSize(defaultMaxPoolSize) + } if err := opts.Validate(); err != nil { return err } @@ -624,6 +640,8 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithWriteTimeout(func(time.Duration) time.Duration { return *opts.SocketTimeout }), ) } + // Timeout + c.timeout = opts.Timeout // TLSConfig if opts.TLSConfig != nil { connOpts = append(connOpts, topology.WithTLSConfig( @@ -681,15 +699,16 @@ func (c *Client) configure(opts *options.ClientOptions) error { topology.WithClock(func(*session.ClusterClock) *session.ClusterClock { return c.clock }), topology.WithConnectionOptions(func(...topology.ConnectionOption) []topology.ConnectionOption { return connOpts }), ) - c.topologyOptions = append(topologyOpts, topology.WithServerOptions( + topologyOpts = append(topologyOpts, topology.WithServerOptions( func(...topology.ServerOption) []topology.ServerOption { return serverOpts }, )) + c.topologyOptions = topologyOpts // Deployment if opts.Deployment != nil { - // topology options: WithSeedlist, WithURI, WithSRVServiceName and WithSRVMaxHosts - // server options: WithClock and WithConnectionOptions - if len(serverOpts) > 2 || len(topologyOpts) > 4 { + // topology options: WithSeedlist, WithURI, WithSRVServiceName, WithSRVMaxHosts, and WithServerOptions + // server options: WithClock and WithConnectionOptions + default maxPoolSize + if len(serverOpts) > 2+defaultOptions || len(topologyOpts) > 5 { return errors.New("cannot specify topology or server options with a deployment") } c.deployment = opts.Deployment @@ -699,16 +718,30 @@ func (c *Client) configure(opts *options.ClientOptions) error { } func (c *Client) configureAutoEncryption(clientOpts *options.ClientOptions) error { + c.encryptedFieldsMap = clientOpts.AutoEncryptionOptions.EncryptedFieldsMap if err := c.configureKeyVaultClientFLE(clientOpts); err != nil { return err } if err := c.configureMetadataClientFLE(clientOpts); err != nil { return err } - if err := c.configureMongocryptdClientFLE(clientOpts.AutoEncryptionOptions); err != nil { + + mc, err := c.newMongoCrypt(clientOpts.AutoEncryptionOptions) + if err != nil { return err } - return c.configureCryptFLE(clientOpts.AutoEncryptionOptions) + + // If the crypt_shared library was loaded successfully, signal to the mongocryptd client creator + // that it can bypass spawning mongocryptd. 
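> Reviewer note: the concurrency doc added to `StartSession` above implies a one-session-per-goroutine pattern; a minimal sketch:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/mongo"
)

// runPerGoroutineSession follows the documented contract: StartSession may
// be called from many goroutines, but the returned Session (and the
// SessionContext derived from it) must stay on one goroutine.
func runPerGoroutineSession(ctx context.Context, client *mongo.Client) error {
	sess, err := client.StartSession()
	if err != nil {
		return err
	}
	defer sess.EndSession(ctx)
	return mongo.WithSession(ctx, sess, func(sc mongo.SessionContext) error {
		// Issue operations with sc so they run under the session.
		return nil
	})
}
```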
+ cryptSharedLibAvailable := mc.CryptSharedLibVersionString() != "" + mongocryptdFLE, err := newMongocryptdClient(cryptSharedLibAvailable, clientOpts.AutoEncryptionOptions) + if err != nil { + return err + } + c.mongocryptdFLE = mongocryptdFLE + + c.configureCryptFLE(mc, clientOpts.AutoEncryptionOptions) + return nil } func (c *Client) getOrCreateInternalClient(clientOpts *options.ClientOptions) (*Client, error) { @@ -763,32 +796,90 @@ func (c *Client) configureMetadataClientFLE(clientOpts *options.ClientOptions) e return err } -func (c *Client) configureMongocryptdClientFLE(opts *options.AutoEncryptionOptions) error { - var err error - c.mongocryptdFLE, err = newMcryptClient(opts) - return err -} - -func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { +func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt.MongoCrypt, error) { // convert schemas in SchemaMap to bsoncore documents cryptSchemaMap := make(map[string]bsoncore.Document) for k, v := range opts.SchemaMap { schema, err := transformBsoncoreDocument(c.registry, v, true, "schemaMap") if err != nil { - return err + return nil, err } cryptSchemaMap[k] = schema } + + // convert schemas in EncryptedFieldsMap to bsoncore documents + cryptEncryptedFieldsMap := make(map[string]bsoncore.Document) + for k, v := range opts.EncryptedFieldsMap { + encryptedFields, err := transformBsoncoreDocument(c.registry, v, true, "encryptedFieldsMap") + if err != nil { + return nil, err + } + cryptEncryptedFieldsMap[k] = encryptedFields + } + kmsProviders, err := transformBsoncoreDocument(c.registry, opts.KmsProviders, true, "kmsProviders") if err != nil { - return fmt.Errorf("error creating KMS providers document: %v", err) + return nil, fmt.Errorf("error creating KMS providers document: %v", err) + } + + // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one + // was set. + cryptSharedLibPath := "" + if val, ok := opts.ExtraOptions["cryptSharedLibPath"]; ok { + str, ok := val.(string) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibPath" to be a string, but is a %T`, val) + } + cryptSharedLibPath = str + } + + // Explicitly disable loading the crypt_shared library if requested. Note that this is ONLY + // intended for use from tests; there is no supported public API for explicitly disabling + // loading the crypt_shared library. + cryptSharedLibDisabled := false + if v, ok := opts.ExtraOptions["__cryptSharedLibDisabledForTestOnly"]; ok { + cryptSharedLibDisabled = v.(bool) + } + + bypassAutoEncryption := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + SetLocalSchemaMap(cryptSchemaMap). + SetBypassQueryAnalysis(bypassQueryAnalysis). + SetEncryptedFieldsMap(cryptEncryptedFieldsMap). + SetCryptSharedLibDisabled(cryptSharedLibDisabled || bypassAutoEncryption). 
+ SetCryptSharedLibOverridePath(cryptSharedLibPath)) + if err != nil { + return nil, err + } + + var cryptSharedLibRequired bool + if val, ok := opts.ExtraOptions["cryptSharedLibRequired"]; ok { + b, ok := val.(bool) + if !ok { + return nil, fmt.Errorf( + `expected AutoEncryption extra option "cryptSharedLibRequired" to be a bool, but is a %T`, val) + } + cryptSharedLibRequired = b } - // configure options - var bypass bool - if opts.BypassAutoEncryption != nil { - bypass = *opts.BypassAutoEncryption + // If the "cryptSharedLibRequired" extra option is set to true, check the MongoCrypt version + // string to confirm that the library was successfully loaded. If the version string is empty, + // return an error indicating that we couldn't load the crypt_shared library. + if cryptSharedLibRequired && mc.CryptSharedLibVersionString() == "" { + return nil, errors.New( + `AutoEncryption extra option "cryptSharedLibRequired" is true, but we failed to load the crypt_shared library`) } + + return mc, nil +} + +//nolint:unused // the unused linter thinks that this function is unreachable because "c.newMongoCrypt" always panics without the "cse" build tag set. +func (c *Client) configureCryptFLE(mc *mongocrypt.MongoCrypt, opts *options.AutoEncryptionOptions) { + bypass := opts.BypassAutoEncryption != nil && *opts.BypassAutoEncryption kr := keyRetriever{coll: c.keyVaultCollFLE} var cir collInfoRetriever // If bypass is true, c.metadataClientFLE is nil and the collInfoRetriever @@ -798,23 +889,19 @@ func (c *Client) configureCryptFLE(opts *options.AutoEncryptionOptions) error { cir = collInfoRetriever{client: c.metadataClientFLE} } - cryptOpts := &driver.CryptOptions{ + c.cryptFLE = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, CollInfoFn: cir.cryptCollInfo, KeyFn: kr.cryptKeys, MarkFn: c.mongocryptdFLE.markCommand, - KmsProviders: kmsProviders, TLSConfig: opts.TLSConfig, BypassAutoEncryption: bypass, - SchemaMap: cryptSchemaMap, - } - - c.cryptFLE, err = driver.NewCrypt(cryptOpts) - return err + }) } // validSession returns an error if the session doesn't belong to the client func (c *Client) validSession(sess *session.Client) error { - if sess != nil && !uuid.Equal(sess.ClientID, c.id) { + if sess != nil && sess.ClientID != c.id { return ErrWrongClient } return nil @@ -843,9 +930,9 @@ func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Databa // databases are included in the result. It cannot be nil. An empty document (e.g. bson.D{}) should be used to include // all databases. // -// The opts paramter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). +// The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) { if ctx == nil { ctx = context.Background() @@ -885,7 +972,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... op := operation.NewListDatabases(filterDoc). Session(sess).ReadPreference(c.readPreference).CommandMonitor(c.monitor). ServerSelector(selector).ClusterClock(c.clock).Database("admin").Deployment(c.deployment).Crypt(c.cryptFLE). 
- ServerAPI(c.serverAPI) + ServerAPI(c.serverAPI).Timeout(c.timeout) if ldo.NameOnly != nil { op = op.NameOnly(*ldo.NameOnly) @@ -918,7 +1005,7 @@ func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ... // The opts parameter can be used to specify options for this operation (see the options.ListDatabasesOptions // documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listDatabases/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listDatabases/. func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) { opts = append(opts, options.ListDatabases().SetNameOnly(true)) @@ -939,6 +1026,9 @@ func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts // SessionContext must be used as the Context parameter for any operations in the fn callback that should be executed // under the session. // +// WithSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// WithSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the one provided. // // Any error returned by the fn callback will be returned without any modifications. @@ -951,6 +1041,9 @@ func WithSession(ctx context.Context, sess Session, fn func(SessionContext) erro // be executed under a session. After the callback returns, the created Session is ended, meaning that any in-progress // transactions started by fn will be aborted even if fn returns an error. // +// UseSession is safe to call from multiple goroutines concurrently. However, the SessionContext passed to the +// UseSession callback function is not safe for concurrent use by multiple goroutines. +// // If the ctx parameter already contains a Session, that Session will be replaced with the newly created one. // // Any error returned by the fn callback will be returned without any modifications. @@ -959,6 +1052,9 @@ func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) } // UseSessionWithOptions operates like UseSession but uses the given SessionOptions to create the Session. +// +// UseSessionWithOptions is safe to call from multiple goroutines concurrently. However, the SessionContext passed to +// the UseSessionWithOptions callback function is not safe for concurrent use by multiple goroutines. func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error { defaultSess, err := c.StartSession(opts) if err != nil { @@ -970,13 +1066,13 @@ func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.Sessio } // Watch returns a change stream for all changes on the deployment. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The client must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil or empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for a list +// nil or empty. 
The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for a list // of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the mongo.Pipeline{} // type can be used. // diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index fe4646b641..f88b7bede7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -17,7 +17,8 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" - cryptOpts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" + "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt" + mcopts "go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options" ) // ClientEncryption is used to create data keys and explicitly encrypt and decrypt BSON values. @@ -47,36 +48,56 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti return nil, fmt.Errorf("error creating KMS providers map: %v", err) } + mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). + SetKmsProviders(kmsProviders). + // Explicitly disable loading the crypt_shared library for the Crypt used for + // ClientEncryption because it's only needed for AutoEncryption and we don't expect users to + // have the crypt_shared library installed if they're using ClientEncryption. + SetCryptSharedLibDisabled(true)) + if err != nil { + return nil, err + } + // create Crypt kr := keyRetriever{coll: ce.keyVaultColl} cir := collInfoRetriever{client: ce.keyVaultClient} - ce.crypt, err = driver.NewCrypt(&driver.CryptOptions{ - KeyFn: kr.cryptKeys, - CollInfoFn: cir.cryptCollInfo, - KmsProviders: kmsProviders, - TLSConfig: ceo.TLSConfig, + ce.crypt = driver.NewCrypt(&driver.CryptOptions{ + MongoCrypt: mc, + KeyFn: kr.cryptKeys, + CollInfoFn: cir.cryptCollInfo, + TLSConfig: ceo.TLSConfig, }) - if err != nil { - return nil, err - } return ce, nil } -// CreateDataKey creates a new key document and inserts it into the key vault collection. Returns the _id of the -// created document. -func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, opts ...*options.DataKeyOptions) (primitive.Binary, error) { - // translate opts to cryptOpts.DataKeyOptions +// AddKeyAltName adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the +// given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. +func (ce *ClientEncryption) AddKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + keyAltNameDoc := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + update := bsoncore.NewDocumentBuilder().AppendDocument("$addToSet", keyAltNameDoc).Build() + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// CreateDataKey creates a new key document and inserts into the key vault collection. Returns the _id of the created +// document as a UUID (BSON binary subtype 0x04). +func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider string, + opts ...*options.DataKeyOptions) (primitive.Binary, error) { + + // translate opts to mcopts.DataKeyOptions dko := options.MergeDataKeyOptions(opts...) 
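> Reviewer note: callers exercise the rewritten `CreateDataKey` roughly as follows (the `"local"` provider and the alt name are illustrative):

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// createKey creates a "local"-provider data key with an alternate name; the
// returned _id is a UUID (BSON binary subtype 0x04), as documented above.
func createKey(ctx context.Context, ce *mongo.ClientEncryption) (primitive.Binary, error) {
	dkOpts := options.DataKey().SetKeyAltNames([]string{"example-key"})
	return ce.CreateDataKey(ctx, "local", dkOpts)
}
```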
- co := cryptOpts.DataKey().SetKeyAltNames(dko.KeyAltNames) + co := mcopts.DataKey().SetKeyAltNames(dko.KeyAltNames) if dko.MasterKey != nil { keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, dko.MasterKey, true, "masterKey") if err != nil { return primitive.Binary{}, err } - co.SetMasterKey(keyDoc) } + if dko.KeyMaterial != nil { + co.SetKeyMaterial(dko.KeyMaterial) + } // create data key document dataKeyDoc, err := ce.crypt.CreateDataKey(ctx, kmsProvider, co) @@ -95,9 +116,11 @@ func (ce *ClientEncryption) CreateDataKey(ctx context.Context, kmsProvider strin } // Encrypt encrypts a BSON value with the given key and algorithm. Returns an encrypted value (BSON binary of subtype 6). -func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts ...*options.EncryptOptions) (primitive.Binary, error) { +func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, + opts ...*options.EncryptOptions) (primitive.Binary, error) { + eo := options.MergeEncryptOptions(opts...) - transformed := cryptOpts.ExplicitEncryption() + transformed := mcopts.ExplicitEncryption() if eo.KeyID != nil { transformed.SetKeyID(*eo.KeyID) } @@ -105,6 +128,11 @@ func (ce *ClientEncryption) Encrypt(ctx context.Context, val bson.RawValue, opts transformed.SetKeyAltName(*eo.KeyAltName) } transformed.SetAlgorithm(eo.Algorithm) + transformed.SetQueryType(eo.QueryType) + + if eo.ContentionFactor != nil { + transformed.SetContentionFactor(*eo.ContentionFactor) + } subtype, data, err := ce.crypt.EncryptExplicit(ctx, bsoncore.Value{Type: val.Type, Data: val.Value}, transformed) if err != nil { @@ -130,6 +158,143 @@ func (ce *ClientEncryption) Close(ctx context.Context) error { return ce.keyVaultClient.Disconnect(ctx) } +// DeleteKey removes the key document with the given UUID (BSON binary subtype 0x04) from the key vault collection. +// Returns the result of the internal deleteOne() operation on the key vault collection. +func (ce *ClientEncryption) DeleteKey(ctx context.Context, id primitive.Binary) (*DeleteResult, error) { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.DeleteOne(ctx, filter) +} + +// GetKeyByAltName returns a key document in the key vault collection with the given keyAltName. +func (ce *ClientEncryption) GetKeyByAltName(ctx context.Context, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendString("keyAltNames", keyAltName).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKey finds a single key document with the given UUID (BSON binary subtype 0x04). Returns the result of the +// internal find() operation on the key vault collection. +func (ce *ClientEncryption) GetKey(ctx context.Context, id primitive.Binary) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + return ce.keyVaultColl.FindOne(ctx, filter) +} + +// GetKeys finds all documents in the key vault collection. Returns the result of the internal find() operation on the +// key vault collection. +func (ce *ClientEncryption) GetKeys(ctx context.Context) (*Cursor, error) { + return ce.keyVaultColl.Find(ctx, bson.D{}) +} + +// RemoveKeyAltName removes a keyAltName from the keyAltNames array of the key document in the key vault collection with +// the given UUID (BSON binary subtype 0x04). Returns the previous version of the key document. 
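> Reviewer note: the new key-vault helpers (`GetKey`, `GetKeys`, `DeleteKey`) compose as below; a sketch grounded in the signatures added in this hunk:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
)

// inspectKeys looks up one key document, walks the whole key vault, and then
// deletes the key, using only the helpers introduced in this diff.
func inspectKeys(ctx context.Context, ce *mongo.ClientEncryption, id primitive.Binary) error {
	keyDoc, err := ce.GetKey(ctx, id).DecodeBytes()
	if err != nil {
		return err
	}
	_ = keyDoc // raw BSON of the key document

	cursor, err := ce.GetKeys(ctx)
	if err != nil {
		return err
	}
	defer cursor.Close(ctx)
	for cursor.Next(ctx) {
		// cursor.Current holds each key document in turn.
	}
	if err := cursor.Err(); err != nil {
		return err
	}

	_, err = ce.DeleteKey(ctx, id)
	return err
}
```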
+func (ce *ClientEncryption) RemoveKeyAltName(ctx context.Context, id primitive.Binary, keyAltName string) *SingleResult { + filter := bsoncore.NewDocumentBuilder().AppendBinary("_id", id.Subtype, id.Data).Build() + update := bson.A{bson.D{{"$set", bson.D{{"keyAltNames", bson.D{{"$cond", bson.A{bson.D{{"$eq", + bson.A{"$keyAltNames", bson.A{keyAltName}}}}, "$$REMOVE", bson.D{{"$filter", + bson.D{{"input", "$keyAltNames"}, {"cond", bson.D{{"$ne", bson.A{"$$this", keyAltName}}}}}}}}}}}}}}} + return ce.keyVaultColl.FindOneAndUpdate(ctx, filter, update) +} + +// setRewrapManyDataKeyWriteModels will prepare the WriteModel slice for a bulk updating rewrapped documents. +func setRewrapManyDataKeyWriteModels(rewrappedDocuments []bsoncore.Document, writeModels *[]WriteModel) error { + const idKey = "_id" + const keyMaterial = "keyMaterial" + const masterKey = "masterKey" + + if writeModels == nil { + return fmt.Errorf("writeModels pointer not set for location referenced") + } + + // Append a slice of WriteModel with the update document per each rewrappedDoc _id filter. + for _, rewrappedDocument := range rewrappedDocuments { + // Prepare the new master key for update. + masterKeyValue, err := rewrappedDocument.LookupErr(masterKey) + if err != nil { + return err + } + masterKeyDoc := masterKeyValue.Document() + + // Prepare the new material key for update. + keyMaterialValue, err := rewrappedDocument.LookupErr(keyMaterial) + if err != nil { + return err + } + keyMaterialSubtype, keyMaterialData := keyMaterialValue.Binary() + keyMaterialBinary := primitive.Binary{Subtype: keyMaterialSubtype, Data: keyMaterialData} + + // Prepare the _id filter for documents to update. + id, err := rewrappedDocument.LookupErr(idKey) + if err != nil { + return err + } + + idSubtype, idData, ok := id.BinaryOK() + if !ok { + return fmt.Errorf("expected to assert %q as binary, got type %T", idKey, id) + } + binaryID := primitive.Binary{Subtype: idSubtype, Data: idData} + + // Append the mutable document to the slice for bulk update. + *writeModels = append(*writeModels, NewUpdateOneModel(). + SetFilter(bson.D{{idKey, binaryID}}). + SetUpdate( + bson.D{ + {"$set", bson.D{{keyMaterial, keyMaterialBinary}, {masterKey, masterKeyDoc}}}, + {"$currentDate", bson.D{{"updateDate", true}}}, + }, + )) + } + return nil +} + +// RewrapManyDataKey decrypts and encrypts all matching data keys with a possibly new masterKey value. For all +// matching documents, this method will overwrite the "masterKey", "updateDate", and "keyMaterial". On error, some +// matching data keys may have been rewrapped. +func (ce *ClientEncryption) RewrapManyDataKey(ctx context.Context, filter interface{}, + opts ...*options.RewrapManyDataKeyOptions) (*RewrapManyDataKeyResult, error) { + + rmdko := options.MergeRewrapManyDataKeyOptions(opts...) + if ctx == nil { + ctx = context.Background() + } + + // Transfer rmdko options to /x/ package options to publish the mongocrypt feed. + co := mcopts.RewrapManyDataKey() + if rmdko.MasterKey != nil { + keyDoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, rmdko.MasterKey, true, "masterKey") + if err != nil { + return nil, err + } + co.SetMasterKey(keyDoc) + } + if rmdko.Provider != nil { + co.SetProvider(*rmdko.Provider) + } + + // Prepare the filters and rewrap the data key using mongocrypt. 
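> Reviewer note: `RewrapManyDataKey`, whose body begins here, would be invoked like this. The provider and master-key document are hypothetical, and the `SetProvider`/`SetMasterKey` setters are assumed from the merged options read above:

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// rewrapAll re-encrypts every data key matching the empty filter with a new
// AWS master key; partial progress is possible on error, per the doc comment.
func rewrapAll(ctx context.Context, ce *mongo.ClientEncryption) (*mongo.RewrapManyDataKeyResult, error) {
	opts := options.RewrapManyDataKey().
		SetProvider("aws").
		SetMasterKey(bson.D{
			{"region", "us-east-1"},
			{"key", "arn:aws:kms:us-east-1:123456789012:key/example"},
		})
	return ce.RewrapManyDataKey(ctx, bson.D{}, opts)
}
```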
+ filterdoc, err := transformBsoncoreDocument(ce.keyVaultClient.registry, filter, true, "filter") + if err != nil { + return nil, err + } + + rewrappedDocuments, err := ce.crypt.RewrapDataKey(ctx, filterdoc, co) + if err != nil { + return nil, err + } + if len(rewrappedDocuments) == 0 { + // If there are no documents to rewrap, then do nothing. + return new(RewrapManyDataKeyResult), nil + } + + // Prepare the WriteModel slice for bulk updating the rewrapped data keys. + models := []WriteModel{} + if err := setRewrapManyDataKeyWriteModels(rewrappedDocuments, &models); err != nil { + return nil, err + } + + bulkWriteResults, err := ce.keyVaultColl.BulkWrite(ctx, models) + return &RewrapManyDataKeyResult{BulkWriteResult: bulkWriteResults}, err +} + // splitNamespace takes a namespace in the form "database.collection" and returns (database name, collection name) func splitNamespace(ns string) (string, string) { firstDot := strings.Index(ns, ".") diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index a5aaa35ea3..aa3ffbe958 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -16,6 +16,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" @@ -166,7 +167,7 @@ func (coll *Collection) Database() *Database { return coll.db } -// BulkWrite performs a bulk write operation (https://docs.mongodb.com/manual/core/bulk-write-operations/). +// BulkWrite performs a bulk write operation (https://www.mongodb.com/docs/manual/core/bulk-write-operations/). // // The models parameter must be a slice of operations to be executed in this bulk write. It cannot be nil or empty. // All of the models must be non-nil. See the mongo.WriteModel documentation for a list of valid model types and @@ -218,6 +219,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, bwo := options.MergeBulkWriteOptions(opts...) op := bulkWrite{ + comment: bwo.Comment, ordered: bwo.Ordered, bypassDocumentValidation: bwo.BypassDocumentValidation, models: models, @@ -225,6 +227,7 @@ func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel, collection: coll, selector: selector, writeConcern: wc, + let: bwo.Let, } err = op.execute(ctx) @@ -280,11 +283,18 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) imo := options.MergeInsertManyOptions(opts...) 
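> Reviewer note: the new `bwo.Comment`/`bwo.Let` wiring in `BulkWrite` above surfaces through `BulkWriteOptions`; a sketch assuming the matching `SetComment`/`SetLet` setters (comment string and let-document are illustrative):

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// bulkUpsert attaches a comment (visible in server logs/profiler) and a
// let-document to a bulk write.
func bulkUpsert(ctx context.Context, coll *mongo.Collection) (*mongo.BulkWriteResult, error) {
	models := []mongo.WriteModel{
		mongo.NewUpdateOneModel().
			SetFilter(bson.D{{"_id", 1}}).
			SetUpdate(bson.D{{"$set", bson.D{{"x", 1}}}}).
			SetUpsert(true),
	}
	opts := options.BulkWrite().SetComment("audit-123").SetLet(bson.D{{"cutoff", 10}})
	return coll.BulkWrite(ctx, models, opts)
}
```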
if imo.BypassDocumentValidation != nil && *imo.BypassDocumentValidation { op = op.BypassDocumentValidation(*imo.BypassDocumentValidation) } + if imo.Comment != nil { + comment, err := transformValue(coll.registry, imo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if imo.Ordered != nil { op = op.Ordered(*imo.Ordered) } @@ -323,7 +333,7 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertOneOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertOne(ctx context.Context, document interface{}, opts ...*options.InsertOneOptions) (*InsertOneResult, error) { @@ -333,6 +343,9 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, if ioOpts.BypassDocumentValidation != nil && *ioOpts.BypassDocumentValidation { imOpts.SetBypassDocumentValidation(*ioOpts.BypassDocumentValidation) } + if ioOpts.Comment != nil { + imOpts.SetComment(ioOpts.Comment) + } res, err := coll.insert(ctx, []interface{}{document}, imOpts) rr, err := processWriteError(err) @@ -352,7 +365,7 @@ func (coll *Collection) InsertOne(ctx context.Context, document interface{}, // // The opts parameter can be used to specify options for the operation (see the options.InsertManyOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/insert/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/insert/. func (coll *Collection) InsertMany(ctx context.Context, documents []interface{}, opts ...*options.InsertManyOptions) (*InsertManyResult, error) { @@ -450,10 +463,24 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Ordered(true). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) + if do.Comment != nil { + comment, err := transformValue(coll.registry, do.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } if do.Hint != nil { op = op.Hint(true) } + if do.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, do.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } // deleteMany cannot be retried retryMode := driver.RetryNone @@ -465,7 +492,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn if rr&expectedRr == 0 { return nil, err } - return &DeleteResult{DeletedCount: int64(op.Result().N)}, err + return &DeleteResult{DeletedCount: op.Result().N}, err } // DeleteOne executes a delete command to delete at most one document from the collection. @@ -477,7 +504,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -493,7 +520,7 @@ func (coll *Collection) DeleteOne(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for the operation (see the options.DeleteOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/delete/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/delete/. func (coll *Collection) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*DeleteResult, error) { @@ -547,11 +574,26 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc ServerSelector(selector).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).Hint(uo.Hint != nil). - ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI) + ArrayFilters(uo.ArrayFilters != nil).Ordered(true).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) + if uo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, uo.Let, true, "let") + if err != nil { + return nil, err + } + op = op.Let(let) + } if uo.BypassDocumentValidation != nil && *uo.BypassDocumentValidation { op = op.BypassDocumentValidation(*uo.BypassDocumentValidation) } + if uo.Comment != nil { + comment, err := transformValue(coll.registry, uo.Comment, true, "comment") + if err != nil { + return nil, err + } + op = op.Comment(comment) + } retry := driver.RetryNone // retryable writes are only enabled updateOne/replaceOne operations if !multi && coll.client.retryWrites { @@ -567,8 +609,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc opRes := op.Result() res := &UpdateResult{ - MatchedCount: int64(opRes.N), - ModifiedCount: int64(opRes.NModified), + MatchedCount: opRes.N, + ModifiedCount: opRes.NModified, UpsertedCount: int64(len(opRes.Upserted)), } if len(opRes.Upserted) > 0 { @@ -586,12 +628,12 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc // the operation will succeed and an UpdateResult with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { if id == nil { @@ -608,12 +650,12 @@ func (coll *Collection) UpdateByID(ctx context.Context, id interface{}, update i // matched set and MatchedCount will equal 1. 
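> Reviewer note: the `do.Let` transform added to `delete` above enables server-side variables in delete filters; a sketch assuming a `SetLet` setter on `DeleteOptions` (field names and the cutoff value are illustrative):

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// purge deletes documents below a threshold; the filter references $$cutoff
// instead of interpolating the value into the query document.
func purge(ctx context.Context, coll *mongo.Collection) (*mongo.DeleteResult, error) {
	opts := options.Delete().SetLet(bson.D{{"cutoff", 100}})
	filter := bson.D{{"$expr", bson.D{{"$lt", bson.A{"$score", "$$cutoff"}}}}}
	return coll.DeleteMany(ctx, filter, opts)
}
```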
// // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be // made to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -636,12 +678,12 @@ func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, updat // with a MatchedCount of 0 will be returned. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected documents. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.UpdateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*UpdateResult, error) { @@ -665,11 +707,11 @@ func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, upda // selected from the matched set and MatchedCount will equal 1. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.ReplaceOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/update/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/update/. 
func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) { @@ -693,11 +735,16 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, updateOptions := make([]*options.UpdateOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } uOpts := options.Update() uOpts.BypassDocumentValidation = opt.BypassDocumentValidation uOpts.Collation = opt.Collation uOpts.Upsert = opt.Upsert uOpts.Hint = opt.Hint + uOpts.Let = opt.Let + uOpts.Comment = opt.Comment updateOptions = append(updateOptions, uOpts) } @@ -709,12 +756,12 @@ func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{}, // The pipeline parameter must be an array of documents, each representing an aggregation stage. The pipeline cannot // be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-collection-aggregate-stages for a list of // valid stages in aggregations. // // The opts parameter can be used to specify options for the operation (see the options.AggregateOptions documentation.) // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -735,9 +782,8 @@ func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{}, return aggregate(a) } -// aggreate is the helper method for Aggregate -func aggregate(a aggregateParams) (*Cursor, error) { - +// aggregate is the helper method for Aggregate +func aggregate(a aggregateParams) (cur *Cursor, err error) { if a.ctx == nil { a.ctx = context.Background() } @@ -748,6 +794,12 @@ func aggregate(a aggregateParams) (*Cursor, error) { } sess := sessionFromContext(a.ctx) + // Always close any created implicit sessions if aggregate returns an error. + defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && a.client.sessionPool != nil { sess, err = session.NewClientSession(a.client.sessionPool, a.client.id, session.Implicit) if err != nil { @@ -793,7 +845,8 @@ func aggregate(a aggregateParams) (*Cursor, error) { Deployment(a.client.deployment). Crypt(a.client.cryptFLE). ServerAPI(a.client.serverAPI). - HasOutputStage(hasOutputStage) + HasOutputStage(hasOutputStage). 
+ Timeout(a.client.timeout) if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) @@ -817,11 +870,16 @@ func aggregate(a aggregateParams) (*Cursor, error) { } if ao.Comment != nil { op.Comment(*ao.Comment) + + commentVal, err := transformValue(a.registry, ao.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if ao.Hint != nil { hintVal, err := transformValue(a.registry, ao.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hintVal) @@ -829,11 +887,24 @@ func aggregate(a aggregateParams) (*Cursor, error) { if ao.Let != nil { let, err := transformBsoncoreDocument(a.registry, ao.Let, true, "let") if err != nil { - closeImplicitSession(sess) return nil, err } op.Let(let) } + if ao.Custom != nil { + // Marshal all custom options before passing to the aggregate operation. Return + // any errors from Marshaling. + customOptions := make(map[string]bsoncore.Value) + for optionName, optionValue := range ao.Custom { + bsonType, bsonData, err := bson.MarshalValueWithRegistry(a.registry, optionValue) + if err != nil { + return nil, err + } + optionValueBSON := bsoncore.Value{Type: bsonType, Data: bsonData} + customOptions[optionName] = optionValueBSON + } + op.CustomOptions(customOptions) + } retry := driver.RetryNone if a.retryRead && !hasOutputStage { @@ -843,7 +914,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { err = op.Execute(a.ctx) if err != nil { - closeImplicitSession(sess) if wce, ok := err.(driver.WriteCommandError); ok && wce.WriteConcernError != nil { return nil, *convertDriverWriteConcernError(wce.WriteConcernError) } @@ -852,7 +922,6 @@ func aggregate(a aggregateParams) (*Cursor, error) { bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } cursor, err := newCursorWithSession(bc, a.registry, sess) @@ -901,10 +970,14 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, selector := makeReadPrefSelector(sess, coll.readSelector, coll.client.localThreshold) op := operation.NewAggregate(pipelineArr).Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector).ClusterClock(coll.client.clock).Database(coll.db.name). - Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Collection(coll.name).Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if countOpts.Collation != nil { op.Collation(bsoncore.Document(countOpts.Collation.ToDocument())) } + if countOpts.Comment != nil { + op.Comment(*countOpts.Comment) + } if countOpts.MaxTime != nil { op.MaxTimeMS(int64(*countOpts.MaxTime / time.Millisecond)) } @@ -950,7 +1023,7 @@ func (coll *Collection) CountDocuments(ctx context.Context, filter interface{}, // The opts parameter can be used to specify options for the operation (see the options.EstimatedDocumentCountOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/count/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/count/. 
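> Reviewer note: the aggregate `Let` path shown above binds `$$` variables for the whole pipeline; a minimal sketch (pipeline and variable names are illustrative):

```go
package example

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// totals runs an aggregation whose $match stage reads $$minQty from Let
// rather than embedding the constant in the pipeline document.
func totals(ctx context.Context, coll *mongo.Collection) (*mongo.Cursor, error) {
	pipeline := mongo.Pipeline{
		{{"$match", bson.D{{"$expr", bson.D{{"$gte", bson.A{"$qty", "$$minQty"}}}}}}},
	}
	opts := options.Aggregate().SetLet(bson.D{{"minQty", 5}})
	return coll.Aggregate(ctx, pipeline, opts)
}
```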
func (coll *Collection) EstimatedDocumentCount(ctx context.Context, opts ...*options.EstimatedDocumentCountOptions) (int64, error) { @@ -983,9 +1056,17 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, op := operation.NewCount().Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) co := options.MergeEstimatedDocumentCountOptions(opts...) + if co.Comment != nil { + comment, err := transformValue(coll.registry, co.Comment, false, "comment") + if err != nil { + return 0, err + } + op = op.Comment(comment) + } if co.MaxTime != nil { op = op.MaxTimeMS(int64(*co.MaxTime / time.Millisecond)) } @@ -1009,7 +1090,7 @@ func (coll *Collection) EstimatedDocumentCount(ctx context.Context, // // The opts parameter can be used to specify options for the operation (see the options.DistinctOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/distinct/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/distinct/. func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{}, opts ...*options.DistinctOptions) ([]interface{}, error) { @@ -1049,11 +1130,19 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i Session(sess).ClusterClock(coll.client.clock). Database(coll.db.name).Collection(coll.name).CommandMonitor(coll.client.monitor). Deployment(coll.client.deployment).ReadConcern(rc).ReadPreference(coll.readPreference). - ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + ServerSelector(selector).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) if option.Collation != nil { op.Collation(bsoncore.Document(option.Collation.ToDocument())) } + if option.Comment != nil { + comment, err := transformValue(coll.registry, option.Comment, true, "comment") + if err != nil { + return nil, err + } + op.Comment(comment) + } if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) } @@ -1098,9 +1187,9 @@ func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter i // // The opts parameter can be used to specify options for the operation (see the options.FindOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) Find(ctx context.Context, filter interface{}, - opts ...*options.FindOptions) (*Cursor, error) { + opts ...*options.FindOptions) (cur *Cursor, err error) { if ctx == nil { ctx = context.Background() @@ -1112,6 +1201,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } sess := sessionFromContext(ctx) + // Always close any created implicit sessions if Find returns an error. 
+ defer func() { + if err != nil && sess != nil { + closeImplicitSession(sess) + } + }() if sess == nil && coll.client.sessionPool != nil { var err error sess, err = session.NewClientSession(coll.client.sessionPool, coll.client.id, session.Implicit) @@ -1122,7 +1217,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, err = coll.client.validSession(sess) if err != nil { - closeImplicitSession(sess) return nil, err } @@ -1136,7 +1230,8 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, Session(sess).ReadConcern(rc).ReadPreference(coll.readPreference). CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). - Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI) + Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). + Timeout(coll.client.timeout) fo := options.MergeFindOptions(opts...) cursorOpts := coll.client.createBaseCursorOptions() @@ -1156,6 +1251,12 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, } if fo.Comment != nil { op.Comment(*fo.Comment) + + commentVal, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return nil, err + } + cursorOpts.Comment = commentVal } if fo.CursorType != nil { switch *fo.CursorType { @@ -1169,11 +1270,17 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Hint != nil { hint, err := transformValue(coll.registry, fo.Hint, false, "hint") if err != nil { - closeImplicitSession(sess) return nil, err } op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return nil, err + } + op.Let(let) + } if fo.Limit != nil { limit := *fo.Limit if limit < 0 { @@ -1186,7 +1293,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Max != nil { max, err := transformBsoncoreDocument(coll.registry, fo.Max, true, "max") if err != nil { - closeImplicitSession(sess) return nil, err } op.Max(max) @@ -1200,7 +1306,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Min != nil { min, err := transformBsoncoreDocument(coll.registry, fo.Min, true, "min") if err != nil { - closeImplicitSession(sess) return nil, err } op.Min(min) @@ -1214,7 +1319,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Projection != nil { proj, err := transformBsoncoreDocument(coll.registry, fo.Projection, true, "projection") if err != nil { - closeImplicitSession(sess) return nil, err } op.Projection(proj) @@ -1234,7 +1338,6 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, if fo.Sort != nil { sort, err := transformBsoncoreDocument(coll.registry, fo.Sort, false, "sort") if err != nil { - closeImplicitSession(sess) return nil, err } op.Sort(sort) @@ -1246,13 +1349,11 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, op = op.Retry(retry) if err = op.Execute(ctx); err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } bc, err := op.Result(cursorOpts) if err != nil { - closeImplicitSession(sess) return nil, replaceErrors(err) } return newCursorWithSession(bc, coll.registry, sess) @@ -1266,7 +1367,7 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, // // The opts parameter can be used to specify options for this operation (see the options.FindOneOptions documentation). 
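> Reviewer note: for context on the `FindOne` path being touched here, the `SingleResult` contract in practice — a no-match surfaces from `Decode` as `mongo.ErrNoDocuments`, not as an error from `FindOne` itself:

```go
package example

import (
	"context"
	"errors"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// findByID decodes a single document, treating "not found" as a nil result
// (a hypothetical policy; callers may prefer to propagate the sentinel).
func findByID(ctx context.Context, coll *mongo.Collection, id interface{}) (bson.M, error) {
	var doc bson.M
	err := coll.FindOne(ctx, bson.D{{"_id", id}}).Decode(&doc)
	if errors.Is(err, mongo.ErrNoDocuments) {
		return nil, nil
	}
	return doc, err
}
```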
// -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/find/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/find/. func (coll *Collection) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *SingleResult { @@ -1276,6 +1377,9 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, findOpts := make([]*options.FindOptions, 0, len(opts)) for _, opt := range opts { + if opt == nil { + continue + } findOpts = append(findOpts, &options.FindOptions{ AllowPartialResults: opt.AllowPartialResults, BatchSize: opt.BatchSize, @@ -1369,7 +1473,7 @@ func (coll *Collection) findAndModify(ctx context.Context, op *operation.FindAnd // The opts parameter can be used to specify options for the operation (see the options.FindOneAndDeleteOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{}, opts ...*options.FindOneAndDeleteOptions) *SingleResult { @@ -1378,10 +1482,17 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} return &SingleResult{err: err} } fod := options.MergeFindOneAndDeleteOptions(opts...) - op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).Remove(true).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fod.Collation != nil { op = op.Collation(bsoncore.Document(fod.Collation.ToDocument())) } + if fod.Comment != nil { + comment, err := transformValue(coll.registry, fod.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fod.MaxTime != nil { op = op.MaxTimeMS(int64(*fod.MaxTime / time.Millisecond)) } @@ -1406,6 +1517,13 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fod.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fod.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1418,12 +1536,12 @@ func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{} // ErrNoDocuments wil be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The replacement parameter must be a document that will be used to replace the selected document. It cannot be nil -// and cannot contain any update operators (https://docs.mongodb.com/manual/reference/operator/update/). +// and cannot contain any update operators (https://www.mongodb.com/docs/manual/reference/operator/update/). // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndReplaceOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. 
func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{}, replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult { @@ -1441,13 +1559,20 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ fo := options.MergeFindOneAndReplaceOptions(opts...) op := operation.NewFindAndModify(f).Update(bsoncore.Value{Type: bsontype.EmbeddedDocument, Data: r}). - ServerAPI(coll.client.serverAPI) + ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) if fo.BypassDocumentValidation != nil && *fo.BypassDocumentValidation { op = op.BypassDocumentValidation(*fo.BypassDocumentValidation) } if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1478,6 +1603,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } @@ -1490,13 +1622,13 @@ func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{ // ErrNoDocuments will be returned. If the filter matches multiple documents, one will be selected from the matched set. // // The update parameter must be a document containing update operators -// (https://docs.mongodb.com/manual/reference/operator/update/) and can be used to specify the modifications to be made +// (https://www.mongodb.com/docs/manual/reference/operator/update/) and can be used to specify the modifications to be made // to the selected document. It cannot be nil or empty. // // The opts parameter can be used to specify options for the operation (see the options.FindOneAndUpdateOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/findAndModify/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/findAndModify/. func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{}, update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult { @@ -1510,7 +1642,7 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } fo := options.MergeFindOneAndUpdateOptions(opts...)
- op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI) + op := operation.NewFindAndModify(f).ServerAPI(coll.client.serverAPI).Timeout(coll.client.timeout) u, err := transformUpdateValue(coll.registry, update, true) if err != nil { @@ -1531,6 +1663,13 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} if fo.Collation != nil { op = op.Collation(bsoncore.Document(fo.Collation.ToDocument())) } + if fo.Comment != nil { + comment, err := transformValue(coll.registry, fo.Comment, true, "comment") + if err != nil { + return &SingleResult{err: err} + } + op = op.Comment(comment) + } if fo.MaxTime != nil { op = op.MaxTimeMS(int64(*fo.MaxTime / time.Millisecond)) } @@ -1561,18 +1700,25 @@ func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{} } op = op.Hint(hint) } + if fo.Let != nil { + let, err := transformBsoncoreDocument(coll.registry, fo.Let, true, "let") + if err != nil { + return &SingleResult{err: err} + } + op = op.Let(let) + } return coll.findAndModify(ctx, op) } // Watch returns a change stream for all changes on the corresponding collection. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Collection must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be an array of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -1602,6 +1748,69 @@ func (coll *Collection) Indexes() IndexView { // Drop drops the collection on the server. This method ignores "namespace not found" errors so it is safe to drop // a collection that does not exist on the server. func (coll *Collection) Drop(ctx context.Context) error { + // Follow Client-Side Encryption specification to check for encryptedFields. + // Drop does not have an encryptedFields option. See: GODRIVER-2413. + // Check for encryptedFields from the client EncryptedFieldsMap. + // Check for encryptedFields from the server if EncryptedFieldsMap is set. + ef := coll.db.getEncryptedFieldsFromMap(coll.name) + if ef == nil && coll.db.client.encryptedFieldsMap != nil { + var err error + if ef, err = coll.db.getEncryptedFieldsFromServer(ctx, coll.name); err != nil { + return err + } + } + + if ef != nil { + return coll.dropEncryptedCollection(ctx, ef) + } + + return coll.drop(ctx) +} + +// dropEncryptedCollection drops a collection with EncryptedFields. +func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { + efBSON, err := transformBsoncoreDocument(coll.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Drop the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + // Drop ESCCollection. 
+ escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedStateCollection) + if err != nil { + return err + } + if err := coll.db.Collection(escCollection).drop(ctx); err != nil { + return err + } + + // Drop ECCCollection. + eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + if err := coll.db.Collection(eccCollection).drop(ctx); err != nil { + return err + } + + // Drop ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, coll.name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + if err := coll.db.Collection(ecocCollection).drop(ctx); err != nil { + return err + } + + // Drop the data collection. + if err := coll.drop(ctx); err != nil { + return err + } + return nil +} + +// drop drops a collection without EncryptedFields. +func (coll *Collection) drop(ctx context.Context) error { if ctx == nil { ctx = context.Background() } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index 3ec03baf4b..d21005fedb 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -15,6 +15,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -67,6 +68,47 @@ func newEmptyCursor() *Cursor { return &Cursor{bc: driver.NewEmptyBatchCursor()} } +// NewCursorFromDocuments creates a new Cursor pre-loaded with the provided documents, error and registry. If no registry is provided, +// bson.DefaultRegistry will be used. +// +// The documents parameter must be a slice of documents. The slice may be nil or empty, but all elements must be non-nil. +func NewCursorFromDocuments(documents []interface{}, err error, registry *bsoncodec.Registry) (*Cursor, error) { + if registry == nil { + registry = bson.DefaultRegistry + } + + // Convert documents slice to a sequence-style byte array. + var docsBytes []byte + for _, doc := range documents { + switch t := doc.(type) { + case nil: + return nil, ErrNilDocument + case bsonx.Doc: + doc = t.Copy() + case []byte: + // Slight optimization so we'll just use MarshalBSON and not go through the codec machinery. + doc = bson.Raw(t) + } + var marshalErr error + docsBytes, marshalErr = bson.MarshalAppendWithRegistry(registry, docsBytes, doc) + if marshalErr != nil { + return nil, marshalErr + } + } + + c := &Cursor{ + bc: driver.NewBatchCursorFromDocuments(docsBytes), + registry: registry, + err: err, + } + + // Initialize batch and batchLength here. The underlying batch cursor will be preloaded with the + // provided contents, and thus already has a batch before calls to Next/TryNext. + c.batch = c.bc.Batch() + c.batchLength = c.bc.Batch().DocumentCount() + return c, nil +} + // ID returns the ID of this cursor, or 0 if the cursor has been closed or exhausted. func (c *Cursor) ID() int64 { return c.bc.ID() } @@ -83,7 +125,7 @@ func (c *Cursor) Next(ctx context.Context) bool { // TryNext attempts to get the next document for this cursor. It returns true if there were no errors and the next // document is available. This is only recommended for use with tailable cursors as a non-blocking alternative to -// Next. 
See https://docs.mongodb.com/manual/core/tailable-cursors/ for more information about tailable cursors. +// Next. See https://www.mongodb.com/docs/manual/core/tailable-cursors/ for more information about tailable cursors. // // TryNext returns false if the cursor is exhausted, an error occurs when getting results from the server, the next // document is not yet available, or ctx expires. If ctx expires, the error will be set to ctx.Err(). diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index 2078733443..57b5417fd3 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -13,12 +13,12 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/bsoncodec" + "go.mongodb.org/mongo-driver/internal" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/mongo/writeconcern" - "go.mongodb.org/mongo-driver/x/bsonx" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -107,12 +107,12 @@ func (db *Database) Collection(name string, opts ...*options.CollectionOptions) // The pipeline parameter must be a slice of documents, each representing an aggregation stage. The pipeline // cannot be nil but can be empty. The stage documents must all be non-nil. For a pipeline of bson.D documents, the // mongo.Pipeline type can be used. See -// https://docs.mongodb.com/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid +// https://www.mongodb.com/docs/manual/reference/operator/aggregation-pipeline/#db-aggregate-stages for a list of valid // stages in database-level aggregations. // // The opts parameter can be used to specify options for this operation (see the options.AggregateOptions documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/aggregate/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/aggregate/. func (db *Database) Aggregate(ctx context.Context, pipeline interface{}, opts ...*options.AggregateOptions) (*Cursor, error) { a := aggregateParams{ @@ -176,7 +176,8 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). - Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI), sess, nil + Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). + Timeout(db.client.timeout), sess, nil } // RunCommand executes the given command against the database. This function does not obey the Database's read @@ -184,11 +185,13 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. 
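The reworked RunCommand documentation (continued below) keeps the requirement that the command document be order-preserving. For reference, a minimal sketch: bson.D guarantees the command name is the first key, which bson.M cannot:

```go
package examples

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

// ping runs a raw command. bson.D keeps key order; with a bson.M the server
// could see some other key first and interpret a different command.
func ping(ctx context.Context, db *mongo.Database) (bson.M, error) {
	var result bson.M
	err := db.RunCommand(ctx, bson.D{{"ping", 1}}).Decode(&result)
	return result, err
}
```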
-// Specifying API versioning options in the command document and declaring an API version on the client is not supported. -// The behavior of RunCommand is undefined in this case. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommand is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult { if ctx == nil { ctx = context.Background() @@ -217,9 +220,13 @@ func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. -// If the command document contains a session ID or any transaction-specific fields, the behavior is undefined. // // The opts parameter can be used to specify options for this operation (see the options.RunCmdOptions documentation). +// +// The behavior of RunCommandCursor is undefined if the command document contains any of the following: +// - A session ID or any transaction-specific fields +// - API versioning options when an API version is already declared on the Client +// - maxTimeMS when Timeout is set on the Client func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -302,7 +309,10 @@ func (db *Database) Drop(ctx context.Context) error { // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionSpecifications prevents listing more than 100 collections per database when running +// against MongoDB version 2.6. func (db *Database) ListCollectionSpecifications(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]*CollectionSpecification, error) { @@ -336,7 +346,10 @@ func (db *Database) ListCollectionSpecifications(ctx context.Context, filter int // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollections prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -372,7 +385,7 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt Session(sess).ReadPreference(db.readPreference).CommandMonitor(db.client.monitor). 
ServerSelector(selector).ClusterClock(db.client.clock). Database(db.name).Deployment(db.client.deployment).Crypt(db.client.cryptFLE). - ServerAPI(db.client.serverAPI) + ServerAPI(db.client.serverAPI).Timeout(db.client.timeout) cursorOpts := db.client.createBaseCursorOptions() if lco.NameOnly != nil { @@ -382,6 +395,9 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt cursorOpts.BatchSize = *lco.BatchSize op = op.BatchSize(*lco.BatchSize) } + if lco.AuthorizedCollections != nil { + op = op.AuthorizedCollections(*lco.AuthorizedCollections) + } retry := driver.RetryNone if db.client.retryReads { @@ -414,7 +430,10 @@ func (db *Database) ListCollections(ctx context.Context, filter interface{}, opt // The opts parameter can be used to specify options for the operation (see the options.ListCollectionsOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listCollections/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listCollections/. +// +// BUG(benjirewis): ListCollectionNames prevents listing more than 100 collections per database when running against +// MongoDB version 2.6. func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) ([]string, error) { opts = append(opts, options.ListCollections().SetNameOnly(true)) @@ -427,19 +446,13 @@ func (db *Database) ListCollectionNames(ctx context.Context, filter interface{}, names := make([]string, 0) for res.Next(ctx) { - next := &bsonx.Doc{} - err = res.Decode(next) - if err != nil { - return nil, err - } - - elem, err := next.LookupErr("name") + elem, err := res.Current.LookupErr("name") if err != nil { return nil, err } - if elem.Type() != bson.TypeString { - return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type(), bson.TypeString) + if elem.Type != bson.TypeString { + return nil, fmt.Errorf("incorrect type for 'name'. got %v. want %v", elem.Type, bson.TypeString) } elemName := elem.StringValue() @@ -466,13 +479,13 @@ func (db *Database) WriteConcern() *writeconcern.WriteConcern { } // Watch returns a change stream for all changes to the corresponding database. See -// https://docs.mongodb.com/manual/changeStreams/ for more information about change streams. +// https://www.mongodb.com/docs/manual/changeStreams/ for more information about change streams. // // The Database must be configured with read concern majority or no read concern for a change stream to be created // successfully. // // The pipeline parameter must be a slice of documents, each representing a pipeline stage. The pipeline cannot be -// nil but can be empty. The stage documents must all be non-nil. See https://docs.mongodb.com/manual/changeStreams/ for +// nil but can be empty. The stage documents must all be non-nil. See https://www.mongodb.com/docs/manual/changeStreams/ for // a list of pipeline stages that can be used with change streams. For a pipeline of bson.D documents, the // mongo.Pipeline{} type can be used. // @@ -500,8 +513,141 @@ func (db *Database) Watch(ctx context.Context, pipeline interface{}, // The opts parameter can be used to specify options for the operation (see the options.CreateCollectionOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/create/. 
+// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/create/. func (db *Database) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + cco := options.MergeCreateCollectionOptions(opts...) + // Follow Client-Side Encryption specification to check for encryptedFields. + // Check for encryptedFields from create options. + ef := cco.EncryptedFields + // Check for encryptedFields from the client EncryptedFieldsMap. + if ef == nil { + ef = db.getEncryptedFieldsFromMap(name) + } + if ef != nil { + return db.createCollectionWithEncryptedFields(ctx, name, ef, opts...) + } + + return db.createCollection(ctx, name, opts...) +} + +// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by running the "listCollections" command. +// Returns nil and no error if the listCollections command succeeds, but "encryptedFields" is not present. +func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collectionName string) (interface{}, error) { + // Check if collection has an EncryptedFields configured server-side. + collSpecs, err := db.ListCollectionSpecifications(ctx, bson.D{{"name", collectionName}}) + if err != nil { + return nil, err + } + if len(collSpecs) == 0 { + return nil, nil + } + if len(collSpecs) > 1 { + return nil, fmt.Errorf("expected 1 or 0 results from listCollections, got %v", len(collSpecs)) + } + collSpec := collSpecs[0] + rawValue, err := collSpec.Options.LookupErr("encryptedFields") + if err == bsoncore.ErrElementNotFound { + return nil, nil + } else if err != nil { + return nil, err + } + + encryptedFields, ok := rawValue.DocumentOK() + if !ok { + return nil, fmt.Errorf("expected encryptedFields of %v to be document, got %v", collectionName, rawValue.Type) + } + + return encryptedFields, nil +} + +// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. +func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { + // Check the EncryptedFieldsMap + efMap := db.client.encryptedFieldsMap + if efMap == nil { + return nil + } + + namespace := db.name + "." + collectionName + + ef, ok := efMap[namespace] + if ok { + return ef + } + return nil +} + +// createCollectionWithEncryptedFields creates a collection with EncryptedFields. +func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { + efBSON, err := transformBsoncoreDocument(db.registry, ef, true /* mapAllowed */, "encryptedFields") + if err != nil { + return fmt.Errorf("error transforming document: %v", err) + } + + // Create the three encryption-related, associated collections: `escCollection`, `eccCollection` and `ecocCollection`. + + stateCollectionOpts := options.CreateCollection(). + SetClusteredIndex(bson.D{{"key", bson.D{{"_id", 1}}}, {"unique", true}}) + // Create ESCCollection. + escCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedStateCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, escCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECCCollection.
+ eccCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCacheCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, eccCollection, stateCollectionOpts); err != nil { + return err + } + + // Create ECOCCollection. + ecocCollection, err := internal.GetEncryptedStateCollectionName(efBSON, name, internal.EncryptedCompactionCollection) + if err != nil { + return err + } + + if err := db.createCollection(ctx, ecocCollection, stateCollectionOpts); err != nil { + return err + } + + // Create a data collection with the 'encryptedFields' option. + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + + op.EncryptedFields(efBSON) + if err := db.executeCreateOperation(ctx, op); err != nil { + return err + } + + // Create an index on the __safeContent__ field in the collection @collectionName. + if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil { + return fmt.Errorf("error creating safeContent index: %v", err) + } + + return nil +} + +// createCollection creates a collection without EncryptedFields. +func (db *Database) createCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error { + op, err := db.createCollectionOperation(name, opts...) + if err != nil { + return err + } + return db.executeCreateOperation(ctx, op) +} + +func (db *Database) createCollectionOperation(name string, opts ...*options.CreateCollectionOptions) (*operation.Create, error) { cco := options.MergeCreateCollectionOptions(opts...) op := operation.NewCreate(name).ServerAPI(db.client.serverAPI) @@ -511,19 +657,26 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Collation != nil { op.Collation(bsoncore.Document(cco.Collation.ToDocument())) } + if cco.ChangeStreamPreAndPostImages != nil { + csppi, err := transformBsoncoreDocument(db.registry, cco.ChangeStreamPreAndPostImages, true, "changeStreamPreAndPostImages") + if err != nil { + return nil, err + } + op.ChangeStreamPreAndPostImages(csppi) + } if cco.DefaultIndexOptions != nil { idx, doc := bsoncore.AppendDocumentStart(nil) if cco.DefaultIndexOptions.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.DefaultIndexOptions.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } doc = bsoncore.AppendDocumentElement(doc, "storageEngine", storageEngine) } doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.IndexOptionDefaults(doc) @@ -537,7 +690,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.StorageEngine != nil { storageEngine, err := transformBsoncoreDocument(db.registry, cco.StorageEngine, true, "storageEngine") if err != nil { - return err + return nil, err } op.StorageEngine(storageEngine) } @@ -550,7 +703,7 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* if cco.Validator != nil { validator, err := transformBsoncoreDocument(db.registry, cco.Validator, true, "validator") if err != nil { - return err + return nil, err } op.Validator(validator) } @@ -570,17 +723,24 @@ func (db *Database) CreateCollection(ctx context.Context, name string, opts ...* doc, err := bsoncore.AppendDocumentEnd(doc, idx) if err != nil { - return err + return nil, err } op.TimeSeries(doc) } + if cco.ClusteredIndex != nil { + clusteredIndex, err := 
transformBsoncoreDocument(db.registry, cco.ClusteredIndex, true, "clusteredIndex") + if err != nil { + return nil, err + } + op.ClusteredIndex(clusteredIndex) + } - return db.executeCreateOperation(ctx, op) + return op, nil } // CreateView executes a create command to explicitly create a view on the server. See -// https://docs.mongodb.com/manual/core/views/ for more information about views. This method requires driver version >= +// https://www.mongodb.com/docs/manual/core/views/ for more information about views. This method requires driver version >= // 1.4.0 and MongoDB version >= 3.4. // // The viewName parameter specifies the name of the view to create. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index 753c45b666..8e810cb9cd 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -143,7 +143,7 @@ func readPrefSelector(rp *readpref.ReadPref, isOutputAggregate bool) ServerSelec return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) { if t.Kind == LoadBalanced { // In LoadBalanced mode, there should only be one server in the topology and it must be selected. We check - // this before checking MaxStaleness support becuase there's no monitoring in this mode, so the candidate + // this before checking MaxStaleness support because there's no monitoring in this mode, so the candidate // server wouldn't have a wire version set, which would result in an error. return candidates, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go index 669aa14c9f..76a063facc 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/doc.go @@ -105,8 +105,14 @@ // // Note: Auto encryption is an enterprise-only feature. // -// The libmongocrypt C library is required when using client-side encryption. libmongocrypt version 1.3.0 or higher is -// required when using driver version 1.8.0 or higher. To install libmongocrypt, follow the instructions for your +// The libmongocrypt C library is required when using client-side encryption. Specific versions of libmongocrypt +// are required for different versions of the Go Driver: +// - Go Driver v1.2.0 requires libmongocrypt v1.0.0 or higher +// - Go Driver v1.5.0 requires libmongocrypt v1.1.0 or higher +// - Go Driver v1.8.0 requires libmongocrypt v1.3.0 or higher +// - Go Driver v1.10.0 requires libmongocrypt v1.5.0 or higher +// +// To install libmongocrypt, follow the instructions for your // operating system: // // 1. Linux: follow the instructions listed at @@ -117,6 +123,7 @@ // to install packages via brew and compile the libmongocrypt source code. // // 3. Windows: +// // mkdir -p c:/libmongocrypt/bin // mkdir -p c:/libmongocrypt/include // @@ -128,18 +135,8 @@ // cp ./include/mongocrypt/*.h c:/libmongocrypt/include // export PATH=$PATH:/cygdrive/c/libmongocrypt/bin // -// libmongocrypt communicates with the mongocryptd process for automatic encryption. This process can be started manually -// or auto-spawned by the driver itself. To enable auto-spawning, ensure the process binary is on the PATH. 
To start it -// manually, use AutoEncryptionOptions: -// -// aeo := options.AutoEncryption() -// mongocryptdOpts := map[string]interface{}{ -// "mongocryptdBypassSpawn": true, -// } -// aeo.SetExtraOptions(mongocryptdOpts) -// To specify a process URI for mongocryptd, the "mongocryptdURI" option can be passed in the ExtraOptions map as well. -// See the ClientSideEncryption and ClientSideEncryptionCreateKey examples below for code samples about using this -// feature. +// libmongocrypt communicates with the mongocryptd process or mongo_crypt shared library for automatic encryption. +// See AutoEncryptionOpts.SetExtraOptions for options to configure use of mongocryptd or mongo_crypt. // -// [1] See https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format +// [1] See https://www.mongodb.com/docs/manual/reference/connection-string/#dns-seedlist-connection-format package mongo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 2c3ae15790..33e23573fc 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -56,6 +56,7 @@ func replaceErrors(err error) error { Labels: de.Labels, Name: de.Name, Wrapped: de.Wrapped, + Raw: bson.Raw(de.Raw), } } if qe, ok := err.(driver.QueryFailureError); ok { @@ -63,6 +64,7 @@ func replaceErrors(err error) error { ce := CommandError{ Name: qe.Message, Wrapped: qe.Wrapped, + Raw: bson.Raw(qe.Response), } dollarErr, err := qe.Response.LookupErr("$err") @@ -102,6 +104,9 @@ func IsTimeout(err error) bool { if err == context.DeadlineExceeded { return true } + if err == driver.ErrDeadlineWouldBeExceeded { + return true + } if ne, ok := err.(net.Error); ok { return ne.Timeout() } @@ -207,6 +212,7 @@ type ServerError interface { } var _ ServerError = CommandError{} +var _ ServerError = WriteError{} var _ ServerError = WriteException{} var _ ServerError = BulkWriteException{} @@ -217,6 +223,7 @@ type CommandError struct { Labels []string // Categories to which the error belongs Name string // A human-readable name corresponding to the error code Wrapped error // The underlying error, if one exists. + Raw bson.Raw // The original server response containing the error. } // Error implements the error interface. @@ -276,6 +283,9 @@ type WriteError struct { Code int Message string Details bson.Raw + + // The original write error from the server response. + Raw bson.Raw } func (we WriteError) Error() string { @@ -286,6 +296,30 @@ func (we WriteError) Error() string { return msg } +// HasErrorCode returns true if the error has the specified code. +func (we WriteError) HasErrorCode(code int) bool { + return we.Code == code +} + +// HasErrorLabel returns true if the error contains the specified label. WriteErrors do not contain labels, +// so we always return false. +func (we WriteError) HasErrorLabel(label string) bool { + return false +} + +// HasErrorMessage returns true if the error contains the specified message. +func (we WriteError) HasErrorMessage(message string) bool { + return strings.Contains(we.Message, message) +} + +// HasErrorCodeWithMessage returns true if the error has the specified code and Message contains the specified message. +func (we WriteError) HasErrorCodeWithMessage(code int, message string) bool { + return we.Code == code && strings.Contains(we.Message, message) +} + +// serverError implements the ServerError interface. 
+func (we WriteError) serverError() {} + // WriteErrors is a group of write errors that occurred during execution of a write operation. type WriteErrors []WriteError @@ -307,6 +341,7 @@ func writeErrorsFromDriverWriteErrors(errs driver.WriteErrors) WriteErrors { Code: int(err.Code), Message: err.Message, Details: bson.Raw(err.Details), + Raw: bson.Raw(err.Raw), }) } return wes @@ -319,6 +354,7 @@ type WriteConcernError struct { Code int Message string Details bson.Raw + Raw bson.Raw // The original write concern error from the server response. } // Error implements the error interface. @@ -340,6 +376,9 @@ type WriteException struct { // The categories to which the exception belongs. Labels []string + + // The original server response containing the error. + Raw bson.Raw } // Error implements the error interface. @@ -426,6 +465,7 @@ func convertDriverWriteConcernError(wce *driver.WriteConcernError) *WriteConcern Code: int(wce.Code), Message: wce.Message, Details: bson.Raw(wce.Details), + Raw: bson.Raw(wce.Raw), } } @@ -559,6 +599,7 @@ func processWriteError(err error) (returnResult, error) { WriteConcernError: convertDriverWriteConcernError(tt.WriteConcernError), WriteErrors: writeErrorsFromDriverWriteErrors(tt.WriteErrors), Labels: tt.Labels, + Raw: bson.Raw(tt.Raw), } default: return rrNone, replaceErrors(err) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index e8e260f166..a393c7e7c5 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -45,7 +45,7 @@ type IndexView struct { // IndexModel represents a new index to be created. type IndexModel struct { // A document describing which keys should be used for the index. It cannot be nil. This must be an order-preserving - // type such as bson.D. Map types such as bson.M are not valid. See https://docs.mongodb.com/manual/indexes/#indexes + // type such as bson.D. Map types such as bson.M are not valid. See https://www.mongodb.com/docs/manual/indexes/#indexes // for examples of valid documents. Keys interface{} @@ -65,7 +65,7 @@ func isNamespaceNotFoundError(err error) bool { // The opts parameter can be used to specify options for this operation (see the options.ListIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/listIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/listIndexes/. func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) { if ctx == nil { ctx = context.Background() @@ -95,7 +95,8 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) cursorOpts := iv.coll.client.createBaseCursorOptions() lio := options.MergeListIndexesOptions(opts...) @@ -175,7 +176,7 @@ func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*op // The opts parameter can be used to specify options for this operation (see the options.CreateIndexesOptions // documentation). 
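The errors.go changes above attach the raw server response to CommandError, WriteError, WriteConcernError, and WriteException, and make WriteError a ServerError in its own right. A sketch of how a caller can lean on the new helpers; the 11000 duplicate-key code is just an example value:

```go
package examples

import (
	"errors"

	"go.mongodb.org/mongo-driver/mongo"
)

// isDuplicateKey reports whether err contains a duplicate-key write error.
func isDuplicateKey(err error) bool {
	var we mongo.WriteException
	if errors.As(err, &we) {
		for _, e := range we.WriteErrors {
			// e is a WriteError, which now satisfies ServerError itself.
			if e.HasErrorCode(11000) {
				return true
			}
		}
	}
	return false
}

// isTimeout also covers the new driver.ErrDeadlineWouldBeExceeded case
// handled by mongo.IsTimeout above.
func isTimeout(err error) bool {
	return mongo.IsTimeout(err)
}
```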
// -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/createIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/createIndexes/. func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) { names := make([]string, 0, len(models)) @@ -256,7 +257,8 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). - Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerSelector(selector).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if option.MaxTime != nil { op.MaxTimeMS(int64(*option.MaxTime / time.Millisecond)) @@ -400,7 +402,8 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name). - Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI) + Deployment(iv.coll.client.deployment).ServerAPI(iv.coll.client.serverAPI). + Timeout(iv.coll.client.timeout) if dio.MaxTime != nil { op.MaxTimeMS(int64(*dio.MaxTime / time.Millisecond)) } @@ -427,7 +430,7 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) { if name == "*" { return nil, ErrMultipleIndexDrop @@ -443,7 +446,7 @@ func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.D // The opts parameter can be used to specify options for this operation (see the options.DropIndexesOptions // documentation). // -// For more information about the command, see https://docs.mongodb.com/manual/reference/command/dropIndexes/. +// For more information about the command, see https://www.mongodb.com/docs/manual/reference/command/dropIndexes/. func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) { return iv.drop(ctx, "*", opts...) 
} diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 89eec43427..80282527e4 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -123,17 +123,6 @@ func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonco return doc, id, nil } -func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) { - if doc, ok := val.(bsonx.Doc); ok { - return doc.Copy(), nil - } - b, err := transformBsoncoreDocument(registry, val, true, "document") - if err != nil { - return nil, err - } - return bsonx.ReadDoc(b) -} - func transformBsoncoreDocument(registry *bsoncodec.Registry, val interface{}, mapAllowed bool, paramName string) (bsoncore.Document, error) { if registry == nil { registry = bson.DefaultRegistry @@ -175,7 +164,7 @@ func ensureDollarKey(doc bsoncore.Document) error { func ensureNoDollarKey(doc bsoncore.Document) error { if elem, err := doc.IndexErr(0); err == nil && strings.HasPrefix(elem.Key(), "$") { - return errors.New("replacement document cannot contains keys beginning with '$") + return errors.New("replacement document cannot contain keys beginning with '$'") } return nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go index c36b1d31cd..016ccef62c 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongocryptd.go @@ -28,17 +28,18 @@ const ( var defaultTimeoutArgs = []string{"--idleShutdownTimeoutSecs=60"} var databaseOpts = options.Database().SetReadConcern(readconcern.New()).SetReadPreference(readpref.Primary()) -type mcryptClient struct { +type mongocryptdClient struct { bypassSpawn bool client *Client path string spawnArgs []string } -func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) { +func newMongocryptdClient(cryptSharedLibAvailable bool, opts *options.AutoEncryptionOptions) (*mongocryptdClient, error) { // create mcryptClient instance and spawn process if necessary var bypassSpawn bool var bypassAutoEncryption bool + if bypass, ok := opts.ExtraOptions["mongocryptdBypassSpawn"]; ok { bypassSpawn = bypass.(bool) } @@ -46,10 +47,15 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) bypassAutoEncryption = *opts.BypassAutoEncryption } - mc := &mcryptClient{ - // mongocryptd should not be spawned if mongocryptdBypassSpawn is passed or if bypassAutoEncryption is - // specified because it is not used during decryption - bypassSpawn: bypassSpawn || bypassAutoEncryption, + bypassQueryAnalysis := opts.BypassQueryAnalysis != nil && *opts.BypassQueryAnalysis + + mc := &mongocryptdClient{ + // mongocryptd should not be spawned if any of these conditions are true: + // - mongocryptdBypassSpawn is passed + // - bypassAutoEncryption is true because mongocryptd is not used during decryption + // - bypassQueryAnalysis is true because mongocryptd is not used during decryption + // - the crypt_shared library is available because it replaces all mongocryptd functionality. + bypassSpawn: bypassSpawn || bypassAutoEncryption || bypassQueryAnalysis || cryptSharedLibAvailable, } if !mc.bypassSpawn { @@ -76,7 +82,7 @@ func newMcryptClient(opts *options.AutoEncryptionOptions) (*mcryptClient, error) } // markCommand executes the given command on mongocryptd. 
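The corrected message above ("replacement document cannot contain keys beginning with '$'") comes from ensureNoDollarKey, which rejects update operators in replacement documents. A short illustration of the two call styles it separates; collection and values are hypothetical:

```go
package examples

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
)

func replaceVsUpdate(ctx context.Context, coll *mongo.Collection, id interface{}) error {
	// ReplaceOne takes a plain document; a "$set" key here would trip the
	// ensureNoDollarKey check and return the corrected error above.
	if _, err := coll.ReplaceOne(ctx, bson.D{{"_id", id}}, bson.D{{"name", "alice"}}); err != nil {
		return err
	}
	// UpdateOne is the operator-based counterpart, validated by ensureDollarKey.
	_, err := coll.UpdateOne(ctx, bson.D{{"_id", id}}, bson.D{{"$set", bson.D{{"name", "bob"}}}})
	return err
}
```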
-func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { +func (mc *mongocryptdClient) markCommand(ctx context.Context, dbName string, cmd bsoncore.Document) (bsoncore.Document, error) { // Remove the explicit session from the context if one is set. // The explicit session will be from a different client. // If an explicit session is set, it is applied after automatic encryption. @@ -105,16 +111,16 @@ func (mc *mcryptClient) markCommand(ctx context.Context, dbName string, cmd bson } // connect connects the underlying Client instance. This must be called before performing any mark operations. -func (mc *mcryptClient) connect(ctx context.Context) error { +func (mc *mongocryptdClient) connect(ctx context.Context) error { return mc.client.Connect(ctx) } // disconnect disconnects the underlying Client instance. This should be called after all operations have completed. -func (mc *mcryptClient) disconnect(ctx context.Context) error { +func (mc *mongocryptdClient) disconnect(ctx context.Context) error { return mc.client.Disconnect(ctx) } -func (mc *mcryptClient) spawnProcess() error { +func (mc *mongocryptdClient) spawnProcess() error { // Ignore gosec warning about subprocess launched with externally-provided path variable. /* #nosec G204 */ cmd := exec.Command(mc.path, mc.spawnArgs...) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go index e1f710fd23..983eba24fd 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/aggregateoptions.go @@ -6,7 +6,11 @@ package options -import "time" +import ( + "time" + + "go.mongodb.org/mongo-driver/bson" +) // AggregateOptions represents options that can be used to configure an Aggregate operation. type AggregateOptions struct { @@ -19,7 +23,7 @@ type AggregateOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool @@ -30,6 +34,10 @@ type AggregateOptions struct { // The maximum amount of time that the query can run on the server. The default value is nil, meaning that there // is no time limit for query execution. + // + // Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general + // Timeout option should be used in its place to control the amount of time that the Aggregate operation can run before + // returning an error. MaxTime is still usable through the deprecated setter. MaxTime *time.Duration // The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query. @@ -37,7 +45,7 @@ type AggregateOptions struct { MaxAwaitTime *time.Duration // A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation. - // The default is the empty string, which means that no comment will be included in the logs. + // The default is nil, which means that no comment will be included in the logs. 
Comment *string // The index to use for the aggregation. This should either be the index name as a string or the index specification @@ -50,6 +58,11 @@ type AggregateOptions struct { // Values must be constant or closed expressions that do not reference document fields. Parameters can then be // accessed as variables in an aggregate expression context (e.g. "$$var"). Let interface{} + + // Custom options to be added to aggregate expression. Key-value pairs of the BSON map should correlate with desired + // option names and values. Values must be Marshalable. Custom options may conflict with non-custom options, and custom + // options bypass client-side validation. Prefer using non-custom options where possible. + Custom bson.M } // Aggregate creates a new AggregateOptions instance. @@ -82,6 +95,10 @@ func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions { } // SetMaxTime sets the value for the MaxTime field. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. +// The more general Timeout option should be used in its place to control the amount of time that the +// Aggregate operation can run before returning an error. func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions { ao.MaxTime = &d return ao @@ -111,6 +128,15 @@ func (ao *AggregateOptions) SetLet(let interface{}) *AggregateOptions { return ao } +// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate +// with desired option names and values. Values must be Marshalable. Custom options may conflict +// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom +// options where possible. +func (ao *AggregateOptions) SetCustom(c bson.M) *AggregateOptions { + ao.Custom = c + return ao +} + // MergeAggregateOptions combines the given AggregateOptions instances into a single AggregateOptions in a last-one-wins // fashion. func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { @@ -146,6 +172,9 @@ func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions { if ao.Let != nil { aggOpts.Let = ao.Let } + if ao.Custom != nil { + aggOpts.Custom = ao.Custom + } } return aggOpts diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go index 89c3c05f16..375d899918 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/autoencryptionoptions.go @@ -32,6 +32,8 @@ type AutoEncryptionOptions struct { BypassAutoEncryption *bool ExtraOptions map[string]interface{} TLSConfig map[string]*tls.Config + EncryptedFieldsMap map[string]interface{} + BypassQueryAnalysis *bool } // AutoEncryption creates a new AutoEncryptionOptions configured with default values. @@ -90,7 +92,35 @@ func (a *AutoEncryptionOptions) SetBypassAutoEncryption(bypass bool) *AutoEncryp return a } -// SetExtraOptions specifies a map of options to configure the mongocryptd process. +// SetExtraOptions specifies a map of options to configure the mongocryptd process or mongo_crypt shared library. +// +// Supported Extra Options +// +// "mongocryptdURI" - The mongocryptd URI. Allows setting a custom URI used to communicate with the +// mongocryptd process. The default is "mongodb://localhost:27020", which works with the default +// mongocryptd process spawned by the Client. Must be a string. 
+// +// "mongocryptdBypassSpawn" - If set to true, the Client will not attempt to spawn a mongocryptd +// process. Must be a bool. +// +// "mongocryptdSpawnPath" - The path used when spawning mongocryptd. +// Defaults to empty string and spawns mongocryptd from system path. Must be a string. +// +// "mongocryptdSpawnArgs" - Command line arguments passed when spawning mongocryptd. +// Defaults to ["--idleShutdownTimeoutSecs=60"]. Must be an array of strings. +// +// "cryptSharedLibRequired" - If set to true, Client creation will return an error if the +// crypt_shared library is not loaded. If unset or set to false, Client creation will not return an +// error if the crypt_shared library is not loaded. The default is unset. Must be a bool. +// +// "cryptSharedLibPath" - The crypt_shared library override path. This must be the path to the +// crypt_shared dynamic library file (for example, a .so, .dll, or .dylib file), not the directory +// that contains it. If the override path is a relative path, it will be resolved relative to the +// working directory of the process. If the override path is a relative path and the first path +// component is the literal string "$ORIGIN", the "$ORIGIN" component will be replaced by the +// absolute path to the directory containing the linked libmongocrypt library. Setting an override +// path disables the default system library search path. If an override path is specified but the +// crypt_shared library cannot be loaded, Client creation will return an error. Must be a string. func (a *AutoEncryptionOptions) SetExtraOptions(extraOpts map[string]interface{}) *AutoEncryptionOptions { a.ExtraOptions = extraOpts return a @@ -113,6 +143,22 @@ func (a *AutoEncryptionOptions) SetTLSConfig(tlsOpts map[string]*tls.Config) *Au return a } +// SetEncryptedFieldsMap specifies a map from namespace to local EncryptedFieldsMap document. +// EncryptedFieldsMap is used for Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetEncryptedFieldsMap(ef map[string]interface{}) *AutoEncryptionOptions { + a.EncryptedFieldsMap = ef + return a +} + +// SetBypassQueryAnalysis specifies whether or not query analysis should be used for automatic encryption. +// Use this option when using explicit encryption with Queryable Encryption. +// Queryable Encryption is in Public Technical Preview. Queryable Encryption should not be used in production and is subject to backwards breaking changes. +func (a *AutoEncryptionOptions) SetBypassQueryAnalysis(bypass bool) *AutoEncryptionOptions { + a.BypassQueryAnalysis = &bypass + return a +} + // MergeAutoEncryptionOptions combines the argued AutoEncryptionOptions in a last-one wins fashion. 
func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionOptions { aeo := AutoEncryption() @@ -142,6 +188,12 @@ func MergeAutoEncryptionOptions(opts ...*AutoEncryptionOptions) *AutoEncryptionO if opt.TLSConfig != nil { aeo.TLSConfig = opt.TLSConfig } + if opt.EncryptedFieldsMap != nil { + aeo.EncryptedFieldsMap = opt.EncryptedFieldsMap + } + if opt.BypassQueryAnalysis != nil { + aeo.BypassQueryAnalysis = opt.BypassQueryAnalysis + } } return aeo diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go index 57f98f83d1..0c36d0b7b0 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/bulkwriteoptions.go @@ -13,12 +13,22 @@ var DefaultOrdered = true type BulkWriteOptions struct { // If true, writes executed as part of the operation will opt out of document-level validation on the server. This // option is valid for MongoDB versions >= 3.2 and is ignored for previous server versions. The default value is - // false. See https://docs.mongodb.com/manual/core/schema-validation/ for more information about document + // false. See https://www.mongodb.com/docs/manual/core/schema-validation/ for more information about document // validation. BypassDocumentValidation *bool + // A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace + // the operation. The default value is nil, which means that no comment will be included in the logs. + Comment interface{} + // If true, no writes will be executed after one fails. The default value is true. Ordered *bool + + // Specifies parameters for all update and delete commands in the BulkWrite. This option is only valid for MongoDB + // versions >= 5.0. Older servers will report an error for using this option. This must be a document mapping + // parameter names to values. Values must be constant or closed expressions that do not reference document fields. + // Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). + Let interface{} } // BulkWrite creates a new *BulkWriteOptions instance. @@ -28,6 +38,12 @@ func BulkWrite() *BulkWriteOptions { } } +// SetComment sets the value for the Comment field. +func (b *BulkWriteOptions) SetComment(comment interface{}) *BulkWriteOptions { + b.Comment = comment + return b +} + // SetOrdered sets the value for the Ordered field. func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions { b.Ordered = &ordered @@ -40,6 +56,15 @@ func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOp return b } +// SetLet sets the value for the Let field. Let specifies parameters for all update and delete commands in the BulkWrite. +// This option is only valid for MongoDB versions >= 5.0. Older servers will report an error for using this option. +// This must be a document mapping parameter names to values. Values must be constant or closed expressions that do not +// reference document fields. Parameters can then be accessed as variables in an aggregate expression context (e.g. "$$var"). +func (b *BulkWriteOptions) SetLet(let interface{}) *BulkWriteOptions { + b.Let = &let + return b +} + // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins // fashion. 
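BulkWriteOptions gains Comment and Let above. A sketch of a bulk write using both; the model contents are illustrative, and Let requires MongoDB 5.0+:

```go
package examples

import (
	"context"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func restock(ctx context.Context, coll *mongo.Collection) (*mongo.BulkWriteResult, error) {
	models := []mongo.WriteModel{
		mongo.NewUpdateManyModel().
			// $$restockAt is resolved from the Let document below.
			SetFilter(bson.D{{"$expr", bson.D{{"$lt", bson.A{"$qty", "$$restockAt"}}}}}).
			SetUpdate(bson.D{{"$set", bson.D{{"reorder", true}}}}),
		mongo.NewDeleteManyModel().SetFilter(bson.D{{"discontinued", true}}),
	}
	return coll.BulkWrite(ctx, models,
		options.BulkWrite().
			SetOrdered(false).
			SetComment("nightly-restock").     // new Comment option
			SetLet(bson.D{{"restockAt", 10}})) // new Let option; MongoDB >= 5.0
}
```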
 // MergeBulkWriteOptions combines the given BulkWriteOptions instances into a single BulkWriteOptions in a last-one-wins
 // fashion.
 func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
@@ -48,12 +73,18 @@ func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
 		if opt == nil {
 			continue
 		}
+		if opt.Comment != nil {
+			b.Comment = opt.Comment
+		}
 		if opt.Ordered != nil {
 			b.Ordered = opt.Ordered
 		}
 		if opt.BypassDocumentValidation != nil {
 			b.BypassDocumentValidation = opt.BypassDocumentValidation
 		}
+		if opt.Let != nil {
+			b.Let = opt.Let
+		}
 	}
 
 	return b
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go
index fe19f45ebb..862abcd340 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/changestreamoptions.go
@@ -9,6 +9,7 @@ package options
 import (
 	"time"
 
+	"go.mongodb.org/mongo-driver/bson"
 	"go.mongodb.org/mongo-driver/bson/primitive"
 )
 
@@ -22,11 +23,18 @@ type ChangeStreamOptions struct {
 	// default value is nil, which means the default collation of the collection will be used.
 	Collation *Collation
 
-	// Specifies whether the updated document should be returned in change notifications for update operations along
-	// with the deltas describing the changes made to the document. The default is options.Default, which means that
-	// the updated document will not be included in the change notification.
+	// A string that will be included in server logs, profiling logs, and currentOp queries to help trace the operation.
+	// The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
+	// Specifies how the updated document should be returned in change notifications for update operations. The default
+	// is options.Default, which means that only partial update deltas will be included in the change notification.
 	FullDocument *FullDocument
 
+	// Specifies how the pre-update document should be returned in change notifications for update operations. The default
+	// is options.Off, which means that the pre-update document will not be included in the change notification.
+	FullDocumentBeforeChange *FullDocument
+
 	// The maximum amount of time that the server should wait for new documents to satisfy a tailable cursor query.
 	MaxAwaitTime *time.Duration
 
@@ -35,6 +43,11 @@ type ChangeStreamOptions struct {
 	// StartAfter must not be set.
 	ResumeAfter interface{}
 
+	// ShowExpandedEvents specifies whether the server will return an expanded list of change stream events. Additional
+	// events include: createIndexes, dropIndexes, modify, create, shardCollection, reshardCollection and
+	// refineCollectionShardKey. This option is only valid for MongoDB versions >= 6.0.
+	ShowExpandedEvents *bool
+
 	// If specified, the change stream will only return changes that occurred at or after the given timestamp. This
 	// option is only valid for MongoDB versions >= 4.0. If this is specified, ResumeAfter and StartAfter must not be
 	// set.
@@ -46,6 +59,16 @@ type ChangeStreamOptions struct {
 	// corresponding to an oplog entry immediately after the specified token will be returned. If this is specified,
 	// ResumeAfter and StartAtOperationTime must not be set. This option is only valid for MongoDB versions >= 4.1.1.
 	StartAfter interface{}
+
+	// Custom options to be added to the initial aggregate for the change stream. Key-value pairs of the BSON map should
+	// correlate with desired option names and values. Values must be Marshalable. Custom options may conflict with
+	// non-custom options, and custom options bypass client-side validation. Prefer using non-custom options where possible.
+	Custom bson.M
+
+	// Custom options to be added to the $changeStream stage in the initial aggregate. Key-value pairs of the BSON map should
+	// correlate with desired option names and values. Values must be Marshalable. Custom pipeline options bypass client-side
+	// validation. Prefer using non-custom options where possible.
+	CustomPipeline bson.M
 }
 
 // ChangeStream creates a new ChangeStreamOptions instance.
@@ -67,12 +90,24 @@ func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions {
 	return cso
 }
 
+// SetComment sets the value for the Comment field.
+func (cso *ChangeStreamOptions) SetComment(comment string) *ChangeStreamOptions {
+	cso.Comment = &comment
+	return cso
+}
+
 // SetFullDocument sets the value for the FullDocument field.
 func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions {
 	cso.FullDocument = &fd
 	return cso
 }
 
+// SetFullDocumentBeforeChange sets the value for the FullDocumentBeforeChange field.
+func (cso *ChangeStreamOptions) SetFullDocumentBeforeChange(fdbc FullDocument) *ChangeStreamOptions {
+	cso.FullDocumentBeforeChange = &fdbc
+	return cso
+}
+
 // SetMaxAwaitTime sets the value for the MaxAwaitTime field.
 func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions {
 	cso.MaxAwaitTime = &d
@@ -85,6 +120,12 @@ func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOpti
 	return cso
 }
 
+// SetShowExpandedEvents sets the value for the ShowExpandedEvents field.
+func (cso *ChangeStreamOptions) SetShowExpandedEvents(see bool) *ChangeStreamOptions {
+	cso.ShowExpandedEvents = &see
+	return cso
+}
+
 // SetStartAtOperationTime sets the value for the StartAtOperationTime field.
 func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions {
 	cso.StartAtOperationTime = t
@@ -97,6 +138,23 @@ func (cso *ChangeStreamOptions) SetStartAfter(sa interface{}) *ChangeStreamOptio
 	return cso
 }
 
+// SetCustom sets the value for the Custom field. Key-value pairs of the BSON map should correlate
+// with desired option names and values. Values must be Marshalable. Custom options may conflict
+// with non-custom options, and custom options bypass client-side validation. Prefer using non-custom
+// options where possible.
+func (cso *ChangeStreamOptions) SetCustom(c bson.M) *ChangeStreamOptions {
+	cso.Custom = c
+	return cso
+}
+
+// SetCustomPipeline sets the value for the CustomPipeline field. Key-value pairs of the BSON map
+// should correlate with desired option names and values. Values must be Marshalable. Custom pipeline
+// options bypass client-side validation. Prefer using non-custom options where possible.
+func (cso *ChangeStreamOptions) SetCustomPipeline(cp bson.M) *ChangeStreamOptions {
+	cso.CustomPipeline = cp
+	return cso
+}
+
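Not part of the upstream patch: a sketch of a change stream using the new pre-image, expanded-events, and comment options added above. The collection is assumed to exist with pre-/post-images enabled, and the event handling is intentionally minimal.

```go
package example

import (
	"context"
	"log"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

// watchWithImages opens a change stream that includes post-images and, when
// available, pre-images of updated documents, plus the expanded 6.0 event types.
func watchWithImages(ctx context.Context, coll *mongo.Collection) {
	csOpts := options.ChangeStream().
		SetComment("audit-stream").
		SetFullDocument(options.UpdateLookup).              // post-image via lookup
		SetFullDocumentBeforeChange(options.WhenAvailable). // pre-image, MongoDB >= 6.0
		SetShowExpandedEvents(true)                         // DDL events, MongoDB >= 6.0

	stream, err := coll.Watch(ctx, mongo.Pipeline{}, csOpts)
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close(ctx)

	for stream.Next(ctx) {
		var event bson.M
		if err := stream.Decode(&event); err != nil {
			log.Fatal(err)
		}
		log.Printf("event: %v", event["operationType"])
	}
}
```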
 // MergeChangeStreamOptions combines the given ChangeStreamOptions instances into a single ChangeStreamOptions in a
 // last-one-wins fashion.
 func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions {
@@ -111,21 +169,36 @@ func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions
 		if cso.Collation != nil {
 			csOpts.Collation = cso.Collation
 		}
+		if cso.Comment != nil {
+			csOpts.Comment = cso.Comment
+		}
 		if cso.FullDocument != nil {
 			csOpts.FullDocument = cso.FullDocument
 		}
+		if cso.FullDocumentBeforeChange != nil {
+			csOpts.FullDocumentBeforeChange = cso.FullDocumentBeforeChange
+		}
 		if cso.MaxAwaitTime != nil {
 			csOpts.MaxAwaitTime = cso.MaxAwaitTime
 		}
 		if cso.ResumeAfter != nil {
 			csOpts.ResumeAfter = cso.ResumeAfter
 		}
+		if cso.ShowExpandedEvents != nil {
+			csOpts.ShowExpandedEvents = cso.ShowExpandedEvents
+		}
 		if cso.StartAtOperationTime != nil {
 			csOpts.StartAtOperationTime = cso.StartAtOperationTime
 		}
 		if cso.StartAfter != nil {
 			csOpts.StartAfter = cso.StartAfter
 		}
+		if cso.Custom != nil {
+			csOpts.Custom = cso.Custom
+		}
+		if cso.CustomPipeline != nil {
+			csOpts.CustomPipeline = cso.CustomPipeline
+		}
 	}
 
 	return csOpts
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go
index da5f630d19..05f974f501 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go
@@ -45,7 +45,7 @@ type ContextDialer interface {
 // AuthMechanism: the mechanism to use for authentication. Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1",
 // "MONGODB-CR", "PLAIN", "GSSAPI", "MONGODB-X509", and "MONGODB-AWS". This can also be set through the "authMechanism"
 // URI option (e.g. "authMechanism=PLAIN"). For more information, see
-// https://docs.mongodb.com/manual/core/authentication-mechanisms/.
+// https://www.mongodb.com/docs/manual/core/authentication-mechanisms/.
 //
 // AuthMechanismProperties can be used to specify additional configuration options for certain mechanisms. They can also
 // be set through the "authMechanismProperties" URI option
@@ -121,9 +121,9 @@ type ClientOptions struct {
 	RetryWrites            *bool
 	ServerAPIOptions       *ServerAPIOptions
 	ServerSelectionTimeout *time.Duration
-	SocketTimeout          *time.Duration
 	SRVMaxHosts            *int
 	SRVServiceName         *string
+	Timeout                *time.Duration
 	TLSConfig              *tls.Config
 	WriteConcern           *writeconcern.WriteConcern
 	ZlibLevel              *int
@@ -151,6 +151,13 @@ type ClientOptions struct {
 	// Deprecated: This option is for internal use only and should not be set. It may be changed or removed in any
 	// release.
 	Deployment driver.Deployment
+
+	// SocketTimeout specifies the timeout to be used for the Client's socket reads and writes.
+	//
+	// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general
+	// Timeout option should be used in its place to control the amount of time that a single operation can run on the Client
+	// before returning an error. SocketTimeout is still usable through the deprecated setter.
+	SocketTimeout *time.Duration
 }
 
 // Client creates a new ClientOptions instance.
@@ -160,57 +167,58 @@ func Client() *ClientOptions {
 // Validate validates the client options. This method will return the first error found.
 func (c *ClientOptions) Validate() error {
-	c.validateAndSetError()
-	return c.err
-}
-
-func (c *ClientOptions) validateAndSetError() {
 	if c.err != nil {
-		return
+		return c.err
 	}
+	c.err = c.validate()
+	return c.err
+}
 
+func (c *ClientOptions) validate() error {
 	// Direct connections cannot be made if multiple hosts are specified or an SRV URI is used.
 	if c.Direct != nil && *c.Direct {
 		if len(c.Hosts) > 1 {
-			c.err = errors.New("a direct connection cannot be made if multiple hosts are specified")
-			return
+			return errors.New("a direct connection cannot be made if multiple hosts are specified")
 		}
 		if c.cs != nil && c.cs.Scheme == connstring.SchemeMongoDBSRV {
-			c.err = errors.New("a direct connection cannot be made if an SRV URI is used")
-			return
+			return errors.New("a direct connection cannot be made if an SRV URI is used")
 		}
 	}
 
+	if c.MaxPoolSize != nil && c.MinPoolSize != nil && *c.MaxPoolSize != 0 && *c.MinPoolSize > *c.MaxPoolSize {
+		return fmt.Errorf("minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=%d maxPoolSize=%d", *c.MinPoolSize, *c.MaxPoolSize)
+	}
+
 	// verify server API version if ServerAPIOptions are passed in.
 	if c.ServerAPIOptions != nil {
-		c.err = c.ServerAPIOptions.ServerAPIVersion.Validate()
+		if err := c.ServerAPIOptions.ServerAPIVersion.Validate(); err != nil {
+			return err
+		}
 	}
 
 	// Validation for load-balanced mode.
 	if c.LoadBalanced != nil && *c.LoadBalanced {
 		if len(c.Hosts) > 1 {
-			c.err = internal.ErrLoadBalancedWithMultipleHosts
-			return
+			return internal.ErrLoadBalancedWithMultipleHosts
 		}
 		if c.ReplicaSet != nil {
-			c.err = internal.ErrLoadBalancedWithReplicaSet
-			return
+			return internal.ErrLoadBalancedWithReplicaSet
 		}
 		if c.Direct != nil {
-			c.err = internal.ErrLoadBalancedWithDirectConnection
-			return
+			return internal.ErrLoadBalancedWithDirectConnection
 		}
 	}
 
 	// Validation for srvMaxHosts.
 	if c.SRVMaxHosts != nil && *c.SRVMaxHosts > 0 {
 		if c.ReplicaSet != nil {
-			c.err = internal.ErrSRVMaxHostsWithReplicaSet
+			return internal.ErrSRVMaxHostsWithReplicaSet
 		}
 		if c.LoadBalanced != nil && *c.LoadBalanced {
-			c.err = internal.ErrSRVMaxHostsWithLoadBalanced
+			return internal.ErrSRVMaxHostsWithLoadBalanced
 		}
 	}
+	return nil
 }
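Not part of the upstream patch: a sketch of the new minPoolSize/maxPoolSize check being surfaced through Validate. The driver also runs this validation when a Client is constructed, so a misconfigured pool now fails fast rather than silently.

```go
package example

import (
	"log"

	"go.mongodb.org/mongo-driver/mongo/options"
)

func validatePoolSizes() {
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetMinPoolSize(20).
		SetMaxPoolSize(10) // conflicts with MinPoolSize above

	if err := opts.Validate(); err != nil {
		// "minPoolSize must be less than or equal to maxPoolSize, got minPoolSize=20 maxPoolSize=10"
		log.Println(err)
	}
}
```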
"zstd" - requires server version >= 4.2, and driver version >= 1.2.0 with cgo support enabled or driver +// version >= 1.3.0 without cgo. // // If this option is specified, the driver will perform a negotiation with the server to determine a common list of of // compressors and will use the first one in that list when performing operations. See -// https://docs.mongodb.com/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more +// https://www.mongodb.com/docs/manual/reference/program/mongod/#cmdoption-mongod-networkmessagecompressors for more // information about configuring compression on the server and the server-side defaults. // // This can also be set through the "compressors" URI option (e.g. "compressors=zstd,zlib,snappy"). The default is @@ -574,7 +586,7 @@ func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions { // SetMaxPoolSize specifies that maximum number of connections allowed in the driver's connection pool to each server. // Requests to a server will block if this maximum is reached. This can also be set through the "maxPoolSize" URI option -// (e.g. "maxPoolSize=100"). The default is 100. If this is 0, it will be set to math.MaxInt64. +// (e.g. "maxPoolSize=100"). If this is 0, maximum connection pool size is not limited. The default is 100. func (c *ClientOptions) SetMaxPoolSize(u uint64) *ClientOptions { c.MaxPoolSize = &u return c @@ -636,7 +648,7 @@ func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptio // 3. "maxStalenessSeconds" (or "maxStaleness"): Specify a maximum replication lag for reads from secondaries in a // replica set (e.g. "maxStalenessSeconds=10"). // -// The default is readpref.Primary(). See https://docs.mongodb.com/manual/core/read-preference/#read-preference for +// The default is readpref.Primary(). See https://www.mongodb.com/docs/manual/core/read-preference/#read-preference for // more information about read preferences. func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions { c.ReadPreference = rp @@ -702,11 +714,30 @@ func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOption // SetSocketTimeout specifies how long the driver will wait for a socket read or write to return before returning a // network error. This can also be set through the "socketTimeoutMS" URI option (e.g. "socketTimeoutMS=1000"). The // default value is 0, meaning no timeout is used and socket operations can block indefinitely. +// +// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general +// Timeout option should be used in its place to control the amount of time that a single operation can run on the Client +// before returning an error. func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions { c.SocketTimeout = &d return c } +// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error. +// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only +// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option +// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client. +// +// If any Timeout is set (even 0) on the Client, the values of other, deprecated timeout-related options will be ignored. 
 
+// SetTimeout specifies the amount of time that a single operation run on this Client can execute before returning an error.
+// The deadline of any operation run through the Client will be honored above any Timeout set on the Client; Timeout will only
+// be honored if there is no deadline on the operation Context. Timeout can also be set through the "timeoutMS" URI option
+// (e.g. "timeoutMS=1000"). The default value is nil, meaning operations do not inherit a timeout from the Client.
+//
+// If any Timeout is set (even 0) on the Client, the values of other, deprecated timeout-related options will be ignored.
+// In particular: ClientOptions.SocketTimeout, WriteConcern.wTimeout, MaxTime on operations, and TransactionOptions.MaxCommitTime.
+//
+// NOTE(benjirewis): SetTimeout represents unstable, provisional API. The behavior of the driver when a Timeout is specified is
+// subject to change.
+func (c *ClientOptions) SetTimeout(d time.Duration) *ClientOptions {
+	c.Timeout = &d
+	return c
+}
+
 // SetTLSConfig specifies a tls.Config instance to use to configure TLS on all connections created to the cluster.
 // This can also be set through the following URI options:
 //
@@ -920,6 +951,9 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions {
 		if opt.SRVServiceName != nil {
 			c.SRVServiceName = opt.SRVServiceName
 		}
+		if opt.Timeout != nil {
+			c.Timeout = opt.Timeout
+		}
 		if opt.TLSConfig != nil {
 			c.TLSConfig = opt.TLSConfig
 		}
@@ -983,7 +1017,9 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw
 		return "", err
 	}
 
-	data := append(keyData, '\n')
+	data := make([]byte, 0, len(keyData)+len(certData)+1)
+	data = append(data, keyData...)
+	data = append(data, '\n')
 	data = append(data, certData...)
 	return addClientCertFromBytes(cfg, data, keyPassword)
 }
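Not part of the upstream patch: a sketch of the new client-level Timeout introduced above, and of how a per-operation Context deadline still takes precedence over it. The URI is a placeholder.

```go
package example

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func connectWithTimeout() {
	// Equivalent to "mongodb://localhost:27017/?timeoutMS=5000".
	opts := options.Client().
		ApplyURI("mongodb://localhost:27017").
		SetTimeout(5 * time.Second)

	client, err := mongo.Connect(context.Background(), opts)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Disconnect(context.Background()) }()

	// A Context deadline is honored above the Client-level Timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := client.Ping(ctx, nil); err != nil {
		log.Println(err)
	}
}
```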
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go
index 5c8111471b..e8b68a2706 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go
@@ -15,20 +15,20 @@ import (
 // CollectionOptions represents options that can be used to configure a Collection.
 type CollectionOptions struct {
-	// The read concern to use for operations executed on the Collection. The default value is nil, which means that
-	// the read concern of the database used to configure the Collection will be used.
+	// ReadConcern is the read concern to use for operations executed on the Collection. The default value is nil, which means that
+	// the read concern of the Database used to configure the Collection will be used.
 	ReadConcern *readconcern.ReadConcern
 
-	// The write concern to use for operations executed on the Collection. The default value is nil, which means that
-	// the write concern of the database used to configure the Collection will be used.
+	// WriteConcern is the write concern to use for operations executed on the Collection. The default value is nil, which means that
+	// the write concern of the Database used to configure the Collection will be used.
 	WriteConcern *writeconcern.WriteConcern
 
-	// The read preference to use for operations executed on the Collection. The default value is nil, which means that
-	// the read preference of the database used to configure the Collection will be used.
+	// ReadPreference is the read preference to use for operations executed on the Collection. The default value is nil, which means that
+	// the read preference of the Database used to configure the Collection will be used.
 	ReadPreference *readpref.ReadPref
 
-	// The BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value
-	// is nil, which means that the registry of the database used to configure the Collection will be used.
+	// Registry is the BSON registry to marshal and unmarshal documents for operations executed on the Collection. The default value
+	// is nil, which means that the registry of the Database used to configure the Collection will be used.
 	Registry *bsoncodec.Registry
 }
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go
index 094524c100..06f5dce761 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/countoptions.go
@@ -15,6 +15,13 @@ type CountOptions struct {
 	// default value is nil, which means the default collation of the collection will be used.
 	Collation *Collation
 
+	// TODO(GODRIVER-2386): The CountOptions executor uses aggregation under the hood, which means this type has to be
+	// a string for now. This can be replaced with `Comment interface{}` once GODRIVER-2386 is implemented.
+
+	// A string or document that will be included in server logs, profiling logs, and currentOp queries to help trace
+	// the operation. The default is nil, which means that no comment will be included in the logs.
+	Comment *string
+
 	// The index to use for the aggregation. This should either be the index name as a string or the index specification
 	// as a document. The driver will return an error if the hint parameter is a multi-key map. The default value is nil,
 	// which means that no hint will be sent.
@@ -26,6 +33,10 @@ type CountOptions struct {
 	// The maximum amount of time that the query can run on the server. The default value is nil, meaning that there is
 	// no time limit for query execution.
+	//
+	// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general
+	// Timeout option should be used in its place to control the amount of time that the count operation can run before
+	// returning an error. MaxTime is still usable through the deprecated setter.
 	MaxTime *time.Duration
 
 	// The number of documents to skip before counting. The default value is 0.
@@ -43,6 +54,12 @@ func (co *CountOptions) SetCollation(c *Collation) *CountOptions {
 	return co
 }
 
+// SetComment sets the value for the Comment field.
+func (co *CountOptions) SetComment(c string) *CountOptions {
+	co.Comment = &c
+	return co
+}
+
 // SetHint sets the value for the Hint field.
 func (co *CountOptions) SetHint(h interface{}) *CountOptions {
 	co.Hint = h
@@ -56,6 +73,10 @@ func (co *CountOptions) SetLimit(i int64) *CountOptions {
 }
 
 // SetMaxTime sets the value for the MaxTime field.
+//
+// Deprecated: This option is deprecated and will eventually be removed in version 2.0 of the driver. The more general
+// Timeout option should be used in its place to control the amount of time that the count operation can run before
+// returning an error.
 func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions {
 	co.MaxTime = &d
 	return co
@@ -77,6 +98,9 @@ func MergeCountOptions(opts ...*CountOptions) *CountOptions {
 		if co.Collation != nil {
 			countOpts.Collation = co.Collation
 		}
+		if co.Comment != nil {
+			countOpts.Comment = co.Comment
+		}
 		if co.Hint != nil {
 			countOpts.Hint = co.Hint
 		}
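Not part of the upstream patch: a sketch of the new CountOptions Comment setter alongside the existing options; the filter, comment string, and index name are illustrative, and MaxTime is shown only to note its deprecation in favor of the client-level Timeout.

```go
package example

import (
	"context"
	"log"
	"time"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func countActive(ctx context.Context, coll *mongo.Collection) {
	n, err := coll.CountDocuments(ctx,
		bson.D{{Key: "status", Value: "active"}},
		options.Count().
			SetComment("dashboard-widget"). // string-only until GODRIVER-2386
			SetHint("status_1").            // assumes an index named status_1 exists
			SetMaxTime(2*time.Second))      // deprecated; prefer the Client-level Timeout
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d active documents", n)
}
```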
diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go
index 130c8e75c3..6fc7d066a2 100644
--- a/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go
+++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/createcollectionoptions.go
@@ -67,7 +67,7 @@ func (tso *TimeSeriesOptions) SetGranularity(granularity string) *TimeSeriesOpti
 
 // CreateCollectionOptions represents options that can be used to configure a CreateCollection operation.
 type CreateCollectionOptions struct {
-	// Specifies if the collection is capped (see https://docs.mongodb.com/manual/core/capped-collections/). If true,
+	// Specifies if the collection is capped (see https://www.mongodb.com/docs/manual/core/capped-collections/). If true,
 	// the SizeInBytes option must also be specified. The default value is false.
 	Capped *bool
@@ -75,6 +75,12 @@ type CreateCollectionOptions struct {
 	// For previous server versions, the driver will return an error if this option is used. The default value is nil.
 	Collation *Collation
 
+	// Specifies how change streams opened against the collection can return pre- and post-images of updated
+	// documents. The value must be a document in the form {